From pypy.commits at gmail.com Thu Dec 1 00:41:13 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 30 Nov 2016 21:41:13 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Copy _io._BufferedIOBase docstrings from CPython Message-ID: <583fb7f9.c9b3c20a.40d2f.e857@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88784:f0f3c53e6757 Date: 2016-12-01 05:40 +0000 http://bitbucket.org/pypy/pypy/changeset/f0f3c53e6757/ Log: Copy _io._BufferedIOBase docstrings from CPython diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -63,15 +63,47 @@ raise NotImplementedError def read_w(self, space, w_size=None): + """Read and return up to n bytes. + +If the argument is omitted, None, or negative, reads and +returns all data until EOF. + +If the argument is positive, and the underlying raw stream is +not 'interactive', multiple raw reads may be issued to satisfy +the byte count (unless EOF is reached first). But for +interactive raw streams (as well as sockets and pipes), at most +one raw read will be issued, and a short result does not imply +that EOF is imminent. + +Returns an empty bytes object on EOF. + +Returns None if the underlying raw stream was open in non-blocking +mode and no data is available at the moment.""" self._unsupportedoperation(space, "read") def read1_w(self, space, w_size): + """Read and return up to n bytes, with at most one read() call +to the underlying raw stream. A short result does not imply +that EOF is imminent. + +Returns an empty bytes object on EOF.""" self._unsupportedoperation(space, "read1") def write_w(self, space, w_data): + """Write the given buffer to the IO stream. + +Returns the number of bytes written, which is always the length of b +in bytes. 
+ +Raises BlockingIOError if the buffer is full and the +underlying raw stream cannot accept more data at the moment.""" self._unsupportedoperation(space, "write") def detach_w(self, space): + """Disconnect this buffer from its underlying raw stream and return it. + +After the raw stream has been detached, the buffer is in an unusable +state.""" self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): @@ -92,6 +124,20 @@ W_BufferedIOBase.typedef = TypeDef( '_io._BufferedIOBase', W_IOBase.typedef, + __doc__="""Base class for buffered IO objects. + +The main difference with RawIOBase is that the read() method +supports omitting the size argument, and does not have a default +implementation that defers to readinto(). + +In addition, read(), readinto() and write() may raise +BlockingIOError if the underlying raw stream is in non-blocking +mode and not ready; unlike their raw counterparts, they will never +return None. + +A typical implementation should not inherit from a RawIOBase +implementation, but wrap one. 
+""", __new__ = generic_new_descr(W_BufferedIOBase), read = interp2app(W_BufferedIOBase.read_w), read1 = interp2app(W_BufferedIOBase.read1_w), From pypy.commits at gmail.com Thu Dec 1 01:35:38 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 30 Nov 2016 22:35:38 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Make sure that BufferedReader passes down a valid memoryview to reader.raw.readinto() Message-ID: <583fc4ba.a285c20a.08c8.09bc@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88785:58cf1903ac70 Date: 2016-12-01 06:34 +0000 http://bitbucket.org/pypy/pypy/changeset/58cf1903ac70/ Log: Make sure that BufferedReader passes down a valid memoryview to reader.raw.readinto() diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -158,6 +158,9 @@ def getlength(self): return self.length + def getitem(self, index): + return self.buf[index] + def setitem(self, index, char): self.buf[self.start + index] = char diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -65,6 +65,23 @@ bufio = _io.BufferedReader(MockIO()) assert bufio.read(9000) == b"abcdefg" + def test_valid_buffer(self): + import _io + + class MockIO(_io._IOBase): + def readable(self): + return True + + def readinto(self, buf): + # Check that `buf` is a valid memoryview object + assert buf.itemsize == 1 + assert buf.strides == (1,) + assert buf.shape == (len(buf),) + return len(bytes(buf)) + + bufio = _io.BufferedReader(MockIO()) + assert len(bufio.read(5)) == 5 # Note: PyPy zeros the buffer, CPython does not + def test_buffering(self): import _io data = b"abcdefghi" @@ -695,7 +712,7 @@ expected[j] = 2 expected[i] = 1 assert raw.getvalue() == expected - + def test_interleaved_read_write(self): import _io as io # Test for issue #12213 From 
pypy.commits at gmail.com Thu Dec 1 06:46:22 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 03:46:22 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: disable the importing of the _ssl module (not a builtin module anymore) Message-ID: <58400d8e.08301c0a.5fc48.91d4@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88786:ef10bb6be703 Date: 2016-11-29 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/ef10bb6be703/ Log: disable the importing of the _ssl module (not a builtin module anymore) diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -156,7 +156,7 @@ assert space1.str_w(w_str) == "hello" class TestMixedModuleUnfreeze: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_socket',)) def test_random_stuff_can_unfreeze(self): # When a module contains an "import" statement in applevel code, the @@ -167,13 +167,13 @@ # at runtime, like setting os.environ (posix module) or initializing # the winsock library (_socket module) w_socket = self.space.builtin_modules['_socket'] - w_ssl = self.space.builtin_modules['_ssl'] + # _ssl is not builtin anymore, this test also tried to _cleanup_ on + # the wrapped ssl object + # w_ssl = self.space.builtin_modules['_ssl'] # Uncomment this line for a workaround # space.getattr(w_ssl, space.wrap('SSLError')) w_socket._cleanup_() assert w_socket.startup_called == False - w_ssl._cleanup_() # w_ssl.appleveldefs['SSLError'] imports _socket - assert w_socket.startup_called == False From pypy.commits at gmail.com Thu Dec 1 06:46:24 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 03:46:24 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: fail on socket recv_into if length <= -1 or the length is bigger than the buffer can hold + test Message-ID: <58400d90.272cc20a.647e8.7a1c@mx.google.com> Author: 
Richard Plangger Branch: py3.5-ssl Changeset: r88787:3259c787b38b Date: 2016-12-01 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3259c787b38b/ Log: fail on socket recv_into if length <= -1 or the length is bigger than the buffer can hold + test diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -517,8 +517,12 @@ """ rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() - if nbytes == 0 or nbytes > lgt: + if nbytes < 0: + raise oefmt(space.w_ValueError, "negative buffersize in recv_into") + if nbytes == 0: nbytes = lgt + if lgt < nbytes: + raise oefmt(space.w_ValueError, "buffer too small for requested bytes") try: return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags)) except SocketError as e: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -868,6 +868,22 @@ posix.close(fileno) cli.close() + def test_recv_into_params(self): + import os + import _socket + cli = _socket.socket() + cli.connect(self.serv.getsockname()) + fileno, addr = self.serv._accept() + os.write(fileno, b"abcdef") + # + m = memoryview(bytearray(5)) + raises(ValueError, cli.recv_into, m, -1) + raises(ValueError, cli.recv_into, m, 6) + cli.recv_into(m,5) + assert m.tobytes() == b"abcde" + os.close(fileno) + cli.close() + class AppTestErrno: spaceconfig = {'usemodules': ['_socket', 'select']} From pypy.commits at gmail.com Thu Dec 1 06:51:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 03:51:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: merge py3.5 Message-ID: <58400ed9.ce841c0a.b0b67.9104@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88788:c75ec5f968ae Date: 2016-12-01 12:51 +0100 http://bitbucket.org/pypy/pypy/changeset/c75ec5f968ae/ Log: 
merge py3.5 diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -60,6 +60,8 @@ def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" + so_list = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION] + so_ext = (so_list or ['.so'])[0] g = {} g['CC'] = "gcc -pthread" g['CXX'] = "g++ -pthread" @@ -67,7 +69,7 @@ g['CFLAGS'] = "-DNDEBUG -O2" g['CCSHARED'] = "-fPIC" g['LDSHARED'] = "gcc -pthread -shared" - g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] + g['SO'] = so_ext g['SHLIB_SUFFIX'] = g['SO'] g['AR'] = "ar" g['ARFLAGS'] = "rc" diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -192,6 +192,14 @@ assert self._finalize_.im_func is not W_Root._finalize_.im_func space.finalizer_queue.register_finalizer(self) + def may_unregister_rpython_finalizer(self, space): + """Optimization hint only: if there is no user-defined __del__() + method, pass the hint ``don't call any finalizer'' to rgc. 
+ """ + if not self.getclass(space).hasuserdel: + from rpython.rlib import rgc + rgc.may_ignore_finalizer(self) + # hooks that the mapdict implementations needs: def _get_mapdict_map(self): return None diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -3,7 +3,7 @@ from pypy.interpreter.pyopcode import LoopBlock, SApplicationException, Yield from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY from pypy.interpreter.astcompiler import consts -from rpython.rlib import jit +from rpython.rlib import jit, rgc from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_uint @@ -20,7 +20,7 @@ self.running = False self._name = name # may be null, use get_name() self._qualname = qualname # may be null, use get_qualname() - if (isinstance(self, Coroutine) # XXX would be cool not to need this + if (isinstance(self, Coroutine) or self.pycode.co_flags & CO_YIELD_INSIDE_TRY): self.register_finalizer(self.space) self.saved_operr = None @@ -89,7 +89,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: - self.frame = None + self.frame_is_finished() if space.is_w(w_result, space.w_None): raise OperationError(space.w_StopIteration, space.w_None) else: @@ -107,6 +107,14 @@ if self.saved_operr is not None: ec.set_sys_exc_info(self.saved_operr) self.saved_operr = None + # + # Optimization only: after we've started a Coroutine without + # CO_YIELD_INSIDE_TRY, then Coroutine._finalize_() will be a no-op + if (isinstance(self, Coroutine) + and frame.last_instr == -1 + and not (self.pycode.co_flags & CO_YIELD_INSIDE_TRY)): + rgc.may_ignore_finalizer(self) + # self.running = True try: w_result = frame.execute_frame(self, w_arg_or_err) @@ -116,7 +124,7 @@ if e.match(space, space.w_StopIteration): self._leak_stopiteration(e) finally: - self.frame = None + self.frame_is_finished() raise finally: frame.f_backref = 
jit.vref_None @@ -323,6 +331,10 @@ break block = block.previous + def frame_is_finished(self): + self.frame = None + rgc.may_ignore_finalizer(self) + class GeneratorIterator(GeneratorOrCoroutine): "An iterator created by a generator." @@ -364,7 +376,7 @@ break # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: - self.frame = None + self.frame_is_finished() break results.append(w_result) # YIELDed return unpack_into diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -121,31 +121,8 @@ return space.newtuple(tup_return) def descr_module__repr__(self, space): - w_loader = space.finditem(self.w_dict, space.wrap('__loader__')) - if w_loader is not None: - try: - return space.call_method(w_loader, "module_repr", self) - except OperationError: - pass - try: - w_name = space.getattr(self, space.wrap('__name__')) - name = space.unicode_w(space.repr(w_name)) - except OperationError: - name = u"'?'" - - try: - w___file__ = space.getattr(self, space.wrap('__file__')) - except OperationError: - w___file__ = space.w_None - if not space.isinstance_w(w___file__, space.w_unicode): - if w_loader is not None: - w_loader_repr = space.unicode_w(space.repr(w_loader)) - return space.wrap(u"" % (name, w_loader_repr)) - else: - return space.wrap(u"" % (name,)) - else: - __file__ = space.unicode_w(space.repr(w___file__)) - return space.wrap(u"" % (name, __file__)) + w_importlib = space.getbuiltinmodule('_frozen_importlib') + return space.call_method(w_importlib, "_module_repr", self) def descr_getattribute(self, space, w_attr): from pypy.objspace.descroperation import object_getattribute diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -129,6 +129,20 @@ expected_repr = "".format(loader_repr) assert mod_repr == expected_repr + def 
test_repr_with_loader_with_raising_module_repr2(self): + import sys + test_module = type(sys)("test_module", "doc") + # If an exception occurs in module_repr(), the exception is caught + # and discarded, and the calculation of the module’s repr continues + # as if module_repr() did not exist. + class CustomLoaderWithRaisingRepr: + @classmethod + def module_repr(cls, module): + raise KeyboardInterrupt + + test_module.__loader__ = CustomLoaderWithRaisingRepr + raises(KeyboardInterrupt, 'repr(test_module)') + def test_repr_with_raising_loader_and___file__(self): import sys test_module = type(sys)("test_module", "doc") diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -1,3 +1,4 @@ +from rpython.rlib import jit from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec @@ -67,29 +68,43 @@ # fallback to object.__getattribute__() return space.call_function(object_getattribute(space), self, w_name) -def _super_from_frame(space, frame): - """super() without args -- fill in from __class__ and first local - variable on the stack. 
- """ - code = frame.pycode - if not code: - raise oefmt(space.w_RuntimeError, "super(): no code object") + at jit.elidable +def _get_self_location(space, code): if code.co_argcount == 0: raise oefmt(space.w_RuntimeError, "super(): no arguments") - w_obj = frame.locals_cells_stack_w[0] - if not w_obj: - raise oefmt(space.w_RuntimeError, "super(): arg[0] deleted") + args_to_copy = code._args_as_cellvars + for i in range(len(args_to_copy)): + if args_to_copy[i] == 0: + self_cell = i + break + else: + self_cell = -1 for index, name in enumerate(code.co_freevars): if name == '__class__': break else: raise oefmt(space.w_RuntimeError, "super(): __class__ cell not found") + class_cell = len(code.co_cellvars) + index + return self_cell, class_cell + +def _super_from_frame(space, frame): + """super() without args -- fill in from __class__ and first local + variable on the stack. + """ + if frame is None: + raise oefmt(space.w_RuntimeError, "super(): no frame object") + self_cell, class_cell = _get_self_location(space, frame.getcode()) + if self_cell < 0: + w_obj = frame.locals_cells_stack_w[0] + else: + w_obj = frame._getcell(self_cell).w_value + if not w_obj: + raise oefmt(space.w_RuntimeError, "super(): arg[0] deleted") + # a kind of LOAD_DEREF - cell = frame._getcell(len(code.co_cellvars) + index) - try: - w_starttype = cell.get() - except ValueError: + w_starttype = frame._getcell(class_cell).w_value + if w_starttype is None: raise oefmt(space.w_RuntimeError, "super(): empty __class__ cell") return w_starttype, w_obj diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -508,4 +508,15 @@ del __class__ super() raises(RuntimeError, X().f) + class X: + def f(self): + def g(): + print(self) # make 'self' a closure inside 'f' + del self + super() + raises(RuntimeError, X().f) + class X: + def f(*args): + 
super() + raises(RuntimeError, X().f) """ diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -397,7 +397,7 @@ space = self.space if space.is_none(w_destructor): if isinstance(self, W_CDataGCP): - self.w_destructor = None + self.detach_destructor() return space.w_None raise oefmt(space.w_TypeError, "Can remove destructor only on a object " @@ -604,6 +604,10 @@ self.w_destructor = None self.space.call_function(w_destructor, self.w_original_cdata) + def detach_destructor(self): + self.w_destructor = None + self.may_unregister_rpython_finalizer(self.space) + W_CData.typedef = TypeDef( '_cffi_backend.CData', diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py --- a/pypy/module/_cffi_backend/cdlopen.py +++ b/pypy/module/_cffi_backend/cdlopen.py @@ -55,6 +55,7 @@ if not libhandle: raise oefmt(self.ffi.w_FFIError, "library '%s' is already closed", self.libname) + self.may_unregister_rpython_finalizer(self.ffi.space) # Clear the dict to force further accesses to do cdlopen_fetch() # again, and fail because the library was closed. Note that the diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -63,15 +63,47 @@ raise NotImplementedError def read_w(self, space, w_size=None): + """Read and return up to n bytes. + +If the argument is omitted, None, or negative, reads and +returns all data until EOF. + +If the argument is positive, and the underlying raw stream is +not 'interactive', multiple raw reads may be issued to satisfy +the byte count (unless EOF is reached first). But for +interactive raw streams (as well as sockets and pipes), at most +one raw read will be issued, and a short result does not imply +that EOF is imminent. + +Returns an empty bytes object on EOF. 
+ +Returns None if the underlying raw stream was open in non-blocking +mode and no data is available at the moment.""" self._unsupportedoperation(space, "read") def read1_w(self, space, w_size): + """Read and return up to n bytes, with at most one read() call +to the underlying raw stream. A short result does not imply +that EOF is imminent. + +Returns an empty bytes object on EOF.""" self._unsupportedoperation(space, "read1") def write_w(self, space, w_data): + """Write the given buffer to the IO stream. + +Returns the number of bytes written, which is always the length of b +in bytes. + +Raises BlockingIOError if the buffer is full and the +underlying raw stream cannot accept more data at the moment.""" self._unsupportedoperation(space, "write") def detach_w(self, space): + """Disconnect this buffer from its underlying raw stream and return it. + +After the raw stream has been detached, the buffer is in an unusable +state.""" self._unsupportedoperation(space, "detach") def readinto_w(self, space, w_buffer): @@ -92,6 +124,20 @@ W_BufferedIOBase.typedef = TypeDef( '_io._BufferedIOBase', W_IOBase.typedef, + __doc__="""Base class for buffered IO objects. + +The main difference with RawIOBase is that the read() method +supports omitting the size argument, and does not have a default +implementation that defers to readinto(). + +In addition, read(), readinto() and write() may raise +BlockingIOError if the underlying raw stream is in non-blocking +mode and not ready; unlike their raw counterparts, they will never +return None. + +A typical implementation should not inherit from a RawIOBase +implementation, but wrap one. 
+""", __new__ = generic_new_descr(W_BufferedIOBase), read = interp2app(W_BufferedIOBase.read_w), read1 = interp2app(W_BufferedIOBase.read1_w), @@ -112,6 +158,9 @@ def getlength(self): return self.length + def getitem(self, index): + return self.buf[index] + def setitem(self, index, char): self.buf[self.start + index] = char diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -65,6 +65,23 @@ bufio = _io.BufferedReader(MockIO()) assert bufio.read(9000) == b"abcdefg" + def test_valid_buffer(self): + import _io + + class MockIO(_io._IOBase): + def readable(self): + return True + + def readinto(self, buf): + # Check that `buf` is a valid memoryview object + assert buf.itemsize == 1 + assert buf.strides == (1,) + assert buf.shape == (len(buf),) + return len(bytes(buf)) + + bufio = _io.BufferedReader(MockIO()) + assert len(bufio.read(5)) == 5 # Note: PyPy zeros the buffer, CPython does not + def test_buffering(self): import _io data = b"abcdefghi" @@ -695,7 +712,7 @@ expected[j] = 2 expected[i] = 1 assert raw.getvalue() == expected - + def test_interleaved_read_write(self): import _io as io # Test for issue #12213 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -216,7 +216,7 @@ def descr_repr(self, space): fd = intmask(self.sock.fd) # Force to signed type even on Windows. 
return space.wrap("" % + " type=%d, proto=%d>" % (fd, self.sock.family, self.sock.type, self.sock.proto)) @@ -266,6 +266,7 @@ except SocketError: # cpython doesn't return any errors on close pass + self.may_unregister_rpython_finalizer(space) def connect_w(self, space, w_addr): """connect(address) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -373,12 +373,12 @@ import _socket s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) try: - expected = ('' + expected = ('' % (s.fileno(), s.family, s.type, s.proto)) assert repr(s) == expected finally: s.close() - expected = ('' + expected = ('' % (s.family, s.type, s.proto)) assert repr(s) == expected diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -218,7 +218,7 @@ return self.space.w_None return w_obj - def descr__eq__(self, space, w_ref2): + def compare(self, space, w_ref2, invert): if not isinstance(w_ref2, W_Weakref): return space.w_NotImplemented ref1 = self @@ -226,11 +226,18 @@ w_obj1 = ref1.dereference() w_obj2 = ref2.dereference() if w_obj1 is None or w_obj2 is None: - return space.is_(ref1, ref2) - return space.eq(w_obj1, w_obj2) + w_res = space.is_(ref1, ref2) + else: + w_res = space.eq(w_obj1, w_obj2) + if invert: + w_res = space.not_(w_res) + return w_res + + def descr__eq__(self, space, w_ref2): + return self.compare(space, w_ref2, invert=False) def descr__ne__(self, space, w_ref2): - return space.not_(space.eq(self, w_ref2)) + return self.compare(space, w_ref2, invert=True) def descr_callback(self, space): return self.w_callable diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ 
b/pypy/module/_weakref/test/test_weakref.py @@ -153,6 +153,14 @@ assert not (ref1 == []) assert ref1 != [] + def test_ne(self): + import _weakref + class X(object): + pass + ref1 = _weakref.ref(X()) + assert ref1.__eq__(X()) is NotImplemented + assert ref1.__ne__(X()) is NotImplemented + def test_getweakrefs(self): import _weakref, gc class A(object): diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -272,11 +272,11 @@ _write = libc.load_function(BWrite, 'write') i = 0 fd0, fd1 = os.pipe() - buffer = _cffi_backend.newp(BCharP, 'A') + buffer = _cffi_backend.newp(BCharP, b'A') while i < 300: tmp = _write(fd1, buffer, 1) # ID: cfficall assert tmp == 1 - assert os.read(fd0, 2) == 'A' + assert os.read(fd0, 2) == b'A' i += 1 os.close(fd0) os.close(fd1) @@ -410,7 +410,7 @@ i161 = int_lt(i160, i43) guard_true(i161, descr=...) i162 = int_add(i160, 1) - setfield_gc(p22, i162, descr=) + setfield_gc(p22, i162, descr=) guard_not_invalidated(descr=...) p163 = force_token() p164 = force_token() diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,5 +16,6 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ + p1 = getfield_gc_r(..., descr=...) # dead guard_not_invalidated(descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -320,3 +320,36 @@ --TICK-- jump(..., descr=...) 
""") + + def test_super_no_args(self): + def main(): + class A(object): + def m(self, x): + return x + 1 + class B(A): + def m(self, x): + return super().m(x) + i = 0 + while i < 300: + i = B().m(i) + return i + + log = self.run(main, []) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i78 = int_lt(i72, 300) + guard_true(i78, descr=...) + guard_not_invalidated(descr=...) + p1 = force_token() + p65 = force_token() + p3 = force_token() + i81 = int_add(i72, 1) + + # can't use TICK here, because of the extra setfield_gc + ticker0 = getfield_raw_i(#, descr=) + setfield_gc(p0, p65, descr=) + ticker_cond0 = int_lt(ticker0, 0) + guard_false(ticker_cond0, descr=...) + + jump(..., descr=...) + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -38,7 +38,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" ... - p76 = call_assembler_r(_, _, _, _, descr=...) + p76 = call_assembler_r(..., descr=...) ... """) loop2 = log.loops[0] @@ -79,6 +79,6 @@ assert len(guards) < 20 assert loop.match(""" ... - p76 = call_assembler_r(_, _, _, _, descr=...) + p76 = call_assembler_r(..., descr=...) ... 
""") diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -247,6 +247,13 @@ """) def test_dont_trace_every_iteration(self): + def reference(a, b): + i = sa = 0 + while i < 300: + sa += a % b + i += 1 + return sa + # def main(a, b): i = sa = 0 while i < 300: @@ -258,9 +265,12 @@ i += 1 return sa # + log_ref = self.run(reference, [10, 20]) + assert log_ref.result == 300 * (10 % 20) + # log = self.run(main, [10, 20]) assert log.result == 300 * (10 % 20) - assert log.jit_summary.tracing_no == 1 + assert log.jit_summary.tracing_no == log_ref.jit_summary.tracing_no loop, = log.loops_by_filename(self.filepath) assert loop.match(""" i11 = int_lt(i7, 300) @@ -274,7 +284,7 @@ # log = self.run(main, [-10, -20]) assert log.result == 300 * (-10 % -20) - assert log.jit_summary.tracing_no == 1 + assert log.jit_summary.tracing_no == log_ref.jit_summary.tracing_no def test_overflow_checking(self): """ @@ -297,6 +307,7 @@ self.run_and_check(main, []) def test_global(self): + # check that the global read is removed even from the entry bridge log = self.run(""" i = 0 globalinc = 1 @@ -308,7 +319,10 @@ """, [1000]) loop, = log.loops_by_id("globalread", is_entry_bridge=True) - assert len(loop.ops_by_id("globalread")) == 0 + assert loop.match_by_id("globalread", """ + # only a dead read + p26 = getfield_gc_r(ConstPtr(ptr25), descr=) + """) def test_eval(self): def main(): @@ -349,7 +363,8 @@ def test_long_comparison(self): def main(n): while n: - 12345L > 123L # ID: long_op + x = 12345678901234567890123456 + x > 1231231231231231231231231 # ID: long_op n -= 1 log = self.run(main, [300]) diff --git a/pypy/module/pypyjit/test_pypy_c/test_shift.py b/pypy/module/pypyjit/test_pypy_c/test_shift.py --- a/pypy/module/pypyjit/test_pypy_c/test_shift.py +++ b/pypy/module/pypyjit/test_pypy_c/test_shift.py @@ -196,8 +196,7 @@ """ from sys import 
maxint - def main(a, b, c): - from sys import maxint + def main(a, b, c, maxint): i = sa = 0 while i < 300: if 0 < a < 10: pass @@ -210,9 +209,9 @@ sa += (b<<100)>>100 sa += (c<<100)>>100 i += 1 - return long(sa) + return sa for a in (1, 4, 8, 100): for b in (-10, 10, -201, 201, -maxint/3, maxint/3): for c in (-10, 10, -maxint/3, maxint/3): - yield self.run_and_check, main, [a, b, c] + yield self.run_and_check, main, [a, b, c, maxint] diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -5,50 +5,102 @@ class TestString(BaseTestPyPyC): + + def test_python3_missing_bchr(self): + # Check that 'bytes([i])' is special-cased into something + # efficient, as Python 3.5 doesn't have a bchr() function or + # anything more direct. + def main(n): + i = 0 + result = b'' + while i < n: + c = bytes([i]) + result += c + i += 1 + return i + log = self.run(main, [255]) + assert log.result == 255 + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + # nothing left like allocating a list object or doing any + # residual call + i49 = int_lt(i38, i26) + guard_true(i49, descr=...) + guard_not_invalidated(descr=...) + i51 = int_lt(i38, 256) + guard_true(i51, descr=...) + i53 = int_add(i38, 1) + --TICK-- + i58 = strlen(p46) + i60 = int_add(i58, 1) + p61 = newstr(i60) + copystrcontent(p46, p61, 0, 0, i58) + strsetitem(p61, i58, i38) + p62 = newstr(1) + strsetitem(p62, 0, i38) + jump(..., descr=...) 
+ """) + def test_lookup_default_encoding(self): def main(n): - import string i = 0 - letters = string.letters - uletters = unicode(string.letters) + letters = b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + uletters = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' while i < n: - i += letters[i % len(letters)] == uletters[i % len(letters)] + c = bytes([letters[i % len(uletters)]]) + i += (c.decode() == uletters[i % len(uletters)]) return i log = self.run(main, [300], import_site=True) assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i14 = int_lt(i6, i9) - guard_true(i14, descr=...) + i88 = int_lt(i83, i36) + guard_true(i88, descr=...) + p90 = getfield_gc_r(ConstPtr(ptr89), descr=) guard_not_invalidated(descr=...) - i16 = int_eq(i6, %d) - i19 = call_i(ConstClass(ll_int_py_mod__Signed_Signed), i6, i10, descr=) - i21 = int_lt(i19, 0) - guard_false(i21, descr=...) - i22 = int_ge(i19, i10) - guard_false(i22, descr=...) - i23 = strgetitem(p11, i19) - i24 = int_ge(i19, i12) - guard_false(i24, descr=...) - i25 = unicodegetitem(p13, i19) - p27 = newstr(1) - strsetitem(p27, 0, i23) - p30 = call_r(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=...) + i92 = int_eq(i83, %d) + i94 = call_i(ConstClass(ll_int_py_mod__Signed_Signed), i83, i46, descr=) + i96 = int_lt(i94, 0) + guard_false(i96, descr=...) + i97 = int_ge(i94, i53) + guard_false(i97, descr=...) + i98 = strgetitem(p52, i94) + p100 = getfield_gc_r(ConstPtr(ptr99), descr=) + p101 = force_token() + p103 = newstr(1) + strsetitem(p103, 0, i98) + p104 = new(descr=) + p106 = newunicode(1) + setfield_gc(p0, p101, descr=) + setfield_gc(p104, p106, descr=) + setfield_gc(p104, 0, descr=) + setfield_gc(p104, 1, descr=) + setfield_gc(p104, 1, descr=) + i113 = call_may_force_i(ConstClass(str_decode_utf_8_impl), p103, 1, ConstPtr(null), 1, 0, 0, p104, descr=) + guard_not_forced(descr=...) guard_no_exception(descr=...) 
- i32 = call_i(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=...) - guard_true(i32, descr=...) - i34 = int_add(i6, 1) + p116 = call_r(ConstClass(ll_build_trampoline__v1351___simple_call__function_), p104, descr=) + guard_no_exception(descr=...) + guard_nonnull(p116, descr=...) + p118 = getfield_gc_r(ConstPtr(ptr117), descr=) + guard_not_invalidated(descr=...) + i119 = int_ge(i94, i46) + guard_false(i119, descr=...) + i120 = unicodegetitem(p45, i94) + i122 = call_i(ConstClass(_ll_2_str_eq_nonnull_char__rpy_unicodePtr_UniChar), p116, i120, descr=) + guard_true(i122, descr=...) + i124 = int_add(i83, 1) --TICK-- jump(..., descr=...) """ % (-sys.maxint-1,)) - def test_long(self): + def test_int_base_16(self): def main(n): - import string i = 1 while i < n: - i += int(long(string.digits[i % len(string.digits)], 16)) + digits = '0123456789' + i += int(digits[i % len(digits)], 16) return i log = self.run(main, [1100], import_site=True) @@ -61,7 +113,9 @@ assert loop.match(""" i11 = int_lt(i6, i7) guard_true(i11, descr=...) + p70 = getfield_gc_r(ConstPtr(ptr69), descr=) guard_not_invalidated(descr=...) + p72 = getfield_gc_r(ConstPtr(ptr71), descr=) i13 = int_eq(i6, %d) # value provided below # "mod 10" block: @@ -73,17 +127,20 @@ i87 = int_mul(i85, 10) i19 = int_sub(i6, i87) - i23 = strgetitem(p10, i19) - p25 = newstr(1) - strsetitem(p25, 0, i23) - p93 = call_r(ConstClass(fromstr), p25, 16, descr=) + i23 = unicodegetitem(ConstPtr(ptr92), i19) + p25 = newunicode(1) + unicodesetitem(p25, 0, i23) + p97 = call_r(ConstClass(_rpy_unicode_to_decimal_w), p25, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_i(p93, descr=) - i96 = int_gt(i95, #) - guard_false(i96, descr=...) 
- i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) + i98 = unicodelen(p97) + p99 = force_token() + setfield_gc(p0, p99, descr=) + p104 = call_may_force_r(ConstClass(unicode_encode_utf_8_impl), p97, i98, ConstPtr(ptr101), 1, 1, descr=) + guard_not_forced(descr=...) guard_no_exception(descr=...) - i95 = int_add_ovf(i6, i94) + i107 = call_i(ConstClass(string_to_int), p104, 16, descr=) + guard_no_exception(descr=...) + i95 = int_add_ovf(i6, i107) guard_no_overflow(descr=...) --TICK-- jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -21,7 +21,7 @@ while i < n: buf = struct.pack(" Author: Richard Plangger Branch: Changeset: r88789:8096cd4c9209 Date: 2016-12-01 13:40 +0100 http://bitbucket.org/pypy/pypy/changeset/8096cd4c9209/ Log: precision loss, big number * small number has a different result then doing the calculation in order, disable the feature diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -518,12 +518,10 @@ def test_prod(self): result = self.run("prod") assert int(result) == 576 - self.check_vectorized(1, 1) def test_prod_zero(self): result = self.run("prod_zero") assert int(result) == 0 - self.check_vectorized(1, 1) def define_max(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -76,7 +76,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), ('sum','float', 2581, 2581, 1), - ('prod','float', 1, 3178, 1), ('prod','int', 1, 3178, 1), ('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), diff --git a/rpython/jit/backend/llgraph/runner.py 
b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1128,7 +1128,7 @@ value = sum(value) elif info.accum_operation == '*': def prod(acc, x): return acc * x - value = reduce(prod, value, 1) + value = reduce(prod, value, 1.0) else: raise NotImplementedError("accum operator in fail guard") values[i] = value diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -980,7 +980,6 @@ class AccumPack(Pack): SUPPORTED = { rop.FLOAT_ADD: '+', rop.INT_ADD: '+', - rop.FLOAT_MUL: '*', } def __init__(self, nodes, operator, position): diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -847,6 +847,10 @@ vecop, count) oplist.append(vecop) elif pack.reduce_init() == 1: + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied. 
giving an + # inprecision result, thus this is disabled now + raise NotImplementedError # multiply is only supported by floats vecop = OpHelpers.create_vec_expand(ConstFloat(1.0), bytesize, signed, count) diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -414,7 +414,9 @@ lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed) small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False, allow_infinity=False) test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE) - test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied losing precision + # test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) def test_constant_expand(self): From pypy.commits at gmail.com Thu Dec 1 09:06:20 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 01 Dec 2016 06:06:20 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: replace more w_str and str_w instances Message-ID: <58402e5c.46bb1c0a.1c422.aa21@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88790:d05e3f3498f3 Date: 2016-12-01 13:31 +0100 http://bitbucket.org/pypy/pypy/changeset/d05e3f3498f3/ Log: replace more w_str and str_w instances diff --git a/pypy/interpreter/pyparser/error.py b/pypy/interpreter/pyparser/error.py --- a/pypy/interpreter/pyparser/error.py +++ b/pypy/interpreter/pyparser/error.py @@ -15,7 +15,7 @@ if self.filename is None: w_filename = space.w_None else: - w_filename = space.newtext(self.filename) + w_filename = space.newtext_or_none(self.filename) if self.text is None: w_text = space.w_None else: diff --git a/pypy/module/__pypy__/interp_debug.py 
b/pypy/module/__pypy__/interp_debug.py --- a/pypy/module/__pypy__/interp_debug.py +++ b/pypy/module/__pypy__/interp_debug.py @@ -9,7 +9,7 @@ @jit.dont_look_inside def debug_print(space, args_w): - parts = [space.str_w(space.str(w_item)) for w_item in args_w] + parts = [space.text_w(space.str(w_item)) for w_item in args_w] debug.debug_print(' '.join(parts)) @jit.dont_look_inside diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -183,8 +183,8 @@ jit.promote(space.int_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_float): jit.promote(space.float_w(w_obj)) - elif space.is_w(space.type(w_obj), space.w_str): - jit.promote_string(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_bytes): + jit.promote_string(space.bytes_w(w_obj)) elif space.is_w(space.type(w_obj), space.w_unicode): raise oefmt(space.w_TypeError, "promoting unicode unsupported") else: diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -91,7 +91,7 @@ if not space.isinstance_w(w_data, space.w_str): raise oefmt(space.w_TypeError, "read() should return bytes") - data = space.str_w(w_data) + data = space.bytes_w(w_data) rwbuffer.setslice(0, data) return space.newint(len(data)) @@ -225,7 +225,7 @@ raise return space.newtext("<%s>" % (typename,)) else: - name_repr = space.str_w(space.repr(w_name)) + name_repr = space.text_w(space.repr(w_name)) return space.newtext("<%s name=%s>" % (typename, name_repr)) # ______________________________________________ diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -326,7 +326,7 @@ w_repr = space.repr(self.w_name) return space.newtext( "<_io.FileIO name=%s mode='%s'>" % ( - space.str_w(w_repr), 
self._mode())) + space.text_w(w_repr), self._mode())) # ______________________________________________ diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -375,9 +375,8 @@ else: newline = space.unicode_w(w_newline) if newline and newline not in (u'\n', u'\r\n', u'\r'): - r = space.str_w(space.repr(w_newline)) raise oefmt(space.w_ValueError, - "illegal newline value: %s", r) + "illegal newline value: %R", w_newline) self.line_buffering = line_buffering @@ -398,7 +397,7 @@ # build the decoder object if space.is_true(space.call_method(w_buffer, "readable")): w_codec = interp_codecs.lookup_codec(space, - space.str_w(self.w_encoding)) + space.text_w(self.w_encoding)) self.w_decoder = space.call_method(w_codec, "incrementaldecoder", w_errors) if self.readuniversal: @@ -409,7 +408,7 @@ # build the encoder object if space.is_true(space.call_method(w_buffer, "writable")): w_codec = interp_codecs.lookup_codec(space, - space.str_w(self.w_encoding)) + space.text_w(self.w_encoding)) self.w_encoder = space.call_method(w_codec, "incrementalencoder", w_errors) @@ -875,9 +874,8 @@ whence) if space.is_true(space.lt(w_pos, space.newint(0))): - r = space.str_w(space.repr(w_pos)) raise oefmt(space.w_ValueError, - "negative seek position %s", r) + "negative seek position %R", w_pos) space.call_method(self, "flush") @@ -984,7 +982,7 @@ w_state = space.call_method(self.w_decoder, "getstate") w_dec_buffer, w_flags = space.unpackiterable(w_state, 2) - dec_buffer_len = len(space.str_w(w_dec_buffer)) + dec_buffer_len = space.len_w(w_dec_buffer) if dec_buffer_len == 0 and chars_decoded <= chars_to_skip: # Decoder buffer is empty, so this is a safe start point. 
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -20,9 +20,9 @@ def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): argtypes_w, argtypes, w_restype, restype = unpack_argtypes( space, w_argtypes, w_restype) - if (space.isinstance_w(w_name, space.w_str) or + if (space.isinstance_w(w_name, space.w_bytes) or space.isinstance_w(w_name, space.w_unicode)): - name = space.str_w(w_name) + name = space.text_w(w_name) try: func = CDLL.cdll.getpointer(name, argtypes, restype, flags = CDLL.flags) @@ -54,7 +54,7 @@ else: @unwrap_spec(name=str) def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): - name = space.str_w(w_name) + name = space.text_w(w_name) argtypes_w, argtypes, w_restype, restype = unpack_argtypes( space, w_argtypes, w_restype) try: diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -79,11 +79,11 @@ def maybe_handle_char_or_unichar_p(self, w_ffitype, w_obj): w_type = jit.promote(self.space.type(w_obj)) - if w_ffitype.is_char_p() and w_type is self.space.w_str: + if w_ffitype.is_char_p() and w_type is self.space.w_bytes: strval = self.space.str_w(w_obj) self.handle_char_p(w_ffitype, w_obj, strval) return True - elif w_ffitype.is_unichar_p() and (w_type is self.space.w_str or + elif w_ffitype.is_unichar_p() and (w_type is self.space.w_bytes or w_type is self.space.w_unicode): unicodeval = self.space.unicode_w(w_obj) self.handle_unichar_p(w_ffitype, w_obj, unicodeval) diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -185,7 +185,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.str_w(w_value) + value = 
space.bytes_w(w_value) if start + len(value) != stop: raise oefmt(space.w_ValueError, "cannot resize array") ll_buffer = self.ll_buffer diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -79,7 +79,7 @@ self.argtypes = unpack_argshapes(space, w_args) ffiargs = [tp.get_basic_ffi_type() for tp in self.argtypes] if not space.is_w(w_result, space.w_None): - self.result = space.str_w(w_result) + self.result = space.text_w(w_result) ffiresult = letter2tp(space, self.result).get_basic_ffi_type() else: self.result = None diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -100,7 +100,7 @@ def unpack_simple_shape(space, w_shape): # 'w_shape' must be either a letter or a tuple (struct, 1). if space.isinstance_w(w_shape, space.w_str): - letter = space.str_w(w_shape) + letter = space.text_w(w_shape) return letter2tp(space, letter) else: w_shapetype, w_length = space.fixedview(w_shape, expected_length=2) @@ -110,8 +110,8 @@ def unpack_shape_with_length(space, w_shape): # Allow 'w_shape' to be a letter or any (shape, number). # The result is always a W_Array. 
- if space.isinstance_w(w_shape, space.w_str): - letter = space.str_w(w_shape) + if space.isinstance_w(w_shape, space.w_text): + letter = space.text_w(w_shape) return letter2tp(space, letter) else: w_shapetype, w_length = space.fixedview(w_shape, expected_length=2) @@ -186,8 +186,8 @@ else: ffi_restype = ffi_type_void - if space.isinstance_w(w_name, space.w_str): - name = space.str_w(w_name) + if space.isinstance_w(w_name, space.w_text): + name = space.text_w(w_name) try: ptr = self.cdll.getrawpointer(name, ffi_argtypes, ffi_restype, @@ -408,7 +408,7 @@ push_func(add_arg, argdesc, rffi.cast(rffi.LONGDOUBLE, space.float_w(w_arg))) elif letter == "c": - s = space.str_w(w_arg) + s = space.bytes_w(w_arg) if len(s) != 1: raise oefmt(space.w_TypeError, "Expected string of length one as character") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -37,7 +37,7 @@ "Expected list of 2- or 3-size tuples") try: - name = space.str_w(l_w[0]) + name = space.text_w(l_w[0]) except OperationError: raise oefmt(space.w_TypeError, "structure field name must be string not %T", l_w[0]) diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -77,7 +77,7 @@ if space.is_w(w_proto, space.w_None): proto = None else: - proto = space.str_w(w_proto) + proto = space.text_w(w_proto) try: port = rsocket.getservbyname(name, proto) except SocketError as e: @@ -95,7 +95,7 @@ if space.is_w(w_proto, space.w_None): proto = None else: - proto = space.str_w(w_proto) + proto = space.text_w(w_proto) if port < 0 or port > 0xffff: raise oefmt(space.w_ValueError, "getservbyport: port must be 0-65535.") diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -83,7 +83,7 
@@ def addr_from_object(family, fd, space, w_address): if family == rsocket.AF_INET: w_host, w_port = space.unpackiterable(w_address, 2) - host = space.str_w(w_host) + host = space.text_w(w_host) port = space.int_w(w_port) port = make_ushort_port(space, port) return rsocket.INETAddress(host, port) @@ -93,7 +93,7 @@ raise oefmt(space.w_TypeError, "AF_INET6 address must be a tuple of length 2 " "to 4, not %d", len(pieces_w)) - host = space.str_w(pieces_w[0]) + host = space.text_w(pieces_w[0]) port = space.int_w(pieces_w[1]) port = make_ushort_port(space, port) if len(pieces_w) > 2: flowinfo = space.int_w(pieces_w[2]) @@ -103,7 +103,7 @@ flowinfo = make_unsigned_flowinfo(space, flowinfo) return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: - return rsocket.UNIXAddress(space.str_w(w_address)) + return rsocket.UNIXAddress(space.text_w(w_address)) if rsocket.HAS_AF_NETLINK and family == rsocket.AF_NETLINK: w_pid, w_groups = space.unpackiterable(w_address, 2) return rsocket.NETLINKAddress(space.uint_w(w_pid), space.uint_w(w_groups)) @@ -113,14 +113,14 @@ raise oefmt(space.w_TypeError, "AF_PACKET address must be a tuple of length 2 " "to 5, not %d", len(pieces_w)) - ifname = space.str_w(pieces_w[0]) + ifname = space.text_w(pieces_w[0]) ifindex = rsocket.PacketAddress.get_ifindex_from_ifname(fd, ifname) protocol = space.int_w(pieces_w[1]) if len(pieces_w) > 2: pkttype = space.int_w(pieces_w[2]) else: pkttype = 0 if len(pieces_w) > 3: hatype = space.int_w(pieces_w[3]) else: hatype = 0 - if len(pieces_w) > 4: haddr = space.str_w(pieces_w[4]) + if len(pieces_w) > 4: haddr = space.text_w(pieces_w[4]) else: haddr = "" if len(haddr) > 8: raise oefmt(space.w_ValueError, @@ -144,7 +144,7 @@ # XXX Hack to seperate rpython and pypy def ipaddr_from_object(space, w_sockaddr): - host = space.str_w(space.getitem(w_sockaddr, space.newint(0))) + host = space.text_w(space.getitem(w_sockaddr, space.newint(0))) addr = 
rsocket.makeipaddr(host) fill_from_object(addr, space, w_sockaddr) return addr @@ -165,7 +165,7 @@ is_open = self.sock.fd >= 0 if is_open and self.space.sys.track_resources: w_repr = self.space.repr(self) - str_repr = self.space.str_w(w_repr) + str_repr = self.space.text_w(w_repr) w_msg = self.space.newtext("WARNING: unclosed " + str_repr) self.space.resource_warning(w_msg, self.w_tb) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -116,8 +116,8 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) - elif space.isinstance_w(w_string, space.w_str): - str = space.str_w(w_string) + elif space.isinstance_w(w_string, space.w_bytes): + str = space.bytes_w(w_string) if pos > len(str): pos = len(str) if endpos > len(str): diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -413,7 +413,7 @@ raise oefmt(self.space.w_EOFError, "compressed file ended before the logical " "end-of-the-stream was detected") - result = self.space.str_w(w_result) + result = self.space.bytes_w(w_result) self.readlength += len(result) else: result = "" @@ -443,7 +443,7 @@ self.finished = True return "" raise - self.buffer = self.space.str_w(w_read) + self.buffer = self.space.bytes_w(w_read) self.pos = 0 if len(self.buffer) - self.pos >= n: pos = self.pos @@ -478,11 +478,11 @@ self.writtenlength = 0 def close1(self, closefileno): - self.stream.write(self.space.str_w(self.compressor.flush())) + self.stream.write(self.space.bytes_w(self.compressor.flush())) self.stream.close1(closefileno) def write(self, data): - self.stream.write(self.space.str_w(self.compressor.compress(data))) + self.stream.write(self.space.bytes_w(self.compressor.compress(data))) self.writtenlength += len(data) def tell(self): diff --git 
a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -178,7 +178,7 @@ if not e.match(space, space.w_StopIteration): raise break # done - self.write(space.str_w(w_line)) + self.write(space.text_w(w_line)) # ____________________________________________________________ diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -79,7 +79,7 @@ c_ob_sval must not be modified. """ py_str = rffi.cast(PyBytesObject, py_obj) - s = space.str_w(w_obj) + s = space.bytes_w(w_obj) if py_str.c_ob_size < len(s): raise oefmt(space.w_ValueError, "bytes_attach called on object with ob_size %d but trying to store %d", diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -30,7 +30,7 @@ @cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) def _PyInstance_Lookup(space, w_instance, w_name): - name = space.str_w(w_name) + name = space.text_w(w_name) assert isinstance(w_instance, W_InstanceObject) w_result = w_instance.getdictvalue(space, name) if w_result is not None: diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -118,8 +118,8 @@ assert isinstance(w_method, Method) return w_method.w_class # borrowed ref -def unwrap_list_of_strings(space, w_list): - return [space.str_w(w_item) for w_item in space.fixedview(w_list)] +def unwrap_list_of_texts(space, w_list): + return [space.text_w(w_item) for w_item in space.fixedview(w_list)] @cpython_api([rffi.INT_real, rffi.INT_real, rffi.INT_real, rffi.INT_real, PyObject, PyObject, PyObject, PyObject, PyObject, PyObject, @@ -136,16 +136,16 @@ nlocals=rffi.cast(lltype.Signed, nlocals), 
stacksize=rffi.cast(lltype.Signed, stacksize), flags=rffi.cast(lltype.Signed, flags), - code=space.str_w(w_code), + code=space.bytes_w(w_code), consts=space.fixedview(w_consts), - names=unwrap_list_of_strings(space, w_names), - varnames=unwrap_list_of_strings(space, w_varnames), - filename=space.str_w(w_filename), - name=space.str_w(w_funcname), + names=unwrap_list_of_texts(space, w_names), + varnames=unwrap_list_of_texts(space, w_varnames), + filename=space.text_w(w_filename), + name=space.text_w(w_funcname), firstlineno=rffi.cast(lltype.Signed, firstlineno), - lnotab=space.str_w(w_lnotab), - freevars=unwrap_list_of_strings(space, w_freevars), - cellvars=unwrap_list_of_strings(space, w_cellvars)) + lnotab=space.bytes_w(w_lnotab), + freevars=unwrap_list_of_texts(space, w_freevars), + cellvars=unwrap_list_of_texts(space, w_cellvars)) @cpython_api([CONST_STRING, CONST_STRING, rffi.INT_real], PyCodeObject) def PyCode_NewEmpty(space, filename, funcname, firstlineno): diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -170,7 +170,7 @@ except ValueError: w_s = w_obj.descr_tobytes(space) view.c_obj = make_ref(space, w_s) - view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.text_w(w_s), track_allocation=False)) rffi.setintfield(view, 'c_readonly', 1) isstr = True return view diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -173,7 +173,7 @@ 0, NULL); except OperationError as e: if e.match(space, space.w_NotImplementedError): - errstr = space.str_w(e.get_w_value(space)) + errstr = space.text_w(e.get_w_value(space)) raise oefmt(space.w_NotImplementedError, "_PyArray_FromObject %s", errstr[16:]) raise diff --git 
a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -471,7 +471,7 @@ w_str = space.repr(w_obj) count = space.len_w(w_str) - data = space.str_w(w_str) + data = space.text_w(w_str) with rffi.scoped_nonmovingbuffer(data) as buf: fwrite(buf, 1, count, fp) return 0 diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -338,7 +338,7 @@ state = space.fromcache(State) operror = state.clear_exception() if operror: - operror.write_unraisable(space, space.str_w(space.repr(w_where))) + operror.write_unraisable(space, space.text_w(space.repr(w_where))) @cpython_api([], lltype.Void) def PyErr_SetInterrupt(space): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -139,7 +139,7 @@ func_target = rffi.cast(getattrfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - name_ptr = rffi.str2charp(space.str_w(args_w[0])) + name_ptr = rffi.str2charp(space.text_w(args_w[0])) try: return generic_cpy_call(space, func_target, w_self, name_ptr) finally: @@ -636,10 +636,11 @@ view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) view.c_obj = make_ref(space, w_obj) except ValueError: - w_s = space.newbytes(buf.as_str()) + s = buf.as_str() + w_s = space.newbytes(s) view.c_obj = make_ref(space, w_s) view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp( - space.str_w(w_s), track_allocation=False)) + s, track_allocation=False)) rffi.setintfield(view, 'c_readonly', 1) ret = fill_Py_buffer(space, buf, view) return ret diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -100,7 +100,7 @@ argv = space.sys.get('argv') if space.len_w(argv): argv0 = space.getitem(argv, space.newint(0)) - progname = 
space.str_w(argv0) + progname = space.text_w(argv0) else: progname = "pypy" self.programname = rffi.str2charp(progname) @@ -111,7 +111,7 @@ if not self.version: space = self.space w_version = space.sys.get('version') - version = space.str_w(w_version) + version = space.text_w(w_version) self.version = rffi.str2charp(version) lltype.render_immortal(self.version) return self.version diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -115,7 +115,7 @@ return 0 if member_type == T_CHAR: - str_value = space.str_w(w_value) + str_value = space.text_w(w_value) if len(str_value) != 1: raise oefmt(space.w_TypeError, "string of length 1 expected") array = rffi.cast(rffi.CCHARP, addr) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -947,7 +947,7 @@ if not space.isinstance_w(w_name, space.w_str): return None - name = space.str_w(w_name) + name = space.text_w(w_name) w_obj = w_type.lookup(name) # this assumes that w_obj is not dynamically created, but will stay alive # until w_type is modified or dies. Assuming this, we return a borrowed ref diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -502,11 +502,11 @@ @staticmethod def _op_val(space, w_other, strict=None): - if strict and not space.isinstance_w(w_other, space.w_str): + if strict and not space.isinstance_w(w_other, space.w_bytes): raise oefmt(space.w_TypeError, "%s arg must be None, str or unicode", strict) try: - return space.str_w(w_other) + return space.bytes_w(w_other) except OperationError as e: if not e.match(space, space.w_TypeError): raise @@ -569,7 +569,7 @@ # NB. 
the default value of w_object is really a *wrapped* empty string: # there is gateway magic at work w_obj = space.str(w_object) - if space.is_w(w_stringtype, space.w_str): + if space.is_w(w_stringtype, space.w_bytes): return w_obj # XXX might be reworked when space.str() typechecks value = space.bytes_w(w_obj) w_obj = space.allocate_instance(W_BytesObject, w_stringtype) @@ -596,9 +596,9 @@ return newformat.format_method(space, self, __args__, is_unicode=False) def descr__format__(self, space, w_format_spec): - if not space.isinstance_w(w_format_spec, space.w_str): + if not space.isinstance_w(w_format_spec, space.w_bytes): w_format_spec = space.str(w_format_spec) - spec = space.str_w(w_format_spec) + spec = space.bytes_w(w_format_spec) formatter = newformat.str_formatter(space, spec) return formatter.format_string(self._value) @@ -826,11 +826,11 @@ return self._StringMethods_descr_rpartition(space, w_sub) def _join_return_one(self, space, w_obj): - return (space.is_w(space.type(w_obj), space.w_str) or + return (space.is_w(space.type(w_obj), space.w_bytes) or space.is_w(space.type(w_obj), space.w_unicode)) def _join_check_item(self, space, w_obj): - if space.isinstance_w(w_obj, space.w_str): + if space.isinstance_w(w_obj, space.w_bytes): return 0 if space.isinstance_w(w_obj, space.w_unicode): return 2 @@ -851,12 +851,12 @@ def descr_formatter_parser(self, space): from pypy.objspace.std.newformat import str_template_formatter - tformat = str_template_formatter(space, space.str_w(self)) + tformat = str_template_formatter(space, space.bytes_w(self)) return tformat.formatter_parser() def descr_formatter_field_name_split(self, space): from pypy.objspace.std.newformat import str_template_formatter - tformat = str_template_formatter(space, space.str_w(self)) + tformat = str_template_formatter(space, space.bytes_w(self)) return tformat.formatter_field_name_split() diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py 
+++ b/pypy/objspace/std/newformat.py @@ -352,7 +352,7 @@ if recursive: spec = self._build_string(spec_start, end, level) w_rendered = self.space.format(w_obj, self.wrap(spec)) - unwrapper = "unicode_w" if self.is_unicode else "str_w" + unwrapper = "unicode_w" if self.is_unicode else "bytes_w" to_interp = getattr(self.space, unwrapper) return to_interp(w_rendered) @@ -382,7 +382,7 @@ space.unicode_w(w_string)) return space.newunicode(template.build(args)) else: - template = str_template_formatter(space, space.str_w(w_string)) + template = str_template_formatter(space, space.bytes_w(w_string)) return space.newbytes(template.build(args)) @@ -1175,5 +1175,5 @@ formatter = unicode_formatter(space, space.unicode_w(w_format_spec)) return getattr(formatter, meth)(*args) else: - formatter = str_formatter(space, space.str_w(w_format_spec)) + formatter = str_formatter(space, space.bytes_w(w_format_spec)) return getattr(formatter, meth)(*args) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -77,7 +77,7 @@ return space.newint(uid) def str_w(self, space): - return space.str_w(space.str(self)) + return space.text_w(space.str(self)) def unicode_w(self, space): return self._value @@ -450,8 +450,8 @@ def _get_encoding_and_errors(space, w_encoding, w_errors): - encoding = None if w_encoding is None else space.str_w(w_encoding) - errors = None if w_errors is None else space.str_w(w_errors) + encoding = None if w_encoding is None else space.text_w(w_encoding) + errors = None if w_errors is None else space.text_w(w_errors) return encoding, errors @@ -569,7 +569,7 @@ encoding = getdefaultencoding(space) if encoding != 'ascii': return unicode_from_encoded_object(space, w_str, encoding, "strict") - s = space.str_w(w_str) + s = space.bytes_w(w_str) try: return W_UnicodeObject(s.decode("ascii")) except UnicodeDecodeError: From pypy.commits at gmail.com Thu Dec 1 09:06:22 
2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 01 Dec 2016 06:06:22 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: make sure that hacked_filename is a str Message-ID: <58402e5e.6737c20a.352bb.236c@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88791:2c4d7a3d4373 Date: 2016-12-01 13:46 +0100 http://bitbucket.org/pypy/pypy/changeset/2c4d7a3d4373/ Log: make sure that hacked_filename is a str diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py --- a/pypy/interpreter/interactive.py +++ b/pypy/interpreter/interactive.py @@ -169,12 +169,12 @@ def runsource(self, source, ignored_filename="", symbol="single"): # the following hacked file name is recognized specially by error.py - hacked_filename = '\n' + source compiler = self.space.getexecutioncontext().compiler # CPython 2.6 turns console input into unicode if isinstance(source, unicode): source = source.encode(sys.stdin.encoding) + hacked_filename = '\n' + source def doit(): # compile the provided input From pypy.commits at gmail.com Thu Dec 1 09:06:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 01 Dec 2016 06:06:24 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: str_w in micronumpy Message-ID: <58402e60.12ad1c0a.84e10.64bc@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88792:00b54f1e2516 Date: 2016-12-01 15:05 +0100 http://bitbucket.org/pypy/pypy/changeset/00b54f1e2516/ Log: str_w in micronumpy diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -403,7 +403,7 @@ Supports the standard %s and %d formats, plus the following: %N - The result of w_arg.getname(space) - %R - The result of space.str_w(space.repr(w_arg)) + %R - The result of space.text_w(space.repr(w_arg)) %T - The result of space.type(w_arg).name """ diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- 
a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -556,7 +556,7 @@ class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): if space.isinstance_w(w_item, space.w_basestring): - item = space.str_w(w_item) + item = space.text_w(w_item) elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: @@ -587,7 +587,7 @@ def descr_setitem(self, space, w_item, w_value): if space.isinstance_w(w_item, space.w_basestring): - item = space.str_w(w_item) + item = space.text_w(w_item) elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: @@ -622,7 +622,7 @@ class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): from pypy.module.micronumpy.descriptor import new_string_dtype - arg = space.str_w(space.str(w_arg)) + arg = space.text_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): arr.storage[i] = arg[i] diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -76,6 +76,7 @@ w_tuple = W_TypeObject('tuple') w_slice = W_TypeObject("slice") w_str = W_TypeObject("str") + w_bytes = w_str w_unicode = W_TypeObject("unicode") w_complex = W_TypeObject("complex") w_dict = W_TypeObject("dict") diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -25,8 +25,8 @@ def clipmode_converter(space, w_mode): if space.is_none(w_mode): return NPY.RAISE - if space.isinstance_w(w_mode, space.w_str): - mode = space.str_w(w_mode) + if space.isinstance_w(w_mode, space.w_text): + mode = space.text_w(w_mode) if mode.startswith('C') or mode.startswith('c'): return NPY.CLIP if mode.startswith('W') or mode.startswith('w'): @@ -42,7 +42,7 @@ def searchside_converter(space, w_obj): try: - s = space.str_w(w_obj) + s 
= space.text_w(w_obj) except OperationError: s = None if not s: @@ -66,7 +66,7 @@ else: return NPY.CORDER else: - order = space.str_w(w_order) + order = space.text_w(w_order) if order.startswith('C') or order.startswith('c'): return NPY.CORDER elif order.startswith('F') or order.startswith('f'): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -20,11 +20,11 @@ "argument 1 must be numpy.dtype, not %T", w_dtype) if w_dtype.elsize == 0: raise oefmt(space.w_TypeError, "Empty data-type") - if not space.isinstance_w(w_state, space.w_str): + if not space.isinstance_w(w_state, space.w_bytes): # py3 accepts unicode here too raise oefmt(space.w_TypeError, "initializing object must be a string") if space.len_w(w_state) != w_dtype.elsize: raise oefmt(space.w_ValueError, "initialization string is too small") - state = rffi.str2charp(space.str_w(w_state)) + state = rffi.str2charp(space.text_w(w_state)) box = w_dtype.itemtype.box_raw_data(state) lltype.free(state, flavor="raw") return box @@ -212,7 +212,7 @@ if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) if w_array is None: - if ( not space.isinstance_w(w_object, space.w_str) and + if ( not space.isinstance_w(w_object, space.w_bytes) and not space.isinstance_w(w_object, space.w_unicode) and not isinstance(w_object, W_GenericBox)): # use buffer interface @@ -323,7 +323,7 @@ return _find_shape_and_elems(space, w_iterable, is_rec_type) def is_scalar_like(space, w_obj, dtype): - isstr = space.isinstance_w(w_obj, space.w_str) + isstr = space.isinstance_w(w_obj, space.w_bytes) if not support.issequence_w(space, w_obj) or isstr: if dtype is None or dtype.char != NPY.CHARLTR: return True diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -40,7 +40,7 @@ return 
out def byteorder_w(space, w_str): - order = space.str_w(w_str) + order = space.text_w(w_str) if len(order) != 1: raise oefmt(space.w_ValueError, "endian is not 1-char string in Numpy dtype unpickling") @@ -275,13 +275,13 @@ for name, title in self.names: offset, subdtype = self.fields[name] if subdtype.is_record(): - substr = [space.str_w(space.str(subdtype.descr_get_descr( + substr = [space.text_w(space.str(subdtype.descr_get_descr( space, style='descr_subdtype'))), ","] elif subdtype.subdtype is not None: - substr = ["(", space.str_w(space.str( + substr = ["(", space.text_w(space.str( subdtype.subdtype.descr_get_descr(space, style='descr_subdtype'))), ', ', - space.str_w(space.repr(space.newtuple([space.newint(s) for s in subdtype.shape]))), + space.text_w(space.repr(space.newtuple([space.newint(s) for s in subdtype.shape]))), "),"] else: substr = ["'", subdtype.get_str(ignore=''), "',"] @@ -351,7 +351,7 @@ subdescr.append(subdtype.descr_get_shape(space)) descr.append(space.newtuple(subdescr[:])) if self.alignment >= 0 and not style.endswith('subdtype'): - return space.newtext(space.str_w(space.repr(space.newlist(descr))) + ', align=True') + return space.newtext(space.text_w(space.repr(space.newlist(descr))) + ', align=True') return space.newlist(descr) def descr_get_hasobject(self, space): @@ -418,7 +418,7 @@ raise oefmt(space.w_ValueError, "item #%d of names is of type %T and not string", len(names), w_name) - names.append((space.str_w(w_name), title)) + names.append((space.text_w(w_name), title)) fields = {} for i in range(len(self.names)): if names[i][0] in fields: @@ -523,7 +523,7 @@ def descr_str(self, space): if self.fields: r = self.descr_get_descr(space, style='str') - name = space.str_w(space.str(self.w_box_type)) + name = space.text_w(space.str(self.w_box_type)) if name != "": boxname = space.str(self.w_box_type) r = space.newtuple([self.w_box_type, r]) @@ -543,7 +543,7 @@ return space.newtext("dtype('S1')") if self.fields: r = 
self.descr_get_descr(space, style='repr') - name = space.str_w(space.str(self.w_box_type)) + name = space.text_w(space.str(self.w_box_type)) if name != "": r = space.newtuple([self.w_box_type, r]) elif self.subdtype is not None: @@ -562,15 +562,15 @@ else: r = self.descr_get_name(space, quote=True) if space.isinstance_w(r, space.w_str): - return space.newtext("dtype(%s)" % space.str_w(r)) - return space.newtext("dtype(%s)" % space.str_w(space.repr(r))) + return space.newtext("dtype(%s)" % space.text_w(r)) + return space.newtext("dtype(%s)" % space.text_w(space.repr(r))) def descr_getitem(self, space, w_item): if not self.fields: raise oefmt(space.w_KeyError, "There are no fields in dtype %s.", self.get_name()) if space.isinstance_w(w_item, space.w_basestring): - item = space.str_w(w_item) + item = space.text_w(w_item) elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: @@ -749,15 +749,15 @@ w_flddesc, maxalign, w_shape=w_shape) if space.isinstance_w(w_fldname, space.w_tuple): fldlist = space.listview(w_fldname) - fldnames[i] = space.str_w(fldlist[0]) + fldnames[i] = space.text_w(fldlist[0]) if space.is_w(fldlist[1], space.w_None): titles[i] = None else: - titles[i] = space.str_w(fldlist[1]) + titles[i] = space.text_w(fldlist[1]) if len(fldlist) != 2: raise oefmt(space.w_TypeError, "data type not understood") elif space.isinstance_w(w_fldname, space.w_str): - fldnames[i] = space.str_w(w_fldname) + fldnames[i] = space.text_w(w_fldname) else: raise oefmt(space.w_TypeError, "data type not understood") if fldnames[i] == '': @@ -851,7 +851,7 @@ # Only for testing, a shortened version of the real _usefields allfields = [] for fname_w in space.unpackiterable(w_dict): - obj = _get_list_or_none(space, w_dict, space.str_w(fname_w)) + obj = _get_list_or_none(space, w_dict, space.text_w(fname_w)) num = space.int_w(obj[1]) if align: alignment = 0 diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- 
a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -61,7 +61,7 @@ return space.newint(self.flags) def descr_getitem(self, space, w_item): - key = space.str_w(w_item) + key = space.text_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": return self.descr_c_contiguous(space) if key == "F" or key == "FORTRAN" or key == "F_CONTIGUOUS": diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -250,8 +250,8 @@ def descr_getitem(self, space, w_idx): if self.get_dtype().is_record(): - if space.isinstance_w(w_idx, space.w_str): - idx = space.str_w(w_idx) + if space.isinstance_w(w_idx, space.w_text): + idx = space.text_w(w_idx) return self.getfield(space, idx) if space.is_w(w_idx, space.w_Ellipsis): return self.descr_view(space, space.type(self)) @@ -287,8 +287,8 @@ def descr_setitem(self, space, w_idx, w_value): if self.get_dtype().is_record(): - if space.isinstance_w(w_idx, space.w_str): - idx = space.str_w(w_idx) + if space.isinstance_w(w_idx, space.w_text): + idx = space.text_w(w_idx) view = self.getfield(space, idx) w_value = convert_to_array(space, w_value) view.implementation.setslice(space, w_value) @@ -735,10 +735,9 @@ # XXX Should not happen raise oefmt(space.w_ValueError, "new dtype has elsize of 0") if not can_cast_array(space, self, new_dtype, casting): - raise oefmt(space.w_TypeError, "Cannot cast array from %s to %s" - "according to the rule %s", - space.str_w(self.get_dtype().descr_repr(space)), - space.str_w(new_dtype.descr_repr(space)), casting) + raise oefmt(space.w_TypeError, "Cannot cast array from %R to %R" + "according to the rule %s", self.get_dtype(), + new_dtype, casting) order = order_converter(space, space.newtext(order), self.get_order()) if (not copy and new_dtype == self.get_dtype() and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order()) diff --git 
a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -53,7 +53,7 @@ def parse_op_flag(space, lst): op_flag = OpFlag() for w_item in lst: - item = space.str_w(w_item) + item = space.text_w(w_item) if item == 'readonly': op_flag.rw = 'r' elif item == 'readwrite': @@ -102,12 +102,12 @@ 'Iter global flags must be a list or tuple of strings') lst = space.listview(w_flags) for w_item in lst: - if not space.isinstance_w(w_item, space.w_str) and not \ + if not space.isinstance_w(w_item, space.w_bytes) and not \ space.isinstance_w(w_item, space.w_unicode): raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", w_item) - item = space.str_w(w_item) + item = space.text_w(w_item) if item == 'external_loop': nditer.external_loop = True elif item == 'buffered': @@ -365,7 +365,7 @@ self.op_axes = [] self.allow_backward = allow_backward if not space.is_w(w_casting, space.w_None): - self.casting = space.str_w(w_casting) + self.casting = space.text_w(w_casting) else: self.casting = 'safe' # convert w_seq operands to a list of W_NDimArray @@ -483,11 +483,9 @@ if not can_cast_array( space, self.seq[i], self_d, self.casting): raise oefmt(space.w_TypeError, "Iterator operand %d" - " dtype could not be cast from %s to %s" - " according to the rule '%s'", i, - space.str_w(seq_d.descr_repr(space)), - space.str_w(self_d.descr_repr(space)), - self.casting) + " dtype could not be cast from %R to %R" + " according to the rule '%s'", + i, seq_d, self_d, self.casting) order = support.get_order_as_CF(impl.order, self.order) new_impl = impl.astype(space, self_d, order).copy(space) self.seq[i] = W_NDimArray(new_impl) @@ -501,11 +499,9 @@ space, self_d, seq_d, self.casting): raise oefmt(space.w_TypeError, "Iterator" " requested dtype could not be cast from " - " %s to %s, the operand %d dtype, accord" - "ing to the rule '%s'", - space.str_w(self_d.descr_repr(space)), - 
space.str_w(seq_d.descr_repr(space)), - i, self.casting) + " %R to %R, the operand %d dtype, accord" + "ing to the rule '%s'", + self_d, seq_d, i, self.casting) elif self.buffered and not (self.external_loop and len(self.seq)<2): for i in range(len(self.seq)): if i not in outargs: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -205,7 +205,7 @@ if space.is_none(w_docstring): doc = None else: - doc = space.str_w(w_docstring) + doc = space.text_w(w_docstring) w_obj.doc = doc return app_set_docstring(space, w_obj, w_docstring) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1929,12 +1929,12 @@ def str_format(self, box, add_quotes=True): if not add_quotes: - as_str = self.space.str_w(self.space.repr(self.unbox(box))) + as_str = self.space.text_w(self.space.repr(self.unbox(box))) as_strl = len(as_str) - 1 if as_strl>1 and as_str[0] == "'" and as_str[as_strl] == "'": as_str = as_str[1:as_strl] return as_str - return self.space.str_w(self.space.repr(self.unbox(box))) + return self.space.text_w(self.space.repr(self.unbox(box))) def runpack_str(self, space, s, native): raise oefmt(space.w_NotImplementedError, @@ -2165,7 +2165,7 @@ return w_item if w_item is None: w_item = space.newbytes('') - arg = space.str_w(space.str(w_item)) + arg = space.text_w(space.str(w_item)) arr = VoidBoxStorage(dtype.elsize, dtype) with arr as storage: j = min(len(arg), dtype.elsize) @@ -2314,7 +2314,7 @@ assert isinstance(item, boxes.W_UnicodeBox) if add_quotes: w_unicode = self.to_builtin_type(self.space, item) - return self.space.str_w(self.space.repr(w_unicode)) + return self.space.text_w(self.space.repr(w_unicode)) else: # Same as W_UnicodeBox.descr_repr() but without quotes and prefix from rpython.rlib.runicode import unicode_encode_unicode_escape diff --git 
a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -153,7 +153,7 @@ if w_casting is None: casting = 'unsafe' else: - casting = space.str_w(w_casting) + casting = space.text_w(w_casting) retval = self.call(space, args_w, sig, casting, extobj) keepalive_until_here(args_w) return retval @@ -983,7 +983,7 @@ if sig: raise oefmt(space.w_RuntimeError, "cannot specify both 'sig' and 'dtype'") - sig = space.str_w(kwargs_w[kw]) + sig = space.text_w(kwargs_w[kw]) parsed_kw.append(kw) elif kw.startswith('where'): raise oefmt(space.w_NotImplementedError, @@ -1504,7 +1504,7 @@ elif (space.isinstance_w(w_dtypes, space.w_tuple) or space.isinstance_w(w_dtypes, space.w_list)): _dtypes = space.listview(w_dtypes) - if space.isinstance_w(_dtypes[0], space.w_str) and space.str_w(_dtypes[0]) == 'match': + if space.isinstance_w(_dtypes[0], space.w_text) and space.text_w(_dtypes[0]) == 'match': dtypes = [] match_dtypes = True else: From pypy.commits at gmail.com Thu Dec 1 10:55:02 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 07:55:02 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Mark more tests based on gc.is_tracked() as impl detail Message-ID: <584047d6.46bb1c0a.221f4.f39a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88793:cd880034bb7c Date: 2016-12-01 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/cd880034bb7c/ Log: Mark more tests based on gc.is_tracked() as impl detail diff --git a/lib-python/3/test/test_finalization.py b/lib-python/3/test/test_finalization.py --- a/lib-python/3/test/test_finalization.py +++ b/lib-python/3/test/test_finalization.py @@ -181,7 +181,8 @@ def test_non_gc(self): with SimpleBase.test(): s = NonGC() - self.assertFalse(gc.is_tracked(s)) + if support.check_impl_detail(): + self.assertFalse(gc.is_tracked(s)) ids = [id(s)] del s gc.collect() @@ -194,7 +195,8 @@ def test_non_gc_resurrect(self): with SimpleBase.test(): s 
= NonGCResurrector() - self.assertFalse(gc.is_tracked(s)) + if support.check_impl_detail(): + self.assertFalse(gc.is_tracked(s)) ids = [id(s)] del s gc.collect() diff --git a/lib-python/3/test/test_gc.py b/lib-python/3/test/test_gc.py --- a/lib-python/3/test/test_gc.py +++ b/lib-python/3/test/test_gc.py @@ -526,6 +526,7 @@ self.assertEqual(gc.get_referents(1, 'a', 4j), []) + @cpython_only def test_is_tracked(self): # Atomic built-in types are not tracked, user-defined objects and # mutable containers are. From pypy.commits at gmail.com Thu Dec 1 11:23:03 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 08:23:03 -0800 (PST) Subject: [pypy-commit] pypy default: Tweak gc.collect() when gc.disable() was called, see comment Message-ID: <58404e67.e626c20a.a1dc0.6292@mx.google.com> Author: Armin Rigo Branch: Changeset: r88794:2aa7dea5ad0f Date: 2016-12-01 17:21 +0100 http://bitbucket.org/pypy/pypy/changeset/2aa7dea5ad0f/ Log: Tweak gc.collect() when gc.disable() was called, see comment diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -14,7 +14,19 @@ cache.clear() cache = space.fromcache(MapAttrCache) cache.clear() + rgc.collect() + + # if we are running in gc.disable() mode but gc.collect() is called, + # we should still call the finalizers now. We do this as an attempt + # to get closer to CPython's behavior: in Py3.5 some tests + # specifically rely on that. This is similar to how, in CPython, an + # explicit gc.collect() will invoke finalizers from cycles and fully + # ignore the gc.disable() mode. 
+ if not space.user_del_action.enabled_at_app_level: + enable_finalizers(space) + disable_finalizers(space) + return space.wrap(0) def enable(space): From pypy.commits at gmail.com Thu Dec 1 11:23:05 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 08:23:05 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <58404e69.624fc20a.7231f.5edf@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88795:eb1bdeb6f204 Date: 2016-12-01 17:22 +0100 http://bitbucket.org/pypy/pypy/changeset/eb1bdeb6f204/ Log: hg merge default diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -14,7 +14,19 @@ cache.clear() cache = space.fromcache(MapAttrCache) cache.clear() + rgc.collect() + + # if we are running in gc.disable() mode but gc.collect() is called, + # we should still call the finalizers now. We do this as an attempt + # to get closer to CPython's behavior: in Py3.5 some tests + # specifically rely on that. This is similar to how, in CPython, an + # explicit gc.collect() will invoke finalizers from cycles and fully + # ignore the gc.disable() mode. 
+ if not space.user_del_action.enabled_at_app_level: + enable_finalizers(space) + disable_finalizers(space) + return space.wrap(0) def enable(space): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -518,12 +518,10 @@ def test_prod(self): result = self.run("prod") assert int(result) == 576 - self.check_vectorized(1, 1) def test_prod_zero(self): result = self.run("prod_zero") assert int(result) == 0 - self.check_vectorized(1, 1) def define_max(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -77,7 +77,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), ('sum','float', 2581, 2581, 1), - ('prod','float', 1, 3178, 1), ('prod','int', 1, 3178, 1), ('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1128,7 +1128,7 @@ value = sum(value) elif info.accum_operation == '*': def prod(acc, x): return acc * x - value = reduce(prod, value, 1) + value = reduce(prod, value, 1.0) else: raise NotImplementedError("accum operator in fail guard") values[i] = value diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -980,7 +980,6 @@ class AccumPack(Pack): SUPPORTED = { rop.FLOAT_ADD: '+', rop.INT_ADD: '+', - rop.FLOAT_MUL: '*', } def __init__(self, nodes, operator, position): diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- 
a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -847,6 +847,10 @@ vecop, count) oplist.append(vecop) elif pack.reduce_init() == 1: + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied. giving an + # inprecision result, thus this is disabled now + raise NotImplementedError # multiply is only supported by floats vecop = OpHelpers.create_vec_expand(ConstFloat(1.0), bytesize, signed, count) diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -414,7 +414,9 @@ lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed) small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False, allow_infinity=False) test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE) - test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied losing precision + # test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) def test_constant_expand(self): From pypy.commits at gmail.com Thu Dec 1 11:37:56 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 08:37:56 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Allow cffi compilation and extension-finding to work even if cpyext is disabled Message-ID: <584051e4.624fc20a.7231f.6644@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88796:bdbd6c72e191 Date: 2016-12-01 16:48 +0000 http://bitbucket.org/pypy/pypy/changeset/bdbd6c72e191/ Log: Allow cffi compilation and extension-finding to work even if cpyext is disabled diff --git 
a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -9,7 +9,7 @@ def extension_suffixes(space): suffixes_w = [] - if space.config.objspace.usemodules.cpyext: + if 1: #if space.config.objspace.usemodules.cpyext: suffixes_w.append(space.wrap(importing.get_so_extension(space))) return space.newlist(suffixes_w) From pypy.commits at gmail.com Thu Dec 1 12:01:54 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 09:01:54 -0800 (PST) Subject: [pypy-commit] pypy default: deduplicate test defined twice, change check Message-ID: <58405782.c5371c0a.66192.b1b4@mx.google.com> Author: Richard Plangger Branch: Changeset: r88799:d6e2601a07f1 Date: 2016-12-01 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/d6e2601a07f1/ Log: deduplicate test defined twice, change check diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -374,17 +374,7 @@ def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_vectorized(1, 1) - - def define_sum(): - return """ - a = |30| - sum(a) - """ - def test_sum(self): - result = self.run("sum") - assert result == sum(range(30)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_int(): return """ From pypy.commits at gmail.com Thu Dec 1 12:01:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 09:01:50 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: failing test for memoryview slicing case Message-ID: <5840577e.212dc20a.b3dfa.7764@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88797:1d009c35c573 Date: 2016-12-01 15:33 +0100 http://bitbucket.org/pypy/pypy/changeset/1d009c35c573/ Log: failing test for memoryview slicing case diff --git a/lib-python/3/http/client.py b/lib-python/3/http/client.py --- 
a/lib-python/3/http/client.py +++ b/lib-python/3/http/client.py @@ -541,6 +541,7 @@ try: while True: chunk_left = self._get_chunk_left() + print("chunk_left", chunk_left) if chunk_left is None: break value.append(self._safe_read(chunk_left)) @@ -590,6 +591,7 @@ s = [] while amt > 0: chunk = self.fp.read(min(amt, MAXAMOUNT)) + print("read chunk %d %d", len(chunk), min(amt, MAXAMOUNT)) if not chunk: raise IncompleteRead(b''.join(s), amt) s.append(chunk) diff --git a/lib-python/3/socket.py b/lib-python/3/socket.py --- a/lib-python/3/socket.py +++ b/lib-python/3/socket.py @@ -572,6 +572,7 @@ raise OSError("cannot read from timed out object") while True: try: + import pdb; pdb.set_trace() return self._sock.recv_into(b) except timeout: self._timeout_occurred = True diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -409,3 +409,10 @@ v = view.cast('h', shape=(3,2)) assert v.tolist() == [[2,3],[4,5],[6,7]] raises(TypeError, "view.cast('h', shape=(3,3))") + + def test_reversed(self): + bytes = b"\x01\x00\x02\x00\x03\x00" + view = memoryview(bytes) + revlist = list(reversed(view.tolist())) + assert list(reversed(view)) == revlist + assert list(reversed(view)) == view[::-1].tolist() From pypy.commits at gmail.com Thu Dec 1 12:01:52 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 09:01:52 -0800 (PST) Subject: [pypy-commit] pypy default: disable sum accumulation for floats as well (pointed out by armin, thanks) Message-ID: <58405780.624fc20a.7231f.7290@mx.google.com> Author: Richard Plangger Branch: Changeset: r88798:5055d03e5f24 Date: 2016-12-01 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/5055d03e5f24/ Log: disable sum accumulation for floats as well (pointed out by armin, thanks) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- 
a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -978,9 +978,7 @@ self.right is other.right class AccumPack(Pack): - SUPPORTED = { rop.FLOAT_ADD: '+', - rop.INT_ADD: '+', - } + SUPPORTED = { rop.INT_ADD: '+', } def __init__(self, nodes, operator, position): Pack.__init__(self, nodes) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py @@ -197,7 +197,7 @@ f13 = float_add(f12, f11) """) savings = self.savings(loop1) - assert savings == 2 + assert savings == -2 @py.test.mark.parametrize("bytes,s", [(4,0),(8,0)]) def test_sum_float_to_int(self, bytes, s): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1162,32 +1162,32 @@ vopt = self.vectorize(loop,1) self.assert_equal(loop, self.parse_loop(opt)) - def test_accumulate_basic(self): - trace = """ - [p0, i0, f0] - f1 = raw_load_f(p0, i0, descr=floatarraydescr) - f2 = float_add(f0, f1) - i1 = int_add(i0, 8) - i2 = int_lt(i1, 100) - guard_true(i2) [p0, i0, f2] - jump(p0, i1, f2) - """ - trace_opt = """ - [p0, i0, f0] - v6[0xf64] = vec_f() - v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) - v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) - label(p0, i0, v2[2xf64]) - i1 = int_add(i0, 16) - i2 = int_lt(i1, 100) - guard_true(i2) [p0, i0, v2[2xf64]] - v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) - v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64]) - jump(p0, i1, v3[2xf64]) - """ - loop = self.parse_loop(trace) - opt = self.vectorize(loop) - self.assert_equal(loop, self.parse_loop(trace_opt)) + #def test_accumulate_basic(self): + # trace = """ + # [p0, i0, f0] + # 
f1 = raw_load_f(p0, i0, descr=floatarraydescr) + # f2 = float_add(f0, f1) + # i1 = int_add(i0, 8) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, f2] + # jump(p0, i1, f2) + # """ + # trace_opt = """ + # [p0, i0, f0] + # v6[0xf64] = vec_f() + # v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) + # v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) + # label(p0, i0, v2[2xf64]) + # i1 = int_add(i0, 16) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, v2[2xf64]] + # v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) + # v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64]) + # jump(p0, i1, v3[2xf64]) + # """ + # loop = self.parse_loop(trace) + # opt = self.vectorize(loop) + # self.assert_equal(loop, self.parse_loop(trace_opt)) def test_element_f45_in_guard_failargs(self): trace = self.parse_loop(""" diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -842,7 +842,8 @@ oplist.append(vecop) opnum = rop.VEC_INT_XOR if datatype == FLOAT: - opnum = rop.VEC_FLOAT_XOR + # see PRECISION loss below + raise NotImplementedError vecop = VecOperation(opnum, [vecop, vecop], vecop, count) oplist.append(vecop) From pypy.commits at gmail.com Thu Dec 1 12:01:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 01 Dec 2016 09:01:56 -0800 (PST) Subject: [pypy-commit] pypy default: change tests to match the new behaviour Message-ID: <58405784.8b9a1c0a.d5a72.b373@mx.google.com> Author: Richard Plangger Branch: Changeset: r88800:50e5ff8f14e9 Date: 2016-12-01 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/50e5ff8f14e9/ Log: change tests to match the new behaviour diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -398,7 +398,7 @@ def test_sum_multi(self): result = 
self.run("sum_multi") assert result == sum(range(30)) + sum(range(60)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_float_to_int16(): return """ @@ -480,7 +480,7 @@ assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(2, 1) + self.check_vectorized(2, 0) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -491,7 +491,7 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(3, 1) + self.check_vectorized(3, 0) def define_prod(): return """ From pypy.commits at gmail.com Thu Dec 1 12:04:14 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 01 Dec 2016 09:04:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5: make sure that fromfile does not emit a warning Message-ID: <5840580e.52301c0a.d7d50.b668@mx.google.com> Author: Carl Friedrich Bolz Branch: py3.5 Changeset: r88801:b93629909a8f Date: 2016-12-01 18:02 +0100 http://bitbucket.org/pypy/pypy/changeset/b93629909a8f/ Log: make sure that fromfile does not emit a warning diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -185,7 +185,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.str_w(w_value) + value = space.bytes_w(w_value) if start + len(value) != stop: raise oefmt(space.w_ValueError, "cannot resize array") ll_buffer = self.ll_buffer diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -308,7 +308,7 @@ """ fromfile(f, n) Read n objects from the file object f and append them to the end of the - array. Also called as read. + array. 
""" try: size = ovfcheck(self.itemsize * n) @@ -323,7 +323,7 @@ item = item[0:elems] self._frombytes(space, item) raise oefmt(space.w_EOFError, "not enough items in file") - self.descr_fromstring(space, w_item) + self._frombytes(space, item) def descr_tofile(self, space, w_f): """ tofile(f) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -227,6 +227,19 @@ raises(EOFError, a.fromfile, myfile(b'\x01', 2 + i), 2) assert len(a) == 1 and a[0] == 257 + def test_fromfile_no_warning(self): + import warnings + # check that fromfile defers to frombytes, not fromstring + class FakeF(object): + def read(self, n): + return b"a" * n + a = self.array('b') + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. + warnings.simplefilter("always") + a.fromfile(FakeF(), 4) + assert len(w) == 0 + def test_fromlist(self): a = self.array('b') raises(OverflowError, a.fromlist, [1, 2, 400]) From pypy.commits at gmail.com Thu Dec 1 16:37:37 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:37 -0800 (PST) Subject: [pypy-commit] pypy py3.5: kill duplicate function (there is another one below) Message-ID: <58409821.54b31c0a.92145.1c3c@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88802:9bed0c6cb3d1 Date: 2016-12-01 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9bed0c6cb3d1/ Log: kill duplicate function (there is another one below) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -83,10 +83,6 @@ raise OperationError(get_error(space), space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) -def clearcache(space): - "Clear the internal cache." 
- # No cache in this implementation - @unwrap_spec(format=str) def unpack(space, format, w_str): From pypy.commits at gmail.com Thu Dec 1 16:37:33 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:33 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5840981d.54161c0a.94711.2112@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r826:43cab4e5c9a3 Date: 2016-12-01 22:37 +0100 http://bitbucket.org/pypy/pypy.org/changeset/43cab4e5c9a3/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66339 of $105000 (63.2%) + $66348 of $105000 (63.2%)
@@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Thu Dec 1 16:37:40 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:40 -0800 (PST) Subject: [pypy-commit] pypy py3.5: small fixes in struct.iter_unpack() Message-ID: <58409824.876ec20a.35ac6.df95@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88804:cca0cbd9a9d6 Date: 2016-12-01 18:19 +0100 http://bitbucket.org/pypy/pypy/changeset/cca0cbd9a9d6/ Log: small fixes in struct.iter_unpack() diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -105,7 +105,12 @@ class W_UnpackIter(W_Root): - def __init__(self, w_struct, buf): + def __init__(self, space, w_struct, w_buffer): + buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) + if buf.getlength() % w_struct.size != 0: + raise oefmt(get_error(space), + "iterative unpacking requires a bytes length multiple of %d", + w_struct.size) self.w_struct = w_struct self.buf = buf self.index = 0 @@ -159,8 +164,7 @@ return unpack_from(space, jit.promote_string(self.format), w_buffer, offset) def descr_iter_unpack(self, space, w_buffer): - buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) - return W_UnpackIter(self, buf) + return W_UnpackIter(space, self, w_buffer) W_Struct.typedef = TypeDef("Struct", __new__=interp2app(W_Struct.descr__new__.im_func), @@ -174,15 +178,7 @@ iter_unpack=interp2app(W_Struct.descr_iter_unpack), ) - at unwrap_spec(w_struct=W_Struct) -def new_unpackiter(space, w_subtype, w_struct, w_buffer): - buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) - w_res = space.allocate_instance(W_UnpackIter, w_subtype) - w_res.__init__(w_struct, buf) - return w_res - W_UnpackIter.typedef = TypeDef("unpack_iterator", - __new__=interp2app(new_unpackiter), __iter__=interp2app(W_UnpackIter.descr_iter), __next__=interp2app(W_UnpackIter.descr_next), __length_hint__=interp2app(W_UnpackIter.descr_length_hint) @@ -191,8 +187,7 @@ 
@unwrap_spec(format=str) def iter_unpack(space, format, w_buffer): w_struct = W_Struct(space, format) - buf = space.buffer_w(w_buffer, space.BUF_SIMPLE) - return W_UnpackIter(w_struct, buf) + return W_UnpackIter(space, w_struct, w_buffer) def clearcache(space): """No-op on PyPy""" diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -403,6 +403,25 @@ assert list(it) == [(0, 0), (0, 0)] it = self.struct.iter_unpack('ii', b) assert list(it) == [(0, 0), (0, 0)] + # + it = s.iter_unpack(b) + next(it) + assert it.__length_hint__() == 1 + next(it) + assert it.__length_hint__() == 0 + assert list(it) == [] + assert it.__length_hint__() == 0 + + def test_iter_unpack_bad_length(self): + struct = self.struct + s = struct.Struct('!i') + lst = list(s.iter_unpack(b'1234')) + assert lst == [(0x31323334,)] + lst = list(s.iter_unpack(b'')) + assert lst == [] + raises(struct.error, s.iter_unpack, b'12345') + raises(struct.error, s.iter_unpack, b'123') + raises(struct.error, struct.iter_unpack, 'h', b'12345') def test___float__(self): class MyFloat(object): From pypy.commits at gmail.com Thu Dec 1 16:37:42 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:42 -0800 (PST) Subject: [pypy-commit] pypy py3.5: rename (for test_structseq) Message-ID: <58409826.4f831c0a.130b1.21cc@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88805:be0cbcb1d37c Date: 2016-12-01 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/be0cbcb1d37c/ Log: rename (for test_structseq) diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -19,7 +19,7 @@ class stat_result(metaclass=structseqtype): - name = osname + ".stat_result" + name = "os.stat_result" st_mode = structseqfield(0, "protection bits") st_ino = structseqfield(1, "inode") From 
pypy.commits at gmail.com Thu Dec 1 16:37:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:38 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix this test: it has two 'mistakes' per line and happens to test that CPython reports a specific one of them before the other, whereas PyPy reports them in opposite order so far. This was probably not intended. Message-ID: <58409822.0e0a1c0a.fda03.2493@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88803:a59665c98dd5 Date: 2016-12-01 17:59 +0100 http://bitbucket.org/pypy/pypy/changeset/a59665c98dd5/ Log: fix this test: it has two 'mistakes' per line and happens to test that CPython reports a specific one of them before the other, whereas PyPy reports them in opposite order so far. This was probably not intended. diff --git a/lib-python/3/test/test_struct.py b/lib-python/3/test/test_struct.py --- a/lib-python/3/test/test_struct.py +++ b/lib-python/3/test/test_struct.py @@ -547,19 +547,19 @@ self.assertRaises(struct.error, struct.unpack_from, '12345', store, 0) # Format lists with trailing count spec should result in an error - self.assertRaises(struct.error, struct.pack, 'c12345', 'x') - self.assertRaises(struct.error, struct.unpack, 'c12345', 'x') + self.assertRaises(struct.error, struct.pack, 'c12345', b'x') + self.assertRaises(struct.error, struct.unpack, 'c12345', b'x') self.assertRaises(struct.error, struct.pack_into, 'c12345', store, 0, - 'x') + b'x') self.assertRaises(struct.error, struct.unpack_from, 'c12345', store, 0) # Mixed format tests - self.assertRaises(struct.error, struct.pack, '14s42', 'spam and eggs') + self.assertRaises(struct.error, struct.pack, '14s42', b'spam and eggs') self.assertRaises(struct.error, struct.unpack, '14s42', - 'spam and eggs') + b'spam and eggs') self.assertRaises(struct.error, struct.pack_into, '14s42', store, 0, - 'spam and eggs') + b'spam and eggs') self.assertRaises(struct.error, struct.unpack_from, '14s42', store, 0) def 
test_Struct_reinitialization(self): From pypy.commits at gmail.com Thu Dec 1 16:37:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:44 -0800 (PST) Subject: [pypy-commit] pypy py3.5: detail changed in AST between 3.3 and 3.5 Message-ID: <58409828.46831c0a.a820e.8303@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88806:22496395faa8 Date: 2016-12-01 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/22496395faa8/ Log: detail changed in AST between 3.3 and 3.5 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -484,9 +484,6 @@ def visit_Global(self, glob): for name in glob.names: - if isinstance(self.scope, ClassScope) and name == '__class__': - raise SyntaxError("cannot make __class__ global", - glob.lineno, glob.col_offset) old_role = self.scope.lookup_role(name) if old_role & (SYM_USED | SYM_ASSIGNED): if old_role & SYM_ASSIGNED: From pypy.commits at gmail.com Thu Dec 1 16:37:46 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 01 Dec 2016 13:37:46 -0800 (PST) Subject: [pypy-commit] pypy py3.5: merge heads Message-ID: <5840982a.46bb1c0a.1c422.6725@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88807:4e784ab053d6 Date: 2016-12-01 22:36 +0100 http://bitbucket.org/pypy/pypy/changeset/4e784ab053d6/ Log: merge heads diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -185,7 +185,7 @@ def setslice(self, space, w_slice, w_value): start, stop = self.decodeslice(space, w_slice) - value = space.str_w(w_value) + value = space.bytes_w(w_value) if start + len(value) != stop: raise oefmt(space.w_ValueError, "cannot resize array") ll_buffer = self.ll_buffer diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ 
b/pypy/module/array/interp_array.py @@ -308,7 +308,7 @@ """ fromfile(f, n) Read n objects from the file object f and append them to the end of the - array. Also called as read. + array. """ try: size = ovfcheck(self.itemsize * n) @@ -323,7 +323,7 @@ item = item[0:elems] self._frombytes(space, item) raise oefmt(space.w_EOFError, "not enough items in file") - self.descr_fromstring(space, w_item) + self._frombytes(space, item) def descr_tofile(self, space, w_f): """ tofile(f) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -227,6 +227,19 @@ raises(EOFError, a.fromfile, myfile(b'\x01', 2 + i), 2) assert len(a) == 1 and a[0] == 257 + def test_fromfile_no_warning(self): + import warnings + # check that fromfile defers to frombytes, not fromstring + class FakeF(object): + def read(self, n): + return b"a" * n + a = self.array('b') + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. 
+ warnings.simplefilter("always") + a.fromfile(FakeF(), 4) + assert len(w) == 0 + def test_fromlist(self): a = self.array('b') raises(OverflowError, a.fromlist, [1, 2, 400]) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -9,7 +9,7 @@ def extension_suffixes(space): suffixes_w = [] - if space.config.objspace.usemodules.cpyext: + if 1: #if space.config.objspace.usemodules.cpyext: suffixes_w.append(space.wrap(importing.get_so_extension(space))) return space.newlist(suffixes_w) From pypy.commits at gmail.com Fri Dec 2 03:49:12 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 00:49:12 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Some failing tests for the compiler Message-ID: <58413588.8675c20a.108e7.8cb0@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88808:2647ec2b2b73 Date: 2016-12-02 09:48 +0100 http://bitbucket.org/pypy/pypy/changeset/2647ec2b2b73/ Log: Some failing tests for the compiler diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -374,6 +374,44 @@ ex.normalize_exception(space) assert ex.match(space, space.w_SyntaxError) + def test_no_warning_run(self): + space = self.space + w_mod = space.appexec((), '():\n import warnings\n return warnings\n') #sys.getmodule('warnings') + w_filterwarnings = space.getattr(w_mod, space.wrap('filterwarnings')) + filter_arg = Arguments(space, [ space.wrap('error') ], ["module"], + [space.wrap("")]) + for code in [''' +class C: + global __class__ + __class__ = 42 +def testing(): + return __class__ +''', ''' +def testing(): + __class__ = 0 + def f(): + nonlocal __class__ + __class__ = 42 + f() + return __class__ +''', ''' +class Y: + class X: + nonlocal __class__ + __class__ = 42 +def testing(): + return 42 # 'Y.__class__' is *not* set to 42, at least on CPython 
3.5.2 +''' + ]: + space.call_args(w_filterwarnings, filter_arg) + pycode = self.compiler.compile(code, '', 'exec', 0) + space.call_method(w_mod, 'resetwarnings') + w_d = space.newdict() + pycode.exec_code(space, w_d, w_d) + w_res = space.call_function( + space.getitem(w_d, space.wrap('testing'))) + assert space.unwrap(w_res) == 42 + def test_firstlineno(self): snippet = str(py.code.Source(r''' def f(): "line 2" From pypy.commits at gmail.com Fri Dec 2 04:14:15 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 02 Dec 2016 01:14:15 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: progress: remove space.w_str completely (replaced by space.w_bytes and Message-ID: <58413b67.031f1c0a.d4af1.a530@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88809:485d75d6a904 Date: 2016-12-02 10:13 +0100 http://bitbucket.org/pypy/pypy/changeset/485d75d6a904/ Log: progress: remove space.w_str completely (replaced by space.w_bytes and space.w_text). mark space.str_w as not_rpython, since many tests still use it diff too long, truncating to 2000 out of 2006 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -14,7 +14,7 @@ def check_string(space, w_obj): - if not (space.isinstance_w(w_obj, space.w_str) or + if not (space.isinstance_w(w_obj, space.w_bytes) or space.isinstance_w(w_obj, space.w_unicode)): raise oefmt(space.w_TypeError, "AST string must be of type str or unicode") diff --git a/pypy/interpreter/astcompiler/misc.py b/pypy/interpreter/astcompiler/misc.py --- a/pypy/interpreter/astcompiler/misc.py +++ b/pypy/interpreter/astcompiler/misc.py @@ -110,9 +110,9 @@ def intern_if_common_string(space, w_const): # only intern identifier-like strings - if not space.is_w(space.type(w_const), space.w_str): + if not space.is_w(space.type(w_const), space.w_text): return w_const - for c in space.str_w(w_const): + for c in 
space.text_w(w_const): if not (c.isalnum() or c == '_'): return w_const return space.new_interned_w_str(w_const) diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -401,7 +401,7 @@ def check_string(space, w_obj): - if not (space.isinstance_w(w_obj, space.w_str) or + if not (space.isinstance_w(w_obj, space.w_bytes) or space.isinstance_w(w_obj, space.w_unicode)): raise oefmt(space.w_TypeError, "AST string must be of type str or unicode") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -5,7 +5,7 @@ from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id, specialize) + compute_unique_id, specialize, not_rpython) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX @@ -328,8 +328,8 @@ constructed before there is an object space instance. """ return self + @not_rpython def unwrap(self, space): - """NOT_RPYTHON""" # _____ this code is here to support testing only _____ return self @@ -412,8 +412,9 @@ """Base class for the interpreter-level implementations of object spaces. http://pypy.readthedocs.org/en/latest/objspace.html""" + @not_rpython def __init__(self, config=None): - "NOT_RPYTHON: Basic initialization of objects." + "Basic initialization of objects." self.fromcache = InternalSpaceCache(self).getorbuild self.threadlocals = ThreadLocals() # set recursion limit @@ -489,8 +490,9 @@ except AttributeError: return self.__class__.__name__ + @not_rpython def setbuiltinmodule(self, importname): - """NOT_RPYTHON. 
load a lazy pypy/module and put it into sys.modules""" + """load a lazy pypy/module and put it into sys.modules""" if '.' in importname: fullname = importname importname = fullname.rsplit('.', 1)[1] @@ -548,8 +550,8 @@ self.setitem(w_modules, w_name, w_mod) return w_mod + @not_rpython def get_builtinmodule_to_install(self): - """NOT_RPYTHON""" try: return self._builtinmodule_list except AttributeError: @@ -588,8 +590,9 @@ 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', ], None) + @not_rpython def make_builtins(self): - "NOT_RPYTHON: only for initializing the space." + "only for initializing the space." from pypy.module.exceptions import Module w_name = self.newtext('exceptions') @@ -642,8 +645,8 @@ objects.""" raise NotImplementedError + @not_rpython def export_builtin_exceptions(self): - """NOT_RPYTHON""" w_dic = self.exceptions_module.getdict(self) w_keys = self.call_method(w_dic, "keys") exc_types_w = {} @@ -656,8 +659,8 @@ setattr(self, "w_" + excname, w_exc) return exc_types_w + @not_rpython def install_mixedmodule(self, mixedname, installed_builtin_modules): - """NOT_RPYTHON""" modname = self.setbuiltinmodule(mixedname) if modname: assert modname not in installed_builtin_modules, ( @@ -665,8 +668,9 @@ "app-level module %r" % (modname,)) installed_builtin_modules.append(modname) + @not_rpython def setup_builtin_modules(self): - "NOT_RPYTHON: only for initializing the space." + "only for initializing the space." 
if self.config.objspace.usemodules.cpyext: from pypy.module.cpyext.state import State self.fromcache(State).build_api(self) @@ -676,8 +680,9 @@ for mod in self.builtin_modules.values(): mod.setup_after_space_initialization() + @not_rpython def initialize(self): - """NOT_RPYTHON: Abstract method that should put some minimal + """Abstract method that should put some minimal content into the w_builtins.""" def getexecutioncontext(self): @@ -833,7 +838,7 @@ def new_interned_w_str(self, w_s): assert isinstance(w_s, W_Root) # and is not None - s = self.str_w(w_s) + s = self.bytes_w(w_s) if not we_are_translated(): assert type(s) is str w_s1 = self.interned_strings.get(s) @@ -1289,14 +1294,16 @@ def exception_issubclass_w(self, w_cls1, w_cls2): return self.issubtype_w(w_cls1, w_cls2) + @not_rpython def new_exception_class(self, *args, **kwargs): - "NOT_RPYTHON; convenience method to create excceptions in modules" + "convenience method to create excceptions in modules" return new_exception_class(self, *args, **kwargs) # end of special support code + @not_rpython def eval(self, expression, w_globals, w_locals, hidden_applevel=False): - "NOT_RPYTHON: For internal debugging." + "For internal debugging." if isinstance(expression, str): compiler = self.createcompiler() expression = compiler.compile(expression, '?', 'eval', 0, @@ -1305,9 +1312,10 @@ raise TypeError('space.eval(): expected a string, code or PyCode object') return expression.exec_code(self, w_globals, w_locals) + @not_rpython def exec_(self, statement, w_globals, w_locals, hidden_applevel=False, filename=None): - "NOT_RPYTHON: For internal debugging." + "For internal debugging." if filename is None: filename = '?' 
from pypy.interpreter.pycode import PyCode @@ -1523,7 +1531,7 @@ return None code = 's*' if code == 's*': - if self.isinstance_w(w_obj, self.w_str): + if self.isinstance_w(w_obj, self.w_bytes): return w_obj.readbuf_w(self) if self.isinstance_w(w_obj, self.w_unicode): return self.str(w_obj).readbuf_w(self) @@ -1536,7 +1544,7 @@ except BufferInterfaceNotFound: self._getarg_error("string or buffer", w_obj) elif code == 's#': - if self.isinstance_w(w_obj, self.w_str): + if self.isinstance_w(w_obj, self.w_bytes): return w_obj.str_w(self) if self.isinstance_w(w_obj, self.w_unicode): return self.str(w_obj).str_w(self) @@ -1592,13 +1600,18 @@ return buf.as_str() def str_or_None_w(self, w_obj): - return None if self.is_none(w_obj) else self.str_w(w_obj) + # YYY rename + return None if self.is_none(w_obj) else self.bytes_w(w_obj) + def bytes_w(self, w_obj): + return w_obj.str_w(self) + text_w = bytes_w # equivalent to identifier_w on Python3 + + @not_rpython def str_w(self, w_obj): - return w_obj.str_w(self) + # XXX there are still some tests that call it + return self.bytes_w(w_obj) - bytes_w = str_w # the same on Python3 - text_w = str_w # equivalent to identifier_w on Python3 def str0_w(self, w_obj): "Like str_w, but rejects strings with NUL bytes." @@ -1644,10 +1657,11 @@ return w_obj.float_w(self, allow_conversion) def realstr_w(self, w_obj): - # Like str_w, but only works if w_obj is really of type 'str'. - if not self.isinstance_w(w_obj, self.w_str): + # YYY rename + # Like bytes_w, but only works if w_obj is really of type 'str'. 
+ if not self.isinstance_w(w_obj, self.w_bytes): raise oefmt(self.w_TypeError, "argument must be a string") - return self.str_w(w_obj) + return self.bytes_w(w_obj) def unicode_w(self, w_obj): return w_obj.unicode_w(self) @@ -1859,8 +1873,8 @@ class AppExecCache(SpaceCache): + @not_rpython def build(cache, source): - """ NOT_RPYTHON """ space = cache.space # XXX will change once we have our own compiler import py diff --git a/pypy/interpreter/interactive.py b/pypy/interpreter/interactive.py --- a/pypy/interpreter/interactive.py +++ b/pypy/interpreter/interactive.py @@ -152,7 +152,7 @@ if not k.startswith('w_')])) del local['locals'] for w_name in self.space.unpackiterable(self.w_globals): - local['w_' + self.space.str_w(w_name)] = ( + local['w_' + self.space.text_w(w_name)] = ( self.space.getitem(self.w_globals, w_name)) code.interact(banner=banner, local=local) # copy back 'w_' names diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -137,7 +137,7 @@ exitcode = space.int_w(w_exitcode, allow_conversion=False) except OperationError: # not an integer: print it to stderr - msg = space.str_w(space.str(w_exitcode)) + msg = space.text_w(space.str(w_exitcode)) print >> sys.stderr, msg exitcode = 1 raise SystemExit(exitcode) diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -91,7 +91,7 @@ def descr__reduce__(self, space): w_name = space.finditem(self.w_dict, space.newtext('__name__')) if (w_name is None or - not space.isinstance_w(w_name, space.w_str)): + not space.isinstance_w(w_name, space.w_text)): # maybe raise exception here (XXX this path is untested) return space.w_None w_modules = space.sys.get('modules') diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -25,7 +25,7 @@ # helper -def 
unpack_str_tuple(space,w_str_tuple): +def unpack_text_tuple(space,w_str_tuple): return [space.text_w(w_el) for w_el in space.unpackiterable(w_str_tuple)] @@ -382,14 +382,14 @@ if not space.isinstance_w(w_constants, space.w_tuple): raise oefmt(space.w_TypeError, "Expected tuple for constants") consts_w = space.fixedview(w_constants) - names = unpack_str_tuple(space, w_names) - varnames = unpack_str_tuple(space, w_varnames) + names = unpack_text_tuple(space, w_names) + varnames = unpack_text_tuple(space, w_varnames) if w_freevars is not None: - freevars = unpack_str_tuple(space, w_freevars) + freevars = unpack_text_tuple(space, w_freevars) else: freevars = [] if w_cellvars is not None: - cellvars = unpack_str_tuple(space, w_cellvars) + cellvars = unpack_text_tuple(space, w_cellvars) else: cellvars = [] code = space.allocate_instance(PyCode, w_subtype) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -904,7 +904,7 @@ def unpickle_block(space, w_tup): w_opname, w_handlerposition, w_valuestackdepth = space.unpackiterable(w_tup) - opname = space.str_w(w_opname) + opname = space.text_w(w_opname) handlerposition = space.int_w(w_handlerposition) valuestackdepth = space.int_w(w_valuestackdepth) assert valuestackdepth >= 0 diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -956,10 +956,10 @@ space = self.space if space.isinstance_w(w_2, space.w_tuple): for w_t in space.fixedview(w_2): - if space.isinstance_w(w_t, space.w_str): + if space.isinstance_w(w_t, space.w_bytes): msg = "catching of string exceptions is deprecated" space.warn(space.newtext(msg), space.w_DeprecationWarning) - elif space.isinstance_w(w_2, space.w_str): + elif space.isinstance_w(w_2, space.w_bytes): msg = "catching of string exceptions is deprecated" space.warn(space.newtext(msg), space.w_DeprecationWarning) return 
space.newbool(space.exception_match(w_1, w_2)) diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -227,7 +227,7 @@ def decode_utf8_recode(space, s, ps, end, recode_encoding): u, ps = decode_utf8(space, s, ps, end) w_v = unicodehelper.encode(space, space.newunicode(u), recode_encoding) - v = space.str_w(w_v) + v = space.bytes_w(w_v) return v, ps def raise_app_valueerror(space, msg): diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ b/pypy/interpreter/pyparser/pyparse.py @@ -8,7 +8,7 @@ if not space.isinstance_w(w_text, space.w_unicode): raise error.SyntaxError("codec did not return a unicode object") w_recoded = space.call_method(w_text, "encode", space.newtext("utf-8")) - return space.str_w(w_recoded) + return space.bytes_w(w_recoded) def _normalize_encoding(encoding): """returns normalized name for @@ -130,7 +130,7 @@ if e.match(space, space.w_UnicodeDecodeError): e.normalize_exception(space) w_message = space.str(e.get_w_value(space)) - raise error.SyntaxError(space.str_w(w_message)) + raise error.SyntaxError(space.text_w(w_message)) raise flags = compile_info.flags diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -6,7 +6,7 @@ space = self.space w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): - assert space.type(w_ret) == space.w_str + assert space.type(w_ret) == space.w_bytes assert space.str_w(w_ret) == value elif isinstance(value, unicode): assert space.type(w_ret) == space.w_unicode @@ -58,7 +58,7 @@ w_ret = parsestring.parsestr(space, None, repr("hello"), True) assert space.isinstance_w(w_ret, space.w_unicode) w_ret = 
parsestring.parsestr(space, None, "b'hi'", True) - assert space.isinstance_w(w_ret, space.w_str) + assert space.isinstance_w(w_ret, space.w_bytes) w_ret = parsestring.parsestr(space, None, "r'hi'", True) assert space.isinstance_w(w_ret, space.w_unicode) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -649,7 +649,7 @@ assert meth4.call_args(args) == obj2 # Check method returned from unbound_method.__get__() # --- with an incompatible class - w_meth5 = meth3.descr_method_get(space.wrap('hello'), space.w_str) + w_meth5 = meth3.descr_method_get(space.wrap('hello'), space.w_text) assert space.is_w(w_meth5, w_meth3) # Same thing, with an old-style class w_oldclass = space.call_function( @@ -660,7 +660,7 @@ # Reverse order of old/new styles w_meth7 = descr_function_get(space, func, space.w_None, w_oldclass) meth7 = space.unwrap(w_meth7) - w_meth8 = meth7.descr_method_get(space.wrap('hello'), space.w_str) + w_meth8 = meth7.descr_method_get(space.wrap('hello'), space.w_text) assert space.is_w(w_meth8, w_meth7) class TestShortcuts(object): diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -551,7 +551,7 @@ space = self.space w = space.wrap def g_run(space, w_type): - assert space.is_w(w_type, space.w_str) + assert space.is_w(w_type, space.w_text) return w(42) app_g_run = gateway.interp2app_temp(g_run, @@ -559,7 +559,7 @@ gateway.W_Root], as_classmethod=True) w_app_g_run = space.wrap(app_g_run) - w_bound = space.get(w_app_g_run, w("hello"), space.w_str) + w_bound = space.get(w_app_g_run, w("hello"), space.w_text) assert space.eq_w(space.call_function(w_bound), w(42)) def test_interp2app_fastcall(self): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- 
a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -20,9 +20,9 @@ w_result = space.isinstance(w_i, space.w_int) assert space.is_true(w_result) assert space.isinstance_w(w_i, space.w_int) - w_result = space.isinstance(w_i, space.w_str) + w_result = space.isinstance(w_i, space.w_bytes) assert not space.is_true(w_result) - assert not space.isinstance_w(w_i, space.w_str) + assert not space.isinstance_w(w_i, space.w_bytes) def test_newlist(self): w = self.space.wrap diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -36,7 +36,7 @@ 'None' : '(space.w_None)', 'False' : '(space.w_False)', 'True' : '(space.w_True)', - 'bytes' : '(space.w_str)', + 'bytes' : '(space.w_bytes)', 'file' : 'state.get(space).w_file', 'open' : 'state.get(space).w_file', diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -44,7 +44,7 @@ if space.isinstance_w(w_source, space.w_unicode): w_utf_8_source = space.call_method(w_source, "encode", space.newtext("utf-8")) - source = space.str_w(w_utf_8_source) + source = space.bytes_w(w_utf_8_source) # This flag tells the parser to reject any coding cookies it sees. flags |= consts.PyCF_SOURCE_IS_UTF8 else: @@ -69,7 +69,7 @@ are dictionaries, defaulting to the current current globals and locals. If only globals is given, locals defaults to it. 
""" - if (space.isinstance_w(w_code, space.w_str) or + if (space.isinstance_w(w_code, space.w_bytes) or space.isinstance_w(w_code, space.w_unicode)): w_code = compile(space, space.call_method(w_code, 'lstrip', diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -64,7 +64,7 @@ self.w_dict = w_dict def setname(self, space, w_newname): - if not space.isinstance_w(w_newname, space.w_str): + if not space.isinstance_w(w_newname, space.w_text): raise oefmt(space.w_TypeError, "__name__ must be a string object") self.name = space.text_w(w_newname) @@ -172,7 +172,7 @@ if not e.match(space, space.w_AttributeError): raise return "?" - if space.isinstance_w(w_mod, space.w_str): + if space.isinstance_w(w_mod, space.w_text): return space.text_w(w_mod) return "?" diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -44,7 +44,7 @@ # space.{get,set,del}attr()... # Note that if w_name is already an exact string it must be returned # unmodified (and not e.g. unwrapped-rewrapped). - if not space.is_w(space.type(w_name), space.w_str): + if not space.is_w(space.type(w_name), space.w_text): name = space.text_w(w_name) # typecheck w_name = space.newtext(name) # rewrap as a real string return w_name @@ -223,7 +223,7 @@ table of interned strings whose purpose is to speed up dictionary lookups. 
Return the string itself or the previously interned string object with the same value.""" - if space.is_w(space.type(w_str), space.w_str): + if space.is_w(space.type(w_str), space.w_bytes): return space.new_interned_w_str(w_str) raise oefmt(space.w_TypeError, "intern() argument must be string.") diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -91,7 +91,7 @@ if space.is_w(w_name, space.w_None): w_name = space.getattr(w_python_callable, space.newtext('__name__')) - name = space.str_w(w_name) + name = space.text_w(w_name) ctx = ffi.ctxobj.ctx index = parse_c_type.search_in_globals(ctx, name) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -98,7 +98,7 @@ def _repr_extra(self): space = self.space - return 'calling ' + space.str_w(space.repr(self.w_callable)) + return 'calling ' + space.text_w(space.repr(self.w_callable)) def write_error_return_value(self, ll_res): error_string = self.error_string diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -232,10 +232,10 @@ from pypy.module._cffi_backend import ctypeprim space = self.space if isinstance(ctitem, ctypeprim.W_CTypePrimitive) and ctitem.size == 1: - if space.isinstance_w(w_value, space.w_str): + if space.isinstance_w(w_value, space.w_bytes): from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw - value = space.str_w(w_value) + value = space.bytes_w(w_value) if len(value) != length: raise oefmt(space.w_ValueError, "need a string of length %d, got %d", @@ -324,7 +324,7 @@ return self._add_or_sub(w_other, -1) def getcfield(self, w_attr): - return 
self.ctype.getcfield(self.space.str_w(w_attr)) + return self.ctype.getcfield(self.space.text_w(w_attr)) def getattr(self, w_attr): cfield = self.getcfield(w_attr) @@ -419,8 +419,8 @@ with self as ptr: if not ptr: raise oefmt(space.w_RuntimeError, - "cannot use unpack() on %s", - space.str_w(self.repr())) + "cannot use unpack() on %R", + self) w_result = ctype.ctitem.unpack_ptr(ctype, ptr, length) return w_result @@ -565,7 +565,7 @@ def _repr_extra(self): w_repr = self.space.repr(self.w_keepalive) - return "handle to %s" % (self.space.str_w(w_repr),) + return "handle to %s" % (self.space.text_w(w_repr),) class W_CDataFromBuffer(W_CData): diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py --- a/pypy/module/_cffi_backend/cdlopen.py +++ b/pypy/module/_cffi_backend/cdlopen.py @@ -155,7 +155,7 @@ p = rffi.ptradd(p, llmemory.raw_malloc_usage(n * rffi.sizeof(GLOBAL_S))) nintconsts = rffi.cast(rffi.CArrayPtr(CDL_INTCONST_S), p) for i in range(n): - decoder = StringDecoder(ffi, space.str_w(globals_w[i * 2])) + decoder = StringDecoder(ffi, space.bytes_w(globals_w[i * 2])) nglobs[i].c_type_op = decoder.next_opcode() nglobs[i].c_name = decoder.next_name() op = getop(nglobs[i].c_type_op) @@ -185,7 +185,7 @@ # 'desc' is the tuple of strings (desc_struct, desc_field_1, ..) 
desc = space.fixedview(struct_unions_w[i]) nf1 = len(desc) - 1 - decoder = StringDecoder(ffi, space.str_w(desc[0])) + decoder = StringDecoder(ffi, space.bytes_w(desc[0])) rffi.setintfield(nstructs[i], 'c_type_index', decoder.next_4bytes()) flags = decoder.next_4bytes() rffi.setintfield(nstructs[i], 'c_flags', flags) @@ -202,7 +202,7 @@ rffi.setintfield(nstructs[i], 'c_first_field_index', nf) rffi.setintfield(nstructs[i], 'c_num_fields', nf1) for j in range(nf1): - decoder = StringDecoder(ffi, space.str_w(desc[j + 1])) + decoder = StringDecoder(ffi, space.bytes_w(desc[j + 1])) # this 'decoder' is for one of the other strings beyond # the first one, describing one field each type_op = decoder.next_opcode() @@ -226,7 +226,7 @@ n = len(enums_w) nenums = allocate_array(ffi, ENUM_S, n) for i in range(n): - decoder = StringDecoder(ffi, space.str_w(enums_w[i])) + decoder = StringDecoder(ffi, space.bytes_w(enums_w[i])) rffi.setintfield(nenums[i], 'c_type_index', decoder.next_4bytes()) rffi.setintfield(nenums[i], 'c_type_prim', decoder.next_4bytes()) nenums[i].c_name = decoder.next_name() @@ -241,7 +241,7 @@ n = len(typenames_w) ntypenames = allocate_array(ffi, TYPENAME_S, n) for i in range(n): - decoder = StringDecoder(ffi, space.str_w(typenames_w[i])) + decoder = StringDecoder(ffi, space.bytes_w(typenames_w[i])) rffi.setintfield(ntypenames[i],'c_type_index',decoder.next_4bytes()) ntypenames[i].c_name = decoder.next_name() ffi.ctxobj.ctx.c_typenames = ntypenames diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -175,7 +175,7 @@ def direct_typeoffsetof(self, w_field_or_index, following=0): space = self.space try: - fieldname = space.str_w(w_field_or_index) + fieldname = space.text_w(w_field_or_index) except OperationError as e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/module/_cffi_backend/ctypeprim.py 
b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -25,14 +25,14 @@ def extra_repr(self, cdata): w_ob = self.convert_to_object(cdata) - return self.space.str_w(self.space.repr(w_ob)) + return self.space.text_w(self.space.repr(w_ob)) def _alignof(self): return self.align def cast_str(self, w_ob): space = self.space - s = space.str_w(w_ob) + s = space.bytes_w(w_ob) if len(s) != 1: raise oefmt(space.w_TypeError, "cannot cast string of length %d to ctype '%s'", @@ -56,7 +56,7 @@ ptr = w_ob.unsafe_escaping_ptr() value = rffi.cast(lltype.Signed, ptr) value = self._cast_result(value) - elif space.isinstance_w(w_ob, space.w_str): + elif space.isinstance_w(w_ob, space.w_bytes): value = self.cast_str(w_ob) value = self._cast_result(value) elif space.isinstance_w(w_ob, space.w_unicode): @@ -76,7 +76,7 @@ def _overflow(self, w_ob): space = self.space - s = space.str_w(space.str(w_ob)) + s = space.text_w(space.str(w_ob)) raise oefmt(space.w_OverflowError, "integer %s does not fit '%s'", s, self.name) @@ -129,7 +129,7 @@ def _convert_to_char(self, w_ob): space = self.space - if space.isinstance_w(w_ob, space.w_str): + if space.isinstance_w(w_ob, space.w_bytes): s = space.bytes_w(w_ob) if len(s) == 1: return s[0] @@ -383,7 +383,7 @@ w_ob.ctype.name, self.name) w_ob = w_ob.convert_to_object() # - if space.isinstance_w(w_ob, space.w_str): + if space.isinstance_w(w_ob, space.w_bytes): value = self.cast_str(w_ob) elif space.isinstance_w(w_ob, space.w_unicode): value = self.cast_unicode(w_ob) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -74,9 +74,9 @@ else: self._convert_array_from_listview(cdata, space.listview(w_ob)) elif self.accept_str: - if not space.isinstance_w(w_ob, space.w_str): + if not space.isinstance_w(w_ob, space.w_bytes): raise self._convert_error("str 
or list or tuple", w_ob) - s = space.str_w(w_ob) + s = space.bytes_w(w_ob) n = len(s) if self.length >= 0 and n > self.length: raise oefmt(space.w_IndexError, @@ -107,8 +107,8 @@ with cdataobj as ptr: if not ptr: raise oefmt(space.w_RuntimeError, - "cannot use string() on %s", - space.str_w(cdataobj.repr())) + "cannot use string() on %R", + cdataobj) # from pypy.module._cffi_backend import ctypearray length = maxlen @@ -280,9 +280,9 @@ def _prepare_pointer_call_argument(self, w_init, cdata, keepalives, i): space = self.space - if self.accept_str and space.isinstance_w(w_init, space.w_str): + if self.accept_str and space.isinstance_w(w_init, space.w_bytes): # special case to optimize strings passed to a "char *" argument - value = w_init.str_w(space) + value = space.bytes_w(w_init) keepalives[i] = value buf, buf_flag = rffi.get_nonmovingbuffer_final_null(value) rffi.cast(rffi.CCHARPP, cdata)[0] = buf diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -138,7 +138,7 @@ lst_w = space.fixedview(w_ob) for i in range(len(lst_w)): w_key = lst_w[i] - key = space.str_w(w_key) + key = space.text_w(w_key) try: cf = self._fields_dict[key] except KeyError: diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -152,7 +152,7 @@ space = self.space if (accept & ACCEPT_STRING) and ( space.isinstance_w(w_x, space.w_basestring)): - string = space.str_w(w_x) + string = space.text_w(w_x) consider_fn_as_fnptr = (accept & CONSIDER_FN_AS_FNPTR) != 0 if jit.isconstant(string): try: @@ -226,7 +226,7 @@ space = self.space if isinstance(w_arg, W_LibObject) and len(args_w) == 1: # case 3 in the docstring - return w_arg.address_of_func_or_global_var(space.str_w(args_w[0])) + return 
w_arg.address_of_func_or_global_var(space.text_w(args_w[0])) # w_ctype = self.ffi_type(w_arg, ACCEPT_CDATA) if len(args_w) == 0: diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -137,7 +137,7 @@ def _from_buffer(space, w_ctype, w_x): buf = _fetch_as_read_buffer(space, w_x) - if space.isinstance_w(w_x, space.w_str): + if space.isinstance_w(w_x, space.w_bytes): _cdata = get_raw_address_of_string(space, w_x) else: try: @@ -178,7 +178,7 @@ cache = space.fromcache(RawBytesCache) rawbytes = cache.wdict.get(w_x) if rawbytes is None: - data = space.str_w(w_x) + data = space.bytes_w(w_x) if we_are_translated() and not rgc.can_move(data): lldata = llstr(data) data_start = (llmemory.cast_ptr_to_adr(lldata) + diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py --- a/pypy/module/_cffi_backend/lib_obj.py +++ b/pypy/module/_cffi_backend/lib_obj.py @@ -222,7 +222,7 @@ else: raise oefmt(self.space.w_AttributeError, "cannot write to function or constant '%s'", - self.space.str_w(w_attr)) + self.space.text_w(w_attr)) def descr_delattr(self, w_attr): self._get_attr(w_attr) # for the possible AttributeError diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -309,7 +309,7 @@ field_w = space.fixedview(w_field) if not (2 <= len(field_w) <= 4): raise oefmt(space.w_TypeError, "bad field descr") - fname = space.str_w(field_w[0]) + fname = space.text_w(field_w[0]) ftype = space.interp_w(ctypeobj.W_CType, field_w[1]) fbitsize = -1 foffset = -1 @@ -568,7 +568,7 @@ enumvalues_w = space.fixedview(w_enumvalues) if len(enumerators_w) != len(enumvalues_w): raise oefmt(space.w_ValueError, "tuple args must have the same size") - enumerators = [space.str_w(w) for w in enumerators_w] + enumerators = [space.text_w(w) for w in 
enumerators_w] # if (not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveSigned) and not isinstance(w_basectype, ctypeprim.W_CTypePrimitiveUnsigned)): diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -176,7 +176,7 @@ raise oefmt(space.w_TypeError, "wrong exception") delta = space.int_w(w_end) - space.int_w(w_start) - if delta < 0 or not (space.isinstance_w(w_obj, space.w_str) or + if delta < 0 or not (space.isinstance_w(w_obj, space.w_bytes) or space.isinstance_w(w_obj, space.w_unicode)): raise oefmt(space.w_TypeError, "wrong exception") @@ -561,7 +561,7 @@ raise return errorchar - if space.isinstance_w(w_ch, space.w_str): + if space.isinstance_w(w_ch, space.w_bytes): # Charmap may return a string return space.bytes_w(w_ch) elif space.isinstance_w(w_ch, space.w_int): diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -38,7 +38,7 @@ if w_src is None: return default try: - return space.str_w(w_src) + return space.text_w(w_src) except OperationError as e: if e.match(space, space.w_TypeError): raise oefmt(space.w_TypeError, '"%s" must be a string', attrname) @@ -49,9 +49,9 @@ return default if space.is_w(w_src, space.w_None): return '\0' - if not space.isinstance_w(w_src, space.w_str): + if not space.isinstance_w(w_src, space.w_text): raise oefmt(space.w_TypeError, '"%s" must be string, not %T', name, w_src) - src = space.str_w(w_src) + src = space.text_w(w_src) if len(src) == 1: return src[0] if len(src) == 0: diff --git a/pypy/module/_csv/interp_writer.py b/pypy/module/_csv/interp_writer.py --- a/pypy/module/_csv/interp_writer.py +++ b/pypy/module/_csv/interp_writer.py @@ -42,9 +42,9 @@ if space.is_w(w_field, space.w_None): field = "" elif space.isinstance_w(w_field, space.w_float): - field = space.str_w(space.repr(w_field)) + field = 
space.text_w(space.repr(w_field)) else: - field = space.str_w(space.str(w_field)) + field = space.text_w(space.str(w_field)) # if dialect.quoting == QUOTE_NONNUMERIC: try: diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -56,7 +56,7 @@ return if self.space.sys.track_resources: w_repr = self.space.repr(self) - str_repr = self.space.str_w(w_repr) + str_repr = self.space.text_w(w_repr) w_msg = self.space.newtext("WARNING: unclosed file: " + str_repr) self.space.resource_warning(w_msg, self.w_tb) # @@ -493,7 +493,7 @@ if w_name is None: return '?' else: - return space.str_w(space.repr(w_name)) + return space.text_w(space.repr(w_name)) def file_write(self, w_data): """write(str) -> None. Write string str to file. @@ -526,7 +526,7 @@ self.check_writable() lines = space.fixedview(w_lines) for i, w_line in enumerate(lines): - if not space.isinstance_w(w_line, space.w_str): + if not space.isinstance_w(w_line, space.w_bytes): try: if self.binary: line = w_line.readbuf_w(space).as_str() diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -89,7 +89,7 @@ length = rwbuffer.getlength() w_data = space.call_method(self, "read", space.newint(length)) - if not space.isinstance_w(w_data, space.w_str): + if not space.isinstance_w(w_data, space.w_bytes): raise oefmt(space.w_TypeError, "read() should return bytes") data = space.bytes_w(w_data) rwbuffer.setslice(0, data) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -185,7 +185,7 @@ if trap_eintr(space, e): continue raise - if not space.isinstance_w(w_readahead, space.w_str): + if not space.isinstance_w(w_readahead, space.w_bytes): raise oefmt(space.w_IOError, "peek() should have returned a 
bytes object, " "not '%T'", w_readahead) @@ -215,7 +215,7 @@ if trap_eintr(space, e): continue raise - if not space.isinstance_w(w_read, space.w_str): + if not space.isinstance_w(w_read, space.w_bytes): raise oefmt(space.w_IOError, "peek() should have returned a bytes object, not " "'%T'", w_read) @@ -337,7 +337,7 @@ return w_data break - if not space.isinstance_w(w_data, space.w_str): + if not space.isinstance_w(w_data, space.w_bytes): raise oefmt(space.w_TypeError, "read() should return bytes") data = space.bytes_w(w_data) if not data: diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -284,7 +284,7 @@ raise return space.newtext('ascii') else: - if space.isinstance_w(w_encoding, space.w_str): + if space.isinstance_w(w_encoding, space.w_text): return w_encoding raise oefmt(space.w_IOError, "could not determine default encoding") @@ -565,7 +565,7 @@ w_input = space.call_method(self.w_buffer, "read1", space.newint(self.chunk_size)) - if not space.isinstance_w(w_input, space.w_str): + if not space.isinstance_w(w_input, space.w_bytes): msg = "decoder getstate() should have returned a bytes " \ "object not '%T'" raise oefmt(space.w_TypeError, msg, w_input) @@ -897,7 +897,7 @@ # Just like _read_chunk, feed the decoder and save a snapshot. 
w_chunk = space.call_method(self.w_buffer, "read", space.newint(cookie.bytes_to_feed)) - if not space.isinstance_w(w_chunk, space.w_str): + if not space.isinstance_w(w_chunk, space.w_bytes): msg = "underlying read() should have returned " \ "a bytes object, not '%T'" raise oefmt(space.w_TypeError, msg, w_chunk) diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py --- a/pypy/module/_pypyjson/interp_encoder.py +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -17,8 +17,8 @@ def raw_encode_basestring_ascii(space, w_string): - if space.isinstance_w(w_string, space.w_str): - s = space.str_w(w_string) + if space.isinstance_w(w_string, space.w_bytes): + s = space.bytes_w(w_string) for i in range(len(s)): c = s[i] if c >= ' ' and c <= '~' and c != '"' and c != '\\': diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -80,7 +80,7 @@ def maybe_handle_char_or_unichar_p(self, w_ffitype, w_obj): w_type = jit.promote(self.space.type(w_obj)) if w_ffitype.is_char_p() and w_type is self.space.w_bytes: - strval = self.space.str_w(w_obj) + strval = self.space.bytes_w(w_obj) self.handle_char_p(w_ffitype, w_obj, strval) return True elif w_ffitype.is_unichar_p() and (w_type is self.space.w_bytes or diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -99,7 +99,7 @@ def unpack_simple_shape(space, w_shape): # 'w_shape' must be either a letter or a tuple (struct, 1). 
- if space.isinstance_w(w_shape, space.w_str): + if space.isinstance_w(w_shape, space.w_text): letter = space.text_w(w_shape) return letter2tp(space, letter) else: diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -262,7 +262,7 @@ # host can be None, string or unicode if space.is_w(w_host, space.w_None): host = None - elif space.isinstance_w(w_host, space.w_str): + elif space.isinstance_w(w_host, space.w_bytes): host = space.bytes_w(w_host) elif space.isinstance_w(w_host, space.w_unicode): w_shost = space.call_method(w_host, "encode", space.newtext("idna")) @@ -276,7 +276,7 @@ port = None elif space.isinstance_w(w_port, space.w_int) or space.isinstance_w(w_port, space.w_long): port = str(space.int_w(w_port)) - elif space.isinstance_w(w_port, space.w_str): + elif space.isinstance_w(w_port, space.w_bytes): port = space.bytes_w(w_port) else: raise oefmt(space.w_TypeError, diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -251,7 +251,7 @@ else: literal = '\\' not in filter_as_string use_builder = ( - space.isinstance_w(w_string, space.w_str) and literal) + space.isinstance_w(w_string, space.w_bytes) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False diff --git a/pypy/module/_warnings/interp_warnings.py b/pypy/module/_warnings/interp_warnings.py --- a/pypy/module/_warnings/interp_warnings.py +++ b/pypy/module/_warnings/interp_warnings.py @@ -249,7 +249,7 @@ w_text = space.str(w_message) w_category = space.type(w_message) elif (not space.isinstance_w(w_message, space.w_unicode) or - not space.isinstance_w(w_message, space.w_str)): + not space.isinstance_w(w_message, space.w_bytes)): w_text = space.str(w_message) w_message = space.call_function(w_category, w_message) else: diff --git a/pypy/module/_winreg/interp_winreg.py 
b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -217,7 +217,7 @@ if space.is_w(w_subkey, space.w_None): subkey = None else: - subkey = space.str_w(w_subkey) + subkey = space.text_w(w_subkey) with rffi.scoped_str2charp(value) as dataptr: ret = rwinreg.RegSetValue(hkey, subkey, rwinreg.REG_SZ, dataptr, len(value)) if ret != 0: @@ -238,7 +238,7 @@ if space.is_w(w_subkey, space.w_None): subkey = None else: - subkey = space.str_w(w_subkey) + subkey = space.text_w(w_subkey) with lltype.scoped_alloc(rwin32.PLONG.TO, 1) as bufsize_p: ret = rwinreg.RegQueryValue(hkey, subkey, None, bufsize_p) bufSize = intmask(bufsize_p[0]) @@ -286,7 +286,7 @@ if space.isinstance_w(w_value, space.w_unicode): w_value = space.call_method(w_value, 'encode', space.newtext('mbcs')) - buf = rffi.str2charp(space.str_w(w_value)) + buf = rffi.str2charp(space.text_w(w_value)) buflen = space.len_w(w_value) + 1 elif typ == rwinreg.REG_MULTI_SZ: @@ -306,7 +306,7 @@ if space.isinstance_w(w_item, space.w_unicode): w_item = space.call_method(w_item, 'encode', space.newtext('mbcs')) - item = space.str_w(w_item) + item = space.bytes_w(w_item) strings.append(item) buflen += len(item) + 1 except OperationError as e: @@ -438,7 +438,7 @@ if space.is_w(w_subkey, space.w_None): subkey = None else: - subkey = space.str_w(w_subkey) + subkey = space.text_w(w_subkey) null_dword = lltype.nullptr(rwin32.LPDWORD.TO) with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retDataSize: ret = rwinreg.RegQueryValueEx(hkey, subkey, null_dword, null_dword, diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -43,7 +43,7 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] w_initializer_type = space.type(w_initializer) - if w_initializer_type is space.w_str: + if w_initializer_type is space.w_bytes: 
a.descr_fromstring(space, w_initializer) elif w_initializer_type is space.w_list: a.descr_fromlist(space, w_initializer) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -577,7 +577,7 @@ # Common types with their own struct for cpyname, pypyexpr in { "PyType_Type": "space.w_type", - "PyString_Type": "space.w_str", + "PyString_Type": "space.w_bytes", "PyUnicode_Type": "space.w_unicode", "PyBaseString_Type": "space.w_basestring", "PyDict_Type": "space.w_dict", diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -52,13 +52,13 @@ @bootstrap_function def init_bytesobject(space): "Type description of PyBytesObject" - make_typedescr(space.w_str.layout.typedef, + make_typedescr(space.w_bytes.layout.typedef, basestruct=PyBytesObject.TO, attach=bytes_attach, dealloc=bytes_dealloc, realize=bytes_realize) -PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") +PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_bytes") def new_empty_str(space, length): """ @@ -66,8 +66,8 @@ interpreter object. The ob_sval may be mutated, until bytes_realize() is called. Refcount of the result is 1. 
""" - typedescr = get_typedescr(space.w_str.layout.typedef) - py_obj = typedescr.allocate(space, space.w_str, length) + typedescr = get_typedescr(space.w_bytes.layout.typedef) + py_obj = typedescr.allocate(space, space.w_bytes, length) py_str = rffi.cast(PyBytesObject, py_obj) py_str.c_ob_shash = -1 py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED @@ -132,7 +132,7 @@ return _PyString_AsString(space, ref) def _PyString_AsString(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_bytes: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way from pypy.module.cpyext.unicodeobject import ( @@ -182,7 +182,7 @@ @cpython_api([PyObject], Py_ssize_t, error=-1) def PyString_Size(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_bytes: ref = rffi.cast(PyBytesObject, ref) return ref.c_ob_size else: diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -108,7 +108,7 @@ ml.c_ml_meth = rffi.cast(PyCFunction_typedef, c_func.get_llhelper(space)) - method = api.PyDescr_NewMethod(space.w_str, ml) + method = api.PyDescr_NewMethod(space.w_bytes, ml) assert repr(method).startswith( " self.mmap.size: j = self.mmap.size - if not space.isinstance_w(w_item, space.w_str): + if not space.isinstance_w(w_item, space.w_bytes): raise oefmt(space.w_IndexError, "mmap slice assignment must be a string") value = space.realstr_w(w_item) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -149,14 +149,14 @@ "set_param() takes at most 1 
non-keyword argument, %d " "given", len(args_w)) if len(args_w) == 1: - text = space.str_w(args_w[0]) + text = space.text_w(args_w[0]) try: jit.set_user_param(None, text) except ValueError: raise oefmt(space.w_ValueError, "error in JIT parameters string") for key, w_value in kwds_w.items(): if key == 'enable_opts': - jit.set_param(None, 'enable_opts', space.str_w(w_value)) + jit.set_param(None, 'enable_opts', space.text_w(w_value)) else: intval = space.int_w(w_value) for name, _ in unroll_parameters: diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -280,7 +280,7 @@ if self.type == "bridge": code_repr = 'bridge no %d' % self.bridge_no else: - code_repr = space.str_w(space.repr(self.w_green_key)) + code_repr = space.text_w(space.repr(self.w_green_key)) return space.newtext('>' % (self.jd_name, lgt, code_repr)) diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -98,7 +98,7 @@ def accept_str_arg(self): w_obj = self.accept_obj_arg() - return self.space.str_w(w_obj) + return self.space.bytes_w(w_obj) def accept_unicode_arg(self): w_obj = self.accept_obj_arg() diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -45,9 +45,9 @@ return space.lookup_in_type(space.w_tuple, '__iter__') @specialize.memo() -def str_getitem(space): +def bytes_getitem(space): "Utility that returns the app-level descriptor str.__getitem__." 
- return space.lookup_in_type(space.w_str, '__getitem__') + return space.lookup_in_type(space.w_bytes, '__getitem__') @specialize.memo() def unicode_getitem(space): @@ -845,10 +845,10 @@ "'%%T'", w_obj) w_result = space.get_and_call_function(w_impl, w_obj) - if space.isinstance_w(w_result, space.w_str): + if space.isinstance_w(w_result, space.w_text): return w_result try: - result = space.str_w(w_result) # YYY + result = space.text_w(w_result) except OperationError, e: if not e.match(space, space.w_TypeError): raise diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -40,7 +40,7 @@ def test_constants(self): space = self.space space.translates(lambda: (space.w_None, space.w_True, space.w_False, - space.w_int, space.w_str, space.w_object, + space.w_int, space.w_bytes, space.w_object, space.w_TypeError)) def test_wrap(self): diff --git a/pypy/objspace/std/classdict.py b/pypy/objspace/std/classdict.py --- a/pypy/objspace/std/classdict.py +++ b/pypy/objspace/std/classdict.py @@ -18,8 +18,8 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if (space.is_w(w_lookup_type, space.w_str) or # Most common path first - space.abstract_issubclass_w(w_lookup_type, space.w_str)): + if (space.is_w(w_lookup_type, space.w_text) or # Most common path first + space.abstract_issubclass_w(w_lookup_type, space.w_text)): return self.getitem_str(w_dict, space.text_w(w_key)) elif space.abstract_issubclass_w(w_lookup_type, space.w_unicode): try: @@ -38,7 +38,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_text): self.setitem_str(w_dict, self.space.text_w(w_key), w_value) else: raise oefmt(space.w_TypeError, @@ -70,7 +70,7 @@ def delitem(self, w_dict, w_key): space = self.space w_key_type = 
space.type(w_key) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_text): key = self.space.text_w(w_key) if not self.unerase(w_dict.dstorage).deldictvalue(space, key): raise KeyError diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -175,7 +175,7 @@ return (space.float_w(real), space.float_w(imag)) # # Check that it is not a string (on which space.float() would succeed). - if (space.isinstance_w(w_complex, space.w_str) or + if (space.isinstance_w(w_complex, space.w_bytes) or space.isinstance_w(w_complex, space.w_unicode)): raise oefmt(space.w_TypeError, "complex number expected, got '%T'", w_complex) @@ -299,14 +299,14 @@ and space.is_w(space.type(w_real), space.w_complex)): return w_real - if space.isinstance_w(w_real, space.w_str) or \ + if space.isinstance_w(w_real, space.w_bytes) or \ space.isinstance_w(w_real, space.w_unicode): # a string argument if not noarg2: raise oefmt(space.w_TypeError, "complex() can't take second" " arg if first is a string") try: - realstr, imagstr = _split_complex(space.str_w(w_real)) + realstr, imagstr = _split_complex(space.text_w(w_real)) except ValueError: raise oefmt(space.w_ValueError, "complex() arg is a malformed string") diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1060,7 +1060,7 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_bytes) def get_empty_storage(self): res = {} @@ -1199,7 +1199,7 @@ space = self.space # XXX there are many more types return (space.is_w(w_lookup_type, space.w_NoneType) or - space.is_w(w_lookup_type, space.w_str) or + space.is_w(w_lookup_type, space.w_bytes) or space.is_w(w_lookup_type, space.w_unicode) ) diff --git 
a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -334,19 +334,10 @@ def unknown_fmtchar(self): space = self.space c = self.fmt[self.fmtpos - 1] - if do_unicode: - w_defaultencoding = space.call_function( - space.sys.get('getdefaultencoding')) - w_s = space.call_method(space.newunicode(c), - "encode", - w_defaultencoding, - space.newtext('replace')) - s = space.str_w(w_s) - else: - s = c + w_s = space.newunicode(c) if do_unicode else space.newbytes(c) raise oefmt(space.w_ValueError, - "unsupported format character '%s' (%s) at index %d", - s, hex(ord(c)), self.fmtpos - 1) + "unsupported format character %R (%s) at index %d", + w_s, hex(ord(c)), self.fmtpos - 1) @specialize.argtype(1) def std_wp(self, r): @@ -437,7 +428,7 @@ if space.isinstance_w(w_result, space.w_unicode): raise NeedUnicodeFormattingError - return space.str_w(w_result) + return space.bytes_w(w_result) def fmt_s(self, w_value): space = self.space @@ -462,8 +453,8 @@ def fmt_c(self, w_value): self.prec = -1 # just because space = self.space - if space.isinstance_w(w_value, space.w_str): - s = space.str_w(w_value) + if space.isinstance_w(w_value, space.w_bytes): + s = space.bytes_w(w_value) if len(s) != 1: raise oefmt(space.w_TypeError, "%c requires int or char") self.std_wp(s) @@ -511,7 +502,7 @@ def format(space, w_fmt, values_w, w_valuedict, do_unicode): "Entry point" if not do_unicode: - fmt = space.str_w(w_fmt) + fmt = space.bytes_w(w_fmt) formatter = StringFormatter(space, fmt, values_w, w_valuedict) try: result = formatter.format() diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -694,7 +694,7 @@ # we cannot construct a subclass of int instance with an # an overflowing long value = space.int_w(w_obj, allow_conversion=False) - elif space.isinstance_w(w_value, space.w_str): + elif 
space.isinstance_w(w_value, space.w_bytes): value, w_longval = _string_to_int_or_long(space, w_value, space.text_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -39,7 +39,7 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_text) def _never_equal_to(self, w_lookup_type): return False diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -738,7 +738,7 @@ def getitem(self, w_dict, w_key): space = self.space w_lookup_type = space.type(w_key) - if space.is_w(w_lookup_type, space.w_str): + if space.is_w(w_lookup_type, space.w_text): return self.getitem_str(w_dict, space.text_w(w_key)) elif _never_equal_to_string(space, w_lookup_type): return None @@ -757,7 +757,7 @@ def setitem(self, w_dict, w_key, w_value): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_text): self.setitem_str(w_dict, self.space.text_w(w_key), w_value) else: self.switch_to_object_strategy(w_dict) @@ -765,7 +765,7 @@ def setdefault(self, w_dict, w_key, w_default): space = self.space - if space.is_w(space.type(w_key), space.w_str): + if space.is_w(space.type(w_key), space.w_text): key = space.text_w(w_key) w_result = self.getitem_str(w_dict, key) if w_result is not None: @@ -780,7 +780,7 @@ space = self.space w_key_type = space.type(w_key) w_obj = self.unerase(w_dict.dstorage) - if space.is_w(w_key_type, space.w_str): + if space.is_w(w_key_type, space.w_text): key = self.space.text_w(w_key) flag = w_obj.deldictvalue(space, key) if not flag: diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ 
b/pypy/objspace/std/marshal_impl.py @@ -205,7 +205,7 @@ m.put(pack_float(w_float.floatval)) else: m.start(TYPE_FLOAT) - m.put_pascal(space.str_w(space.repr(w_float))) + m.put_pascal(space.text_w(space.repr(w_float))) @unmarshaller(TYPE_FLOAT) def unmarshal_float(space, u, tc): @@ -227,8 +227,8 @@ w_real = space.newfloat(w_complex.realval) w_imag = space.newfloat(w_complex.imagval) m.start(TYPE_COMPLEX) - m.put_pascal(space.str_w(space.repr(w_real))) - m.put_pascal(space.str_w(space.repr(w_imag))) + m.put_pascal(space.text_w(space.repr(w_real))) + m.put_pascal(space.text_w(space.repr(w_imag))) @unmarshaller(TYPE_COMPLEX) def unmarshal_complex(space, u, tc): @@ -248,7 +248,7 @@ @marshaller(W_BytesObject) def marshal_bytes(space, w_str, m): - s = space.str_w(w_str) + s = space.bytes_w(w_str) if m.version >= 1 and space.is_interned_str(s): # we use a native rtyper stringdict for speed try: @@ -363,7 +363,7 @@ def unmarshal_str(u): w_obj = u.get_w_obj() try: - return u.space.str_w(w_obj) + return u.space.bytes_w(w_obj) except OperationError as e: if e.match(u.space, u.space.w_TypeError): u.raise_exc('invalid marshal data for code object') diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -212,7 +212,7 @@ def descr___format__(space, w_obj, w_format_spec): if space.isinstance_w(w_format_spec, space.w_unicode): w_as_str = space.call_function(space.w_unicode, w_obj) - elif space.isinstance_w(w_format_spec, space.w_str): + elif space.isinstance_w(w_format_spec, space.w_bytes): w_as_str = space.str(w_obj) else: raise oefmt(space.w_TypeError, "format_spec must be a string") diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -89,10 +89,12 @@ for typedef, cls in builtin_type_classes.items(): w_type = self.gettypeobject(typedef) self.builtin_types[typedef.name] = 
w_type - setattr(self, 'w_' + typedef.name, w_type) + if typedef.name != "str": + setattr(self, 'w_' + typedef.name, w_type) + else: + self.w_bytes = w_type self._interplevel_classes[w_type] = cls - self.w_bytes = self.w_str - self.w_text = self.w_str # this is w_unicode on Py3 + self.w_text = self.w_bytes # this is w_unicode on Py3 self.w_dict.flag_map_or_seq = 'M' self.builtin_types["NotImplemented"] = self.w_NotImplemented self.builtin_types["Ellipsis"] = self.w_Ellipsis @@ -519,9 +521,9 @@ return self.lookup(w_obj, '__iter__') is tuple_iter(self) def _str_uses_no_iter(self, w_obj): - from pypy.objspace.descroperation import str_getitem + from pypy.objspace.descroperation import bytes_getitem return (self.lookup(w_obj, '__iter__') is None and - self.lookup(w_obj, '__getitem__') is str_getitem(self)) + self.lookup(w_obj, '__getitem__') is bytes_getitem(self)) def _uni_uses_no_iter(self, w_obj): from pypy.objspace.descroperation import unicode_getitem @@ -557,7 +559,7 @@ # fast path: XXX this is duplicating most of the logic # from the default __getattribute__ and the getattr() method... 
- name = self.str_w(w_name) + name = self.text_w(w_name) w_descr = w_type.lookup(name) e = None if w_descr is not None: diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -18,7 +18,7 @@ assert start >= 0 assert stop >= 0 #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), - # space.w_str): + # space.w_bytes): # return orig_obj return self._new(s[start:stop]) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1093,7 +1093,7 @@ if isinstance(w_obj, FakeString): return str return type(w_obj) - w_str = str + w_bytes = str w_text = str def str_w(self, string): diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -222,7 +222,7 @@ class AppTestStringFormat(BaseStringFormatTests): def setup_class(cls): - cls.w_s = cls.space.w_str + cls.w_s = cls.space.w_bytes def test_string_conversion(self): class x(object): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -283,4 +283,4 @@ # if it crashes, it means that space._type_isinstance didn't go through # the fast path, and tries to call type() (which is set to None just # above) - space.isinstance_w(w_a, space.w_str) # does not crash + space.isinstance_w(w_a, space.w_bytes) # does not crash diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py --- a/pypy/objspace/std/test/test_stdobjspace.py +++ b/pypy/objspace/std/test/test_stdobjspace.py @@ -43,7 +43,7 @@ from pypy.objspace.std.iterobject import W_SeqIterObject space = 
self.space - assert space._get_interplevel_cls(space.w_str) is W_BytesObject + assert space._get_interplevel_cls(space.w_bytes) is W_BytesObject assert space._get_interplevel_cls(space.w_int) is W_IntObject class X(W_BytesObject): def __init__(self): @@ -51,7 +51,7 @@ typedef = None - assert space.isinstance_w(X(), space.w_str) + assert space.isinstance_w(X(), space.w_bytes) w_sequenceiterator = space.gettypefor(W_SeqIterObject) cls = space._get_interplevel_cls(w_sequenceiterator) @@ -61,7 +61,7 @@ from pypy.objspace.std.bytesobject import W_AbstractBytesObject space = gettestobjspace(withstrbuf=True) - cls = space._get_interplevel_cls(space.w_str) + cls = space._get_interplevel_cls(space.w_bytes) assert cls is W_AbstractBytesObject def test_wrap_various_unsigned_types(self): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -635,7 +635,7 @@ def descr_repr(self, space): w_mod = self.get_module() - if w_mod is None or not space.isinstance_w(w_mod, space.w_str): + if w_mod is None or not space.isinstance_w(w_mod, space.w_text): mod = None else: mod = space.text_w(w_mod) @@ -691,7 +691,7 @@ def _check_new_args(space, w_name, w_bases, w_dict): if w_bases is None or w_dict is None: raise oefmt(space.w_TypeError, "type() takes 1 or 3 arguments") - if not space.isinstance_w(w_name, space.w_str): + if not space.isinstance_w(w_name, space.w_text): raise oefmt(space.w_TypeError, "type() argument 1 must be string, not %T", w_name) if not space.isinstance_w(w_bases, space.w_tuple): @@ -775,7 +775,7 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise oefmt(space.w_TypeError, "can't set %N.__name__", w_type) - if not space.isinstance_w(w_value, space.w_str): + if not space.isinstance_w(w_value, space.w_text): raise oefmt(space.w_TypeError, "can only assign string to %N.__name__, not '%T'", w_type, w_value) @@ -1049,7 +1049,7 @@ wantdict = False wantweakref 
= False w_slots = dict_w['__slots__'] - if (space.isinstance_w(w_slots, space.w_str) or + if (space.isinstance_w(w_slots, space.w_bytes) or space.isinstance_w(w_slots, space.w_unicode)): slot_names_w = [w_slots] else: diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -129,7 +129,7 @@ def _op_val(space, w_other, strict=None): if isinstance(w_other, W_UnicodeObject): return w_other._value - if space.isinstance_w(w_other, space.w_str): + if space.isinstance_w(w_other, space.w_bytes): return unicode_from_string(space, w_other)._value if strict: raise oefmt(space.w_TypeError, @@ -374,7 +374,7 @@ return space.is_w(space.type(w_obj), space.w_unicode) def _join_check_item(self, space, w_obj): - if (space.isinstance_w(w_obj, space.w_str) or + if (space.isinstance_w(w_obj, space.w_bytes) or space.isinstance_w(w_obj, space.w_unicode)): return 0 return 1 @@ -490,7 +490,7 @@ w_errors = space.newtext(errors) w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.newint(0)) - if not space.isinstance_w(w_retval, space.w_str): + if not space.isinstance_w(w_retval, space.w_bytes): raise oefmt(space.w_TypeError, "encoder did not return an string object (type '%T')", w_retval) @@ -545,7 +545,7 @@ def unicode_from_object(space, w_obj): if space.is_w(space.type(w_obj), space.w_unicode): return w_obj - elif space.is_w(space.type(w_obj), space.w_str): + elif space.is_w(space.type(w_obj), space.w_bytes): w_res = w_obj else: w_unicode_method = space.lookup(w_obj, "__unicode__") @@ -564,17 +564,17 @@ return unicode_from_encoded_object(space, w_res, None, "strict") -def unicode_from_string(space, w_str): +def unicode_from_string(space, w_bytes): # this is a performance and bootstrapping hack encoding = getdefaultencoding(space) if encoding != 'ascii': - return unicode_from_encoded_object(space, w_str, encoding, "strict") - 
s = space.bytes_w(w_str) + return unicode_from_encoded_object(space, w_bytes, encoding, "strict") + s = space.bytes_w(w_bytes) try: return W_UnicodeObject(s.decode("ascii")) except UnicodeDecodeError: # raising UnicodeDecodeError is messy, "please crash for me" - return unicode_from_encoded_object(space, w_str, "ascii", "strict") + return unicode_from_encoded_object(space, w_bytes, "ascii", "strict") class UnicodeDocstrings: diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -54,5 +54,5 @@ if isinstance(e, InvalidBaseError): raise OperationError(space.w_ValueError, space.newtext(e.msg)) else: - raise oefmt(space.w_ValueError, '%s: %s', - e.msg, space.str_w(space.repr(w_source))) + raise oefmt(space.w_ValueError, '%s: %R', + e.msg, w_source) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -230,7 +230,7 @@ def pypyraises(space, w_ExpectedException, w_expr, __args__): """A built-in function providing the equivalent of py.test.raises().""" From pypy.commits at gmail.com Fri Dec 2 04:40:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 01:40:53 -0800 (PST) Subject: [pypy-commit] pypy default: remove one test case which is not vectorized anymore Message-ID: <584141a5.8ab81c0a.d8962.af7d@mx.google.com> Author: Richard Plangger Branch: Changeset: r88810:b65d7af46522 Date: 2016-12-02 10:37 +0100 http://bitbucket.org/pypy/pypy/changeset/b65d7af46522/ Log: remove one test case which is not vectorized anymore diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -75,7 +75,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), - ('sum','float', 2581, 2581, 1), ('prod','int', 1, 3178, 1), 
('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), From pypy.commits at gmail.com Fri Dec 2 04:52:04 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 01:52:04 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: next cpython bug Message-ID: <58414444.820bc30a.52e99.9fb5@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5756:234907e53211 Date: 2016-12-02 10:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/234907e53211/ Log: next cpython bug diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -204,6 +204,18 @@ the first line. Otherwise I guess the timings happen to make that test pass. +* CPython 3.5.2: this ``nonlocal`` seems not to have a reasonable + effect (note that if we use a different name instead of ``__class__``, + this example correctly complain that there is no binding in the outer + scope of ``Y``):: + + class Y: + class X: + nonlocal __class__ + __class__ = 42 + print(locals()['__class__']) # 42 + print(__class__) # but this is a NameError + Other issues of "dubious IMHO" status ------------------------------------- From pypy.commits at gmail.com Fri Dec 2 04:52:54 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 01:52:54 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Minimal changes to pass these tests Message-ID: <58414476.61adc20a.f6b81.a485@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88811:d1edc5aca1da Date: 2016-12-02 10:52 +0100 http://bitbucket.org/pypy/pypy/changeset/d1edc5aca1da/ Log: Minimal changes to pass these tests diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -213,7 +213,7 @@ try: role_here = self.roles[name] except KeyError: - if name in bound: + if bound and name in bound: self.symbols[name] = 
SCOPE_FREE self.free_vars[name] = None else: @@ -330,7 +330,7 @@ return misc.mangle(name, self.name) def _pass_special_names(self, local, new_bound): - assert '__class__' in local + #assert '__class__' in local new_bound['__class__'] = None def _finalize_cells(self, free): @@ -485,7 +485,9 @@ def visit_Global(self, glob): for name in glob.names: old_role = self.scope.lookup_role(name) - if old_role & (SYM_USED | SYM_ASSIGNED): + if (old_role & (SYM_USED | SYM_ASSIGNED) and not + (name == '__class__' and + self.scope._hide_bound_from_nested_scopes)): if old_role & SYM_ASSIGNED: msg = "name '%s' is assigned to before global declaration" \ % (name,) @@ -499,7 +501,9 @@ def visit_Nonlocal(self, nonl): for name in nonl.names: old_role = self.scope.lookup_role(name) - if old_role & (SYM_USED | SYM_ASSIGNED): + if (old_role & (SYM_USED | SYM_ASSIGNED) and not + (name == '__class__' and + self.scope._hide_bound_from_nested_scopes)): if old_role & SYM_ASSIGNED: msg = "name '%s' is assigned to before nonlocal declaration" \ % (name,) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -399,8 +399,11 @@ class X: nonlocal __class__ __class__ = 42 + assert locals()['__class__'] == 42 + # ^^^ but at the same place, reading '__class__' gives a NameError + # in CPython 3.5.2. 
Looks like a bug to me def testing(): - return 42 # 'Y.__class__' is *not* set to 42, at least on CPython 3.5.2 + return 42 ''' ]: space.call_args(w_filterwarnings, filter_arg) From pypy.commits at gmail.com Fri Dec 2 05:16:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 02:16:09 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: extended test to support the correct indexing of a reveresed memoryview (test passes partly) Message-ID: <584149e9.0e0a1c0a.9fec3.c083@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88812:429efb9740ad Date: 2016-12-02 11:15 +0100 http://bitbucket.org/pypy/pypy/changeset/429efb9740ad/ Log: extended test to support the correct indexing of a reveresed memoryview (test passes partly) diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -212,12 +212,11 @@ while dim < length: w_obj = w_tuple.getitem(space, dim) index = space.getindex_w(w_obj, space.w_IndexError) - start = self.lookup_dimension(space, start, dim, index) + start = self.lookup_dimension(space, self.buf, start, dim, index) dim += 1 return start - def lookup_dimension(self, space, start, dim, index): - view = self.buf + def lookup_dimension(self, space, view, start, dim, index): shape = view.getshape() strides = view.getstrides() nitems = shape[dim] @@ -264,9 +263,16 @@ # ^^^ for a non-slice index, this returns (index, 0, 0, 1) if step == 0: # index only itemsize = self.getitemsize() + dim = self.getndim() if itemsize == 1: - ch = self.buf.getitem(start) - return space.newint(ord(ch)) + if dim == 0: + raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") + elif dim == 1: + idx = self.lookup_dimension(space, self, 0, 0, start) + ch = self.buf.getitem(idx) + return space.newint(ord(ch)) + else: + raise oefmt(space.w_NotImplementedError, "multi-dimensional sub-views are not implemented") else: # TODO: this 
probably isn't very fast buf = SubBuffer(self.buf, start*itemsize, itemsize) @@ -292,7 +298,7 @@ shape = self.getshape()[:] itemsize = self.getitemsize() dim = 0 - self.buf = SubBuffer(self.buf, strides[dim] * start, size*step*itemsize) + self.buf = SubBuffer(self.buf, strides[dim] * start, itemsize) shape[dim] = size strides[dim] = strides[dim] * step self.strides = strides diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -411,8 +411,16 @@ raises(TypeError, "view.cast('h', shape=(3,3))") def test_reversed(self): - bytes = b"\x01\x00\x02\x00\x03\x00" + bytes = b"\x01\x01\x02\x02\x03\x03" view = memoryview(bytes) revlist = list(reversed(view.tolist())) + assert view[::-1][0] == 3 + assert view[::-1][1] == 3 + assert view[::-1][2] == 2 + assert view[::-1][3] == 2 + assert view[::-1][4] == 1 + assert view[::-1][5] == 1 + assert view[::-1][-1] == 1 + assert view[::-1][-2] == 1 assert list(reversed(view)) == revlist assert list(reversed(view)) == view[::-1].tolist() From pypy.commits at gmail.com Fri Dec 2 05:19:03 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 02:19:03 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Partly revert and complain on seeing 'global __class__' inside a class Message-ID: <58414a97.88711c0a.65c47.bcad@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88813:e4f05423a1ff Date: 2016-12-02 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/e4f05423a1ff/ Log: Partly revert and complain on seeing 'global __class__' inside a class body. 
This is not what CPython does, but I don't understand how CPython works and I think it's too much of a special case (and, mostly, not tested at all) diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -485,9 +485,13 @@ def visit_Global(self, glob): for name in glob.names: old_role = self.scope.lookup_role(name) - if (old_role & (SYM_USED | SYM_ASSIGNED) and not - (name == '__class__' and - self.scope._hide_bound_from_nested_scopes)): + if (self.scope._hide_bound_from_nested_scopes and + name == '__class__'): + msg = ("'global __class__' inside a class statement is not " + "implemented in PyPy") + raise SyntaxError(msg, glob.lineno, glob.col_offset, + filename=self.compile_info.filename) + if old_role & (SYM_USED | SYM_ASSIGNED): if old_role & SYM_ASSIGNED: msg = "name '%s' is assigned to before global declaration" \ % (name,) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1127,10 +1127,12 @@ source = """if 1: class X: global __class__ - def f(self): - super() """ py.test.raises(SyntaxError, self.simple_test, source, None, None) + # XXX this raises "'global __class__' inside a class statement + # is not implemented in PyPy". 
The reason it is not is that it + # seems we need to refactor some things to implement it exactly + # like CPython, and I seriously don't think there is a point def test_error_message_1(self): source = """if 1: diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -381,12 +381,6 @@ filter_arg = Arguments(space, [ space.wrap('error') ], ["module"], [space.wrap("")]) for code in [''' -class C: - global __class__ - __class__ = 42 -def testing(): - return __class__ -''', ''' def testing(): __class__ = 0 def f(): From pypy.commits at gmail.com Fri Dec 2 05:30:52 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 02:30:52 -0800 (PST) Subject: [pypy-commit] pypy py3.5: document another very obscure case where CPython differs from PyPy Message-ID: <58414d5c.ca06c20a.ecbba.b5f8@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88814:14f7085831c5 Date: 2016-12-02 11:30 +0100 http://bitbucket.org/pypy/pypy/changeset/14f7085831c5/ Log: document another very obscure case where CPython differs from PyPy diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1133,6 +1133,14 @@ # is not implemented in PyPy". 
The reason it is not is that it # seems we need to refactor some things to implement it exactly # like CPython, and I seriously don't think there is a point + # + # Another case which so far works on CPython but not on PyPy: + #class X: + # __class__ = 42 + # def f(self): + # return __class__ + #assert X.__dict__['__class__'] == 42 + #assert X().f() is X def test_error_message_1(self): source = """if 1: From pypy.commits at gmail.com Fri Dec 2 05:58:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 02:58:01 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: passes the test for reverse iterating a memory view (flat memory views for bytes only tested) Message-ID: <584153b9.c8111c0a.702ea.cd46@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88815:2bb952fe0d93 Date: 2016-12-02 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/2bb952fe0d93/ Log: passes the test for reverse iterating a memory view (flat memory views for bytes only tested) diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -109,6 +109,7 @@ self.buf = buf self.length = buf.getlength() self.pos = 0 + self.strides = None self.result_w = [] # list of wrapped objects # See above comment on operate. 
@@ -126,11 +127,18 @@ self.pos = (self.pos + mask) & ~mask def finished(self): - if self.pos != self.length: - raise StructError("unpack str size too long for format") + if self.strides: + # FIXME richard + pass + else: + if self.pos != self.length: + raise StructError("unpack str size too long for format") def read(self, count): - end = self.pos + count + if self.strides: + end = self.pos + count * self.strides[0] + else: + end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") s = self.buf.getslice(self.pos, end, 1, count) @@ -151,5 +159,15 @@ string, pos = self.buf.as_str_and_offset_maybe() return string, pos+self.pos - def skip(self, size): - self.read(size) # XXX, could avoid taking the slice + def skip(self, count): + # assumption: UnpackFormatIterator only iterates over + # flat structures (continous memory) either, forward (index + # is increasing) or reverse + if self.strides: + assert len(self.strides) == 1 + end = self.pos + count * self.strides[0] + else: + end = self.pos + count + if end > self.length: + raise StructError("unpack str size too short for format") + self.pos = end diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -167,13 +167,18 @@ raise NotImplementedError elif dim == 1: itemsize = self.getitemsize() - return self._tolist(space, buf, buf.getlength() // itemsize, fmt) + return self._tolist(space, buf, self.getlength() // itemsize, fmt) else: return self._tolist_rec(space, buf, 0, 0, fmt) def _tolist(self, space, buf, count, fmt): # TODO: this probably isn't very fast fmtiter = UnpackFormatIterator(space, buf) + # patch the length, necessary buffer might have offset + # which leads to wrong length calculation if e.g. 
the + # memoryview is reversed + fmtiter.length = self.getlength() + fmtiter.strides = self.getstrides() fmtiter.interpret(fmt * count) return space.newlist(fmtiter.result_w) From pypy.commits at gmail.com Fri Dec 2 06:34:06 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 03:34:06 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <58415c2e.c19d1c0a.f1734.da80@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88816:e020df6cd749 Date: 2016-12-02 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e020df6cd749/ Log: fix test diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -175,7 +175,7 @@ assert st.st_atime == 41 assert st.st_mtime == 42.1 assert st.st_ctime == 43 - assert repr(st).startswith(self.posix.__name__ + '.stat_result') + assert repr(st).startswith('os.stat_result') def test_stat_lstat(self): import stat From pypy.commits at gmail.com Fri Dec 2 07:01:25 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 02 Dec 2016 04:01:25 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: fixes Message-ID: <58416295.42061c0a.4ee49.ea9f@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88817:4c2b7177aeea Date: 2016-12-02 13:00 +0100 http://bitbucket.org/pypy/pypy/changeset/4c2b7177aeea/ Log: fixes diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -51,7 +51,7 @@ self.fields = fields def spacebind(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) + return space.newtuple([space.newtext(field) for field in self.fields]) class W_AST(W_Root): @@ -1240,7 +1240,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ImportFrom) - w_module = space.newtext(self.module) # identifier + w_module = 
space.newtext_or_none(self.module) # identifier space.setattr(w_node, space.newtext('module'), w_module) if self.names is None: names_w = [] @@ -3248,9 +3248,9 @@ args_w = [node.to_object(space) for node in self.args] # expr w_args = space.newlist(args_w) space.setattr(w_node, space.newtext('args'), w_args) - w_vararg = space.newtext(self.vararg) # identifier + w_vararg = space.newtext_or_none(self.vararg) # identifier space.setattr(w_node, space.newtext('vararg'), w_vararg) - w_kwarg = space.newtext(self.kwarg) # identifier + w_kwarg = space.newtext_or_none(self.kwarg) # identifier space.setattr(w_node, space.newtext('kwarg'), w_kwarg) if self.defaults is None: defaults_w = [] @@ -3323,7 +3323,7 @@ w_node = space.call_function(get(space).w_alias) w_name = space.newtext(self.name) # identifier space.setattr(w_node, space.newtext('name'), w_name) - w_asname = space.newtext(self.asname) # identifier + w_asname = space.newtext_or_none(self.asname) # identifier space.setattr(w_node, space.newtext('asname'), w_asname) return w_node diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -132,6 +132,8 @@ elif field.type.value == "int": return "space.newint(%s)" % (value,) elif field.type.value == "identifier": + if field.opt: + return "space.newtext_or_none(%s)" % (value,) return "space.newtext(%s)" % (value,) else: wrapper = "%s.to_object(space)" % (value,) diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -37,7 +37,7 @@ assert not hasattr_(space.w_int, 'nonexistingattr') buf = rffi.str2charp('__len__') - assert api.PyObject_HasAttrString(space.w_str, buf) + assert api.PyObject_HasAttrString(space.w_bytes, buf) assert not api.PyObject_HasAttrString(space.w_int, buf) 
rffi.free_charp(buf) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -297,11 +297,11 @@ def index(self, w_obj): return self.wrap(self.int_w(w_obj)) - def str_w(self, w_obj): + def bytes_w(self, w_obj): if isinstance(w_obj, StringObject): return w_obj.v raise NotImplementedError - text_w = str_w + text_w = bytes_w def unicode_w(self, w_obj): # XXX diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -4,7 +4,8 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.sliceobject import W_SliceObject from rpython.rlib.buffer import StringBuffer -from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize +from rpython.rlib.objectmodel import (instantiate, we_are_translated, specialize, + not_rpython) from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import r_uint, r_singlefloat from rpython.rtyper.extregistry import ExtRegistryEntry @@ -141,8 +142,8 @@ is_root(w_obj) return NonConstant(False) + @not_rpython def unwrap(self, w_obj): - "NOT_RPYTHON" raise NotImplementedError def newdict(self, module=False, instance=False, kwargs=False, @@ -194,8 +195,8 @@ def newbuffer(self, x): return w_some_obj() + @not_rpython def marshal_w(self, w_obj): - "NOT_RPYTHON" raise NotImplementedError def newbytes(self, x): @@ -207,7 +208,7 @@ newtext = newbytes newtext_or_none = newbytes - @specialize.argtype(1) + @not_rpython def wrap(self, x): if not we_are_translated(): if isinstance(x, gateway.interp2app): @@ -219,15 +220,11 @@ if isinstance(x, list): if x == []: # special case: it is used e.g. 
in sys/__init__.py return w_some_obj() - self._wrap_not_rpython(x) + raise NotImplementedError return w_some_obj() - def _wrap_not_rpython(self, x): - "NOT_RPYTHON" - raise NotImplementedError - + @not_rpython def _see_interp2app(self, interp2app): - "NOT_RPYTHON" activation = interp2app._code.activation def check(): scope_w = [w_some_obj()] * NonConstant(42) @@ -236,8 +233,8 @@ check = func_with_new_name(check, 'check__' + interp2app.name) self._seen_extras.append(check) + @not_rpython def _see_getsetproperty(self, getsetproperty): - "NOT_RPYTHON" space = self def checkprop(): getsetproperty.fget(getsetproperty, space, w_some_obj()) @@ -388,7 +385,10 @@ for name in (ObjSpace.ConstantTable + ObjSpace.ExceptionTable + BUILTIN_TYPES): - setattr(space, 'w_' + name, w_some_obj()) + if name != "str": + setattr(space, 'w_' + name, w_some_obj()) + space.w_bytes = w_some_obj() + space.w_text = w_some_obj() space.w_type = w_some_type() # for (name, _, arity, _) in ObjSpace.MethodTable: diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -30,7 +30,7 @@ def foobar(space, x, w_y, z): is_root(w_y) see() - return space.wrap(x - z) + return space.newint(x - z) space = FakeObjSpace() space.wrap(interp2app(foobar, unwrap_spec=[ObjSpace, int, W_Root, int])) space.translates() @@ -89,7 +89,7 @@ space = FakeObjSpace() def f(i): - w_x = space.wrap(i) + w_x = space.newint(i) w_type = space.type(w_x) return len(w_type.mro_w) diff --git a/pypy/objspace/fake/test/test_objspace.py b/pypy/objspace/fake/test/test_objspace.py --- a/pypy/objspace/fake/test/test_objspace.py +++ b/pypy/objspace/fake/test/test_objspace.py @@ -45,8 +45,8 @@ def test_wrap(self): space = self.space - space.translates(lambda: (space.wrap(42), space.wrap(42.5), - space.wrap("foo"))) + space.translates(lambda: (space.newint(42), space.newfloat(42.5), + 
space.newtext("foo"))) def test_call_args(self): space = self.space From pypy.commits at gmail.com Fri Dec 2 07:47:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 04:47:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: test that checks multidim. memoryview tolist + reversed Message-ID: <58416d53.c4811c0a.cf05c.fb02@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88819:a62844213890 Date: 2016-12-02 13:46 +0100 http://bitbucket.org/pypy/pypy/changeset/a62844213890/ Log: test that checks multidim. memoryview tolist + reversed diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -135,9 +135,8 @@ def read(self, count): if self.strides: - end = self.pos + count * self.strides[0] - else: - end = self.pos + count + count = self.strides[0] + end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") s = self.buf.getslice(self.pos, end, 1, count) diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -167,18 +167,20 @@ raise NotImplementedError elif dim == 1: itemsize = self.getitemsize() - return self._tolist(space, buf, self.getlength() // itemsize, fmt) + return self._tolist(space, buf, self.getlength(), itemsize, fmt, + self.getstrides()) else: return self._tolist_rec(space, buf, 0, 0, fmt) - def _tolist(self, space, buf, count, fmt): + def _tolist(self, space, buf, bytecount, itemsize, fmt, strides=None): # TODO: this probably isn't very fast + count = bytecount // itemsize fmtiter = UnpackFormatIterator(space, buf) # patch the length, necessary buffer might have offset # which leads to wrong length calculation if e.g. 
the # memoryview is reversed - fmtiter.length = self.getlength() - fmtiter.strides = self.getstrides() + fmtiter.length = bytecount + fmtiter.strides = strides fmtiter.interpret(fmt * count) return space.newlist(fmtiter.result_w) @@ -193,12 +195,13 @@ # if dim >= self.getndim(): bytecount = (stride * dimshape) - count = bytecount // itemsize - return self._tolist(space, buf, count, fmt) + return self._tolist(space, buf, bytecount, itemsize, fmt, [stride]) items = [None] * dimshape for i in range(dimshape): - item = self._tolist_rec(space, SubBuffer(buf, start, stride), start, idim+1, fmt) + import pdb; pdb.set_trace() + buf = SubBuffer(buf, start, stride) + item = self._tolist_rec(space, buf, start, idim+1, fmt) items[i] = item start += stride diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -1,4 +1,5 @@ import py +import pytest import struct from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app @@ -424,3 +425,18 @@ assert view[::-1][-2] == 1 assert list(reversed(view)) == revlist assert list(reversed(view)) == view[::-1].tolist() + +class AppTestMemoryViewReversed(object): + spaceconfig = dict(usemodules=['array']) + def test_reversed_non_bytes(self): + import array + items = [1,2,3,9,7,5] + formats = ['h'] + for fmt in formats: + bytes = array.array(fmt, items) + view = memoryview(bytes) + bview = view.cast('b') + rview = bview.cast(fmt, shape=(2,3)) + assert rview.tolist() == [[1,2,3],[9,7,5]] + assert rview[::-1].tolist() == [[3,2,1], [5,7,9]] + raises(NotImplementedError, list, reversed(rview)) From pypy.commits at gmail.com Fri Dec 2 07:47:13 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 04:47:13 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: check if end has reached when UnpackFormatIterator walks in backward direction Message-ID: 
<58416d51.96a61c0a.ed422.fa20@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88818:fb36dd2e7878 Date: 2016-12-02 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/fb36dd2e7878/ Log: check if end has reached when UnpackFormatIterator walks in backward direction diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -127,12 +127,11 @@ self.pos = (self.pos + mask) & ~mask def finished(self): - if self.strides: - # FIXME richard - pass - else: - if self.pos != self.length: - raise StructError("unpack str size too long for format") + value = self.pos + if self.strides and self.strides[0] < 0: + value = -self.pos + if value != self.length: + raise StructError("unpack str size too long for format") def read(self, count): if self.strides: From pypy.commits at gmail.com Fri Dec 2 09:24:32 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 06:24:32 -0800 (PST) Subject: [pypy-commit] pypy py3.5: atexit._ncallbacks() Message-ID: <58418420.0bba1c0a.3928d.20d4@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88820:ec203b75befa Date: 2016-12-02 15:23 +0100 http://bitbucket.org/pypy/pypy/changeset/ec203b75befa/ Log: atexit._ncallbacks() diff --git a/pypy/module/atexit/__init__.py b/pypy/module/atexit/__init__.py --- a/pypy/module/atexit/__init__.py +++ b/pypy/module/atexit/__init__.py @@ -15,5 +15,6 @@ 'unregister': 'app_atexit.unregister', '_clear': 'app_atexit.clear', '_run_exitfuncs': 'app_atexit.run_exitfuncs', + '_ncallbacks': 'app_atexit.ncallbacks', } diff --git a/pypy/module/atexit/app_atexit.py b/pypy/module/atexit/app_atexit.py --- a/pypy/module/atexit/app_atexit.py +++ b/pypy/module/atexit/app_atexit.py @@ -50,3 +50,6 @@ for i, (f, _, _) in enumerate(atexit_callbacks): if f == func: atexit_callbacks[i] = (None, None, None) + +def ncallbacks(): + return len(atexit_callbacks) diff --git 
a/pypy/module/atexit/test/test_atexit.py b/pypy/module/atexit/test/test_atexit.py --- a/pypy/module/atexit/test/test_atexit.py +++ b/pypy/module/atexit/test/test_atexit.py @@ -13,7 +13,9 @@ print("h2") atexit.register(h1) atexit.register(h2) + assert atexit._ncallbacks() == 2 atexit._run_exitfuncs() + assert atexit._ncallbacks() == 0 assert capture.getvalue() == 'h2\nh1\n' finally: sys.stdout = stdout From pypy.commits at gmail.com Fri Dec 2 09:55:30 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 06:55:30 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Remove @cpython_only for tests that pass on PyPy too nowadays Message-ID: <58418b62.6737c20a.352bb.19c8@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88821:1d3d5b34031a Date: 2016-12-02 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/1d3d5b34031a/ Log: Remove @cpython_only for tests that pass on PyPy too nowadays diff --git a/lib-python/3/test/test_scope.py b/lib-python/3/test/test_scope.py --- a/lib-python/3/test/test_scope.py +++ b/lib-python/3/test/test_scope.py @@ -1,7 +1,7 @@ import unittest import weakref -from test.support import check_syntax_error, cpython_only +from test.support import check_syntax_error from test.support import gc_collect @@ -500,7 +500,6 @@ self.assertNotIn("x", varnames) self.assertIn("y", varnames) - @cpython_only def testLocalsClass_WithTrace(self): # Issue23728: after the trace function returns, the locals() # dictionary is used to update all variables, this used to @@ -530,7 +529,6 @@ inst = f(3)() self.assertEqual(inst.a, inst.m()) - @cpython_only def testInteractionWithTraceFunc(self): import sys @@ -730,7 +728,6 @@ self.assertFalse(hasattr(X, "x")) self.assertEqual(x, 42) - @cpython_only def testCellLeak(self): # Issue 17927. 
# @@ -756,6 +753,7 @@ tester.dig() ref = weakref.ref(tester) del tester + gc_collect() self.assertIsNone(ref()) From pypy.commits at gmail.com Fri Dec 2 10:21:32 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 07:21:32 -0800 (PST) Subject: [pypy-commit] pypy py3.5: implement LOAD_CLASSDEREF Message-ID: <5841917c.6249c20a.7805b.258e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88822:806213151c0a Date: 2016-12-02 16:20 +0100 http://bitbucket.org/pypy/pypy/changeset/806213151c0a/ Log: implement LOAD_CLASSDEREF diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -682,12 +682,11 @@ ops.JUMP_IF_FALSE_OR_POP: 0, ops.POP_JUMP_IF_TRUE: -1, ops.POP_JUMP_IF_FALSE: -1, - # TODO ops.JUMP_IF_NOT_DEBUG: 0, # TODO ops.BUILD_LIST_FROM_ARG: 1, - # TODO + ops.LOAD_CLASSDEREF: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -243,6 +243,8 @@ op = name_ops_fast(ctx) elif scope == symtable.SCOPE_FREE: op = name_ops_deref(ctx) + if op == ops.LOAD_DEREF and isinstance(self, ClassCodeGenerator): + op = ops.LOAD_CLASSDEREF container = self.free_vars elif scope == symtable.SCOPE_CELL: op = name_ops_deref(ctx) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1151,6 +1151,17 @@ assert e.value.msg == ( "'await' expressions in comprehensions are not supported") + def test_load_classderef(self): + source = """if 1: + def f(): + x = 42 + class X: + locals()["x"] = 43 + y = x + return X.y + """ + yield self.st, source, "f()", 43 + class AppTestCompiler: diff --git a/pypy/interpreter/pyopcode.py 
b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -348,6 +348,8 @@ self.LOAD_CONST(oparg, next_instr) elif opcode == opcodedesc.LOAD_DEREF.index: self.LOAD_DEREF(oparg, next_instr) + elif opcode == opcodedesc.LOAD_CLASSDEREF.index: + self.LOAD_CLASSDEREF(oparg, next_instr) elif opcode == opcodedesc.LOAD_FAST.index: self.LOAD_FAST(oparg, next_instr) elif opcode == opcodedesc.LOAD_GLOBAL.index: @@ -521,6 +523,18 @@ else: self.pushvalue(w_value) + def LOAD_CLASSDEREF(self, varindex, next_instr): + # like LOAD_DEREF but used in class bodies + space = self.space + i = varindex - len(self.pycode.co_cellvars) + assert i >= 0 + name = self.pycode.co_freevars[i] + w_value = space.finditem(self.debugdata.w_locals, space.wrap(name)) + if w_value is None: + self.LOAD_DEREF(varindex, next_instr) + else: + self.pushvalue(w_value) + def STORE_DEREF(self, varindex, next_instr): # nested scopes: access a variable through its cell object w_newvalue = self.popvalue() From pypy.commits at gmail.com Fri Dec 2 10:30:13 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 07:30:13 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix doctest Message-ID: <58419385.05bd1c0a.ef06e.3bde@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88823:c4e8d087daf0 Date: 2016-12-02 16:29 +0100 http://bitbucket.org/pypy/pypy/changeset/c4e8d087daf0/ Log: fix doctest diff --git a/lib-python/3/test/test_unpack_ex.py b/lib-python/3/test/test_unpack_ex.py --- a/lib-python/3/test/test_unpack_ex.py +++ b/lib-python/3/test/test_unpack_ex.py @@ -233,25 +233,25 @@ Overridden parameters - >>> f(x=5, **{'x': 3}, y=2) + >>> f(x=5, **{'x': 3}, y=2) # doctest:+ELLIPSIS Traceback (most recent call last): ... 
- TypeError: f() got multiple values for keyword argument 'x' + TypeError: ...got multiple values for keyword argument 'x' - >>> f(**{'x': 3}, x=5, y=2) + >>> f(**{'x': 3}, x=5, y=2) # doctest:+ELLIPSIS Traceback (most recent call last): ... - TypeError: f() got multiple values for keyword argument 'x' + TypeError: ...got multiple values for keyword argument 'x' - >>> f(**{'x': 3}, **{'x': 5}, y=2) + >>> f(**{'x': 3}, **{'x': 5}, y=2) # doctest:+ELLIPSIS Traceback (most recent call last): ... - TypeError: f() got multiple values for keyword argument 'x' + TypeError: ...got multiple values for keyword argument 'x' - >>> f(**{1: 3}, **{1: 5}) + >>> f(**{1: 3}, **{1: 5}) # doctest:+ELLIPSIS Traceback (most recent call last): ... - TypeError: f() keywords must be strings + TypeError: ...keywords must be strings... Unpacking non-sequence From pypy.commits at gmail.com Fri Dec 2 10:48:13 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 07:48:13 -0800 (PST) Subject: [pypy-commit] pypy default: Preserve the order of a literal set: previously, it would be built in Message-ID: <584197bd.54b31c0a.70faf.3fdf@mx.google.com> Author: Armin Rigo Branch: Changeset: r88824:a5a64897d7a4 Date: 2016-12-02 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/a5a64897d7a4/ Log: Preserve the order of a literal set: previously, it would be built in reverse order diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1295,9 +1295,10 @@ @jit.unroll_safe def BUILD_SET(self, itemcount, next_instr): w_set = self.space.newset() - for i in range(itemcount): - w_item = self.popvalue() + for i in range(itemcount-1, -1, -1): + w_item = self.peekvalue(i) self.space.call_method(w_set, 'add', w_item) + self.popvalues(itemcount) self.pushvalue(w_set) def STORE_MAP(self, oparg, next_instr): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- 
a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -729,6 +729,10 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_host_is_pypy = cls.space.wrap( + '__pypy__' in sys.builtin_module_names) + def test_bom_with_future(self): s = '\xef\xbb\xbffrom __future__ import division\nx = 1/2' ns = {} @@ -771,6 +775,18 @@ assert math.copysign(1., c[0]) == -1.0 assert math.copysign(1., c[1]) == -1.0 + def test_dict_and_set_literal_order(self): + x = 1 + l1 = list({1:'a', 3:'b', 2:'c', 4:'d'}) + l2 = list({1, 3, 2, 4}) + l3 = list({x:'a', 3:'b', 2:'c', 4:'d'}) + l4 = list({x, 3, 2, 4}) + if not self.host_is_pypy: + # the full test relies on the host Python providing ordered dicts + assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 4} + else: + assert l1 == l2 == l3 == l4 == [1, 3, 2, 4] + ##class TestPythonAstCompiler(BaseTestCompiler): ## def setup_method(self, method): From pypy.commits at gmail.com Fri Dec 2 11:14:55 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:14:55 -0800 (PST) Subject: [pypy-commit] pypy default: Document the fix of set literal orders Message-ID: <58419dff.0a74c20a.f20e7.3c2e@mx.google.com> Author: Armin Rigo Branch: Changeset: r88825:668cdca5a890 Date: 2016-12-02 17:14 +0100 http://bitbucket.org/pypy/pypy/changeset/668cdca5a890/ Log: Document the fix of set literal orders diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,15 @@ .. this is a revision shortly after release-pypy2.7-v5.6 .. startrev: 7e9787939641 + +Since a while now, PyPy preserves the order of dictionaries and sets. +However, the set literal syntax ``{x, y, z}`` would by mistake build a +set with the opposite order: ``set([z, y, x])``. This has been fixed. +Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be +``set([5.0])``, but in 2.7.trunk it is ``set([5])``. 
PyPy's behavior +changed in exactly the same way because of this fix. + + .. branch: rpython-error-to-systemerror Any uncaught RPython exception (from a PyPy bug) is turned into an From pypy.commits at gmail.com Fri Dec 2 11:26:16 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:26:16 -0800 (PST) Subject: [pypy-commit] pypy default: Test and fix: type names Message-ID: <5841a0a8.6a5cc20a.f6424.45b4@mx.google.com> Author: Armin Rigo Branch: Changeset: r88826:1e7540052811 Date: 2016-12-02 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/1e7540052811/ Log: Test and fix: type names diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -4,9 +4,11 @@ def test_Ellipsis(self): assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' + assert Ellipsis.__class__.__name__ == 'ellipsis' def test_NotImplemented(self): def f(): return NotImplemented assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' + assert NotImplemented.__class__.__name__ == 'NotImplementedType' diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -770,12 +770,12 @@ ) assert not Cell.typedef.acceptable_as_base_class # no __new__ -Ellipsis.typedef = TypeDef("Ellipsis", +Ellipsis.typedef = TypeDef("ellipsis", __repr__ = interp2app(Ellipsis.descr__repr__), ) assert not Ellipsis.typedef.acceptable_as_base_class # no __new__ -NotImplemented.typedef = TypeDef("NotImplemented", +NotImplemented.typedef = TypeDef("NotImplementedType", __repr__ = interp2app(NotImplemented.descr__repr__), ) assert not NotImplemented.typedef.acceptable_as_base_class # no __new__ From pypy.commits at gmail.com Fri Dec 2 11:35:05 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:35:05 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default 
Message-ID: <5841a2b9.c19d1c0a.f1734.53c5@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88827:d61704606472 Date: 2016-12-02 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/d61704606472/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,15 @@ .. this is a revision shortly after release-pypy2.7-v5.6 .. startrev: 7e9787939641 + +Since a while now, PyPy preserves the order of dictionaries and sets. +However, the set literal syntax ``{x, y, z}`` would by mistake build a +set with the opposite order: ``set([z, y, x])``. This has been fixed. +Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be +``set([5.0])``, but in 2.7.trunk it is ``set([5])``. PyPy's behavior +changed in exactly the same way because of this fix. + + .. branch: mappingproxy .. branch: py3k-finish_time .. branch: py3k-kwonly-builtin diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1376,9 +1376,10 @@ @jit.unroll_safe def BUILD_SET(self, itemcount, next_instr): w_set = self.space.newset() - for i in range(itemcount): - w_item = self.popvalue() + for i in range(itemcount-1, -1, -1): + w_item = self.peekvalue(i) self.space.call_method(w_set, 'add', w_item) + self.popvalues(itemcount) self.pushvalue(w_set) @jit.unroll_safe diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -806,6 +806,8 @@ def setup_class(cls): cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + cls.w_host_is_pypy = cls.space.wrap( + '__pypy__' in sys.builtin_module_names) def w_is_pypy(self): import sys @@ -952,6 +954,18 @@ else: assert False, "Expected SyntaxError" + def test_dict_and_set_literal_order(self): + x = 1 + l1 = list({1:'a', 3:'b', 2:'c', 
4:'d'}) + l2 = list({1, 3, 2, 4}) + l3 = list({x:'a', 3:'b', 2:'c', 4:'d'}) + l4 = list({x, 3, 2, 4}) + if not self.host_is_pypy: + # the full test relies on the host Python providing ordered dicts + assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 4} + else: + assert l1 == l2 == l3 == l4 == [1, 3, 2, 4] + def test_ast_equality(self): import _ast sample_code = [ diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -4,9 +4,11 @@ def test_Ellipsis(self): assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' + assert Ellipsis.__class__.__name__ == 'ellipsis' def test_NotImplemented(self): def f(): return NotImplemented assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' + assert NotImplemented.__class__.__name__ == 'NotImplementedType' diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -860,13 +860,13 @@ ) assert not Cell.typedef.acceptable_as_base_class # no __new__ -Ellipsis.typedef = TypeDef("Ellipsis", +Ellipsis.typedef = TypeDef("ellipsis", __new__ = interp2app(Ellipsis.descr_new_ellipsis), __repr__ = interp2app(Ellipsis.descr__repr__), ) Ellipsis.typedef.acceptable_as_base_class = False -NotImplemented.typedef = TypeDef("NotImplemented", +NotImplemented.typedef = TypeDef("NotImplementedType", __new__ = interp2app(NotImplemented.descr_new_notimplemented), __repr__ = interp2app(NotImplemented.descr__repr__), ) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -374,17 +374,7 @@ def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_vectorized(1, 1) - - def define_sum(): - return """ - a = |30| - sum(a) - """ - def 
test_sum(self): - result = self.run("sum") - assert result == sum(range(30)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_int(): return """ @@ -408,7 +398,7 @@ def test_sum_multi(self): result = self.run("sum_multi") assert result == sum(range(30)) + sum(range(60)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_float_to_int16(): return """ @@ -490,7 +480,7 @@ assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(2, 1) + self.check_vectorized(2, 0) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -501,7 +491,7 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(3, 1) + self.check_vectorized(3, 0) def define_prod(): return """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -76,7 +76,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), - ('sum','float', 2581, 2581, 1), ('prod','int', 1, 3178, 1), ('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -978,9 +978,7 @@ self.right is other.right class AccumPack(Pack): - SUPPORTED = { rop.FLOAT_ADD: '+', - rop.INT_ADD: '+', - } + SUPPORTED = { rop.INT_ADD: '+', } def __init__(self, nodes, operator, position): Pack.__init__(self, nodes) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py @@ -197,7 +197,7 @@ f13 = float_add(f12, f11) """) savings = self.savings(loop1) - assert savings == 2 + assert savings == -2 @py.test.mark.parametrize("bytes,s", [(4,0),(8,0)]) def test_sum_float_to_int(self, bytes, s): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1162,32 +1162,32 @@ vopt = self.vectorize(loop,1) self.assert_equal(loop, self.parse_loop(opt)) - def test_accumulate_basic(self): - trace = """ - [p0, i0, f0] - f1 = raw_load_f(p0, i0, descr=floatarraydescr) - f2 = float_add(f0, f1) - i1 = int_add(i0, 8) - i2 = int_lt(i1, 100) - guard_true(i2) [p0, i0, f2] - jump(p0, i1, f2) - """ - trace_opt = """ - [p0, i0, f0] - v6[0xf64] = vec_f() - v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) - v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) - label(p0, i0, v2[2xf64]) - i1 = int_add(i0, 16) - i2 = int_lt(i1, 100) - guard_true(i2) [p0, i0, v2[2xf64]] - v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) - v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64]) - jump(p0, i1, v3[2xf64]) - """ - loop = self.parse_loop(trace) - opt = self.vectorize(loop) - self.assert_equal(loop, self.parse_loop(trace_opt)) + #def test_accumulate_basic(self): + # trace = """ + # [p0, i0, f0] + # f1 = raw_load_f(p0, i0, descr=floatarraydescr) + # f2 = float_add(f0, f1) + # i1 = int_add(i0, 8) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, f2] + # jump(p0, i1, f2) + # """ + # trace_opt = """ + # [p0, i0, f0] + # v6[0xf64] = vec_f() + # v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) + # v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) + # label(p0, i0, v2[2xf64]) + # i1 = int_add(i0, 16) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, v2[2xf64]] + # v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) + # v3[2xf64] 
= vec_float_add(v2[2xf64], v1[2xf64]) + # jump(p0, i1, v3[2xf64]) + # """ + # loop = self.parse_loop(trace) + # opt = self.vectorize(loop) + # self.assert_equal(loop, self.parse_loop(trace_opt)) def test_element_f45_in_guard_failargs(self): trace = self.parse_loop(""" diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -842,7 +842,8 @@ oplist.append(vecop) opnum = rop.VEC_INT_XOR if datatype == FLOAT: - opnum = rop.VEC_FLOAT_XOR + # see PRECISION loss below + raise NotImplementedError vecop = VecOperation(opnum, [vecop, vecop], vecop, count) oplist.append(vecop) From pypy.commits at gmail.com Fri Dec 2 11:35:07 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:35:07 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Ellipsis.__reduce__ Message-ID: <5841a2bb.08301c0a.5efa1.57f2@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88828:2744856221a3 Date: 2016-12-02 17:34 +0100 http://bitbucket.org/pypy/pypy/changeset/2744856221a3/ Log: Ellipsis.__reduce__ NotImplemented.__reduce__ diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -10,6 +10,8 @@ def descr__repr__(self, space): return space.wrap('Ellipsis') + descr__reduce__ = descr__repr__ + class NotImplemented(W_Root): @@ -19,3 +21,5 @@ def descr__repr__(self, space): return space.wrap('NotImplemented') + + descr__reduce__ = descr__repr__ diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -5,6 +5,7 @@ assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' assert Ellipsis.__class__.__name__ == 'ellipsis' + assert Ellipsis.__reduce__() == 'Ellipsis' def test_NotImplemented(self): def f(): @@ -12,3 +13,4 @@ 
assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' assert NotImplemented.__class__.__name__ == 'NotImplementedType' + assert NotImplemented.__reduce__() == 'NotImplemented' diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -863,12 +863,14 @@ Ellipsis.typedef = TypeDef("ellipsis", __new__ = interp2app(Ellipsis.descr_new_ellipsis), __repr__ = interp2app(Ellipsis.descr__repr__), + __reduce__ = interp2app(Ellipsis.descr__reduce__), ) Ellipsis.typedef.acceptable_as_base_class = False NotImplemented.typedef = TypeDef("NotImplementedType", __new__ = interp2app(NotImplemented.descr_new_notimplemented), __repr__ = interp2app(NotImplemented.descr__repr__), + __reduce__ = interp2app(NotImplemented.descr__reduce__), ) NotImplemented.typedef.acceptable_as_base_class = False From pypy.commits at gmail.com Fri Dec 2 11:39:23 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:39:23 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <5841a3bb.c19d1c0a.f1734.55d8@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88829:7f6ebfa18d90 Date: 2016-12-02 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/7f6ebfa18d90/ Log: fix test diff --git a/lib-python/3/test/test_pickletools.py b/lib-python/3/test/test_pickletools.py --- a/lib-python/3/test/test_pickletools.py +++ b/lib-python/3/test/test_pickletools.py @@ -23,12 +23,14 @@ pickled = pickle.dumps(data, proto) unpickled = pickle.loads(pickled) self.assertEqual(unpickled, data) - self.assertIs(unpickled[-1], unpickled[-2]) + # compare two strings, xxx don't use assertIs() here: + self.assertEqual(unpickled[-1], unpickled[-2]) pickled2 = pickletools.optimize(pickled) unpickled2 = pickle.loads(pickled2) self.assertEqual(unpickled2, data) - self.assertIs(unpickled2[-1], unpickled2[-2]) + # compare two strings, xxx don't use assertIs() here: + self.assertEqual(unpickled2[-1], 
unpickled2[-2]) self.assertNotIn(pickle.LONG_BINGET, pickled2) self.assertNotIn(pickle.LONG_BINPUT, pickled2) From pypy.commits at gmail.com Fri Dec 2 11:52:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 02 Dec 2016 08:52:17 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: fixed slicelength and parameter to decode_index4, renamed some methods, [::-1] for multi dim. memoryview works Message-ID: <5841a6c1.8ab81c0a.d8962.5ea0@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88830:bd39209dafe3 Date: 2016-12-02 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/bd39209dafe3/ Log: fixed slicelength and parameter to decode_index4, renamed some methods, [::-1] for multi dim. memoryview works diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -159,13 +159,12 @@ def skip(self, count): # assumption: UnpackFormatIterator only iterates over - # flat structures (continous memory) either, forward (index - # is increasing) or reverse + # flat structures (continous memory) either: forward (index + # grows) or reverse if self.strides: assert len(self.strides) == 1 - end = self.pos + count * self.strides[0] - else: - end = self.pos + count + count = self.strides[0] + end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") self.pos = end diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -199,7 +199,6 @@ items = [None] * dimshape for i in range(dimshape): - import pdb; pdb.set_trace() buf = SubBuffer(buf, start, stride) item = self._tolist_rec(space, buf, start, idim+1, fmt) items[i] = item @@ -267,7 +266,8 @@ if space.isinstance_w(w_index, space.w_tuple): return self._getitem_tuple_indexed(space, w_index) - start, stop, step, size = 
space.decode_index4(w_index, self.getlength()) + shape = self.getshape() + start, stop, step, slicelength = space.decode_index4(w_index, shape[0]) # ^^^ for a non-slice index, this returns (index, 0, 0, 1) if step == 0: # index only itemsize = self.getitemsize() @@ -289,28 +289,27 @@ return fmtiter.result_w[0] elif step == 1: mv = W_MemoryView.copy(self) - mv.slice(start, step, size) + mv.init_slice(start, stop, step, slicelength, 0) mv._init_flags() return mv else: mv = W_MemoryView.copy(self) - mv.slice(start, step, size) - mv.length = mv.bytecount_from_shape() + mv.init_slice(start, stop, step, slicelength, 0) + mv.init_len() mv._init_flags() return mv - def slice(self, start, step, size): + def init_slice(self, start, stop, step, slicelength, dim): # modifies the buffer, shape and stride to allow step to be > 1 # TODO subbuffer - strides = self.getstrides()[:] - shape = self.getshape()[:] - itemsize = self.getitemsize() - dim = 0 - self.buf = SubBuffer(self.buf, strides[dim] * start, itemsize) - shape[dim] = size + self.strides = strides = self.getstrides()[:] + self.shape = shape = self.getshape()[:] + self.buf = SubBuffer(self.buf, strides[dim] * start, slicelength) + shape[dim] = slicelength strides[dim] = strides[dim] * step - self.strides = strides - self.shape = shape + + def init_len(self): + self.length = self.bytecount_from_shape() def bytecount_from_shape(self): dim = self.getndim() @@ -629,8 +628,6 @@ return None def _cast_to_ND(self, space, shape, ndim): - buf = self.buf - self.ndim = ndim length = self.itemsize if ndim == 0: diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -437,6 +437,6 @@ view = memoryview(bytes) bview = view.cast('b') rview = bview.cast(fmt, shape=(2,3)) + raises(NotImplementedError, list, reversed(rview)) assert rview.tolist() == [[1,2,3],[9,7,5]] - assert 
rview[::-1].tolist() == [[3,2,1], [5,7,9]] - raises(NotImplementedError, list, reversed(rview)) + assert rview[::-1].tolist() == [[9,7,5], [1,2,3]] From pypy.commits at gmail.com Fri Dec 2 11:55:11 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 08:55:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5: os.chdir(fd) Message-ID: <5841a76f.2aa9c20a.34a7d.4b14@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88831:093346994810 Date: 2016-12-02 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/093346994810/ Log: os.chdir(fd) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -700,12 +700,16 @@ """Return the current working directory as a string.""" return space.fsdecode(getcwdb(space)) -def chdir(space, w_path): + at unwrap_spec(path=path_or_fd(allow_fd=rposix.HAVE_FCHDIR)) +def chdir(space, path): """Change the current working directory to the specified path.""" try: - dispatch_filename(rposix.chdir)(space, w_path) + if rposix.HAVE_FCHDIR and path.as_fd != -1: + os.fchdir(path.as_fd) + else: + call_rposix(rposix.chdir, path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, path.w_path) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) def mkdir(space, w_path, mode=0o777, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -818,19 +818,20 @@ def test_fchdir(self): os = self.posix localdir = os.getcwd() - try: - os.mkdir(self.path2 + 'dir') - fd = os.open(self.path2 + 'dir', os.O_RDONLY) + os.mkdir(self.path2 + 'fchdir') + for func in [os.fchdir, os.chdir]: try: - os.fchdir(fd) - mypath = os.getcwd() + fd = os.open(self.path2 + 'fchdir', os.O_RDONLY) + try: + func(fd) + mypath = os.getcwd() + finally: + 
os.close(fd) + assert mypath.endswith('test_posix2-fchdir') + raises(OSError, func, fd) finally: - os.close(fd) - assert mypath.endswith('test_posix2-dir') - raises(OSError, os.fchdir, fd) - raises(ValueError, os.fchdir, -1) - finally: - os.chdir(localdir) + os.chdir(localdir) + raises(ValueError, os.fchdir, -1) def test_largefile(self): os = self.posix From pypy.commits at gmail.com Fri Dec 2 12:28:57 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 09:28:57 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Change argument names to match CPython, which (with argument clinic) now Message-ID: <5841af59.8a29c20a.db42a.6310@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88832:56dae2016926 Date: 2016-12-02 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/56dae2016926/ Log: Change argument names to match CPython, which (with argument clinic) now tends to have actual names there too diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -237,13 +237,13 @@ raise wrap_oserror2(space, e, w_path) return space.wrap(fd) - at unwrap_spec(fd=c_int, pos=r_longlong, how=c_int) -def lseek(space, fd, pos, how): + at unwrap_spec(fd=c_int, position=r_longlong, how=c_int) +def lseek(space, fd, position, how): """Set the current position of a file descriptor. Return the new position. 
-If how == 0, 'pos' is relative to the start of the file; if how == 1, to the -current position; if how == 2, to the end.""" +If how == 0, 'position' is relative to the start of the file; if how == 1, to +the current position; if how == 2, to the end.""" try: - pos = os.lseek(fd, pos, how) + pos = os.lseek(fd, position, how) except OSError as e: raise wrap_oserror(space, e) else: @@ -260,11 +260,11 @@ else: return space.wrap(res) - at unwrap_spec(fd=c_int, buffersize=int) -def read(space, fd, buffersize): + at unwrap_spec(fd=c_int, length=int) +def read(space, fd, length): """Read data from a file descriptor.""" try: - s = os.read(fd, buffersize) + s = os.read(fd, length) except OSError as e: raise wrap_oserror(space, e) else: @@ -540,11 +540,11 @@ else: return space.wrap(newfd) - at unwrap_spec(old_fd=c_int, new_fd=c_int, inheritable=int) -def dup2(space, old_fd, new_fd, inheritable=1): + at unwrap_spec(fd=c_int, fd2=c_int, inheritable=bool) +def dup2(space, fd, fd2, inheritable=1): """Duplicate a file descriptor.""" try: - rposix.dup2(old_fd, new_fd, inheritable) + rposix.dup2(fd, fd2, inheritable) except OSError as e: raise wrap_oserror(space, e) @@ -613,11 +613,11 @@ space.wrap(times[3]), space.wrap(times[4])]) - at unwrap_spec(cmd='fsencode') -def system(space, cmd): + at unwrap_spec(command='fsencode') +def system(space, command): """Execute the command (a string) in a subshell.""" try: - rc = os.system(cmd) + rc = os.system(command) except OSError as e: raise wrap_oserror(space, e) else: @@ -700,16 +700,16 @@ """Return the current working directory as a string.""" return space.fsdecode(getcwdb(space)) - at unwrap_spec(path=path_or_fd(allow_fd=rposix.HAVE_FCHDIR)) -def chdir(space, path): +def chdir(space, w_path): """Change the current working directory to the specified path.""" try: - if rposix.HAVE_FCHDIR and path.as_fd != -1: - os.fchdir(path.as_fd) + if rposix.HAVE_FCHDIR: + dispatch_filename(rposix.chdir, + allow_fd_fn=os.fchdir)(space, w_path) else: - 
call_rposix(rposix.chdir, path) + dispatch_filename(rposix.chdir)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, w_path) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) def mkdir(space, w_path, mode=0o777, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): @@ -751,11 +751,11 @@ except OSError as e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(errno=c_int) -def strerror(space, errno): + at unwrap_spec(code=c_int) +def strerror(space, code): """Translate an error code to a message string.""" try: - return space.wrap(_strerror(errno)) + return space.wrap(_strerror(code)) except ValueError: raise oefmt(space.w_ValueError, "strerror() argument out of range") @@ -1059,12 +1059,12 @@ raise wrap_oserror2(space, e, w_path) @unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD(rposix.HAVE_MKNODAT)) -def mknod(space, w_filename, mode=0600, device=0, +def mknod(space, w_path, mode=0600, device=0, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): - """mknod(filename, mode=0o600, device=0, *, dir_fd=None) + """mknod(path, mode=0o600, device=0, *, dir_fd=None) Create a filesystem node (file, device special file or named pipe) -named filename. mode specifies both the permissions to use and the +named 'path'. mode specifies both the permissions to use and the type of node to be created, being combined (bitwise OR) with one of S_IFREG, S_IFCHR, S_IFBLK, and S_IFIFO. 
For S_IFCHR and S_IFBLK, device defines the newly created device special file (probably using @@ -1076,12 +1076,12 @@ If it is unavailable, using it will raise a NotImplementedError.""" try: if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: - fname = space.fsencode_w(w_filename) + fname = space.fsencode_w(w_path) rposix.mknodat(fname, mode, device, dir_fd) else: - dispatch_filename(rposix.mknod)(space, w_filename, mode, device) + dispatch_filename(rposix.mknod)(space, w_path, mode, device) except OSError as e: - raise wrap_oserror2(space, e, w_filename) + raise wrap_oserror2(space, e, w_path) @unwrap_spec(mask=c_int) def umask(space, mask): @@ -1097,19 +1097,19 @@ raise wrap_oserror(space, e) return space.wrap(pid) - at unwrap_spec(pid=c_int, sig=c_int) -def kill(space, pid, sig): + at unwrap_spec(pid=c_int, signal=c_int) +def kill(space, pid, signal): "Kill a process with a signal." try: - rposix.kill(pid, sig) + rposix.kill(pid, signal) except OSError as e: raise wrap_oserror(space, e) - at unwrap_spec(pgid=c_int, sig=c_int) -def killpg(space, pgid, sig): + at unwrap_spec(pgid=c_int, signal=c_int) +def killpg(space, pgid, signal): "Kill a process group with a signal." try: - os.killpg(pgid, sig) + os.killpg(pgid, signal) except OSError as e: raise wrap_oserror(space, e) @@ -1287,7 +1287,7 @@ def _exit(space, status): os._exit(status) -def execv(space, w_path, w_args): +def execv(space, w_path, w_argv): """ execv(path, args) Execute an executable path with arguments, replacing current process. 
@@ -1297,7 +1297,7 @@ """ command = space.fsencode_w(w_path) try: - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_argv) if len(args_w) < 1: raise oefmt(space.w_ValueError, "execv() arg 2 must not be empty") @@ -1322,13 +1322,13 @@ return env -def execve(space, w_path, w_argv, w_environment): - """execve(path, args, env) +def execve(space, w_path, w_argv, w_env): + """execve(path, argv, env) Execute a path with arguments and environment, replacing current process. path: path of executable file - args: tuple or list of arguments + argv: tuple or list of arguments env: dictionary of strings mapping to strings On some platforms, you may specify an open file descriptor for path; @@ -1340,7 +1340,7 @@ raise oefmt(space.w_TypeError, "execve: argv must be a tuple or a list") args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] - env = _env2interp(space, w_environment) + env = _env2interp(space, w_env) try: path = space.fsencode_w(w_path) except OperationError: @@ -1363,8 +1363,8 @@ raise wrap_oserror(space, e) @unwrap_spec(mode=int, path='fsencode') -def spawnv(space, mode, path, w_args): - args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_args)] +def spawnv(space, mode, path, w_argv): + args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] try: ret = os.spawnv(mode, path, args) except OSError as e: @@ -1372,8 +1372,8 @@ return space.wrap(ret) @unwrap_spec(mode=int, path='fsencode') -def spawnve(space, mode, path, w_args, w_env): - args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_args)] +def spawnve(space, mode, path, w_argv, w_env): + args = [space.fsencode_w(w_arg) for w_arg in space.unpackiterable(w_argv)] env = _env2interp(space, w_env) try: ret = os.spawnve(mode, path, args, env) @@ -1561,47 +1561,47 @@ """ return wrap_uid(space, os.getuid()) - at unwrap_spec(arg=c_uid_t) -def setuid(space, arg): + at unwrap_spec(uid=c_uid_t) +def setuid(space, uid): """ 
setuid(uid) Set the current process's user id. """ try: - os.setuid(arg) + os.setuid(uid) except OSError as e: raise wrap_oserror(space, e) - at unwrap_spec(arg=c_uid_t) -def seteuid(space, arg): - """ seteuid(uid) + at unwrap_spec(euid=c_uid_t) +def seteuid(space, euid): + """ seteuid(euid) Set the current process's effective user id. """ try: - os.seteuid(arg) + os.seteuid(euid) except OSError as e: raise wrap_oserror(space, e) - at unwrap_spec(arg=c_gid_t) -def setgid(space, arg): + at unwrap_spec(gid=c_gid_t) +def setgid(space, gid): """ setgid(gid) Set the current process's group id. """ try: - os.setgid(arg) + os.setgid(gid) except OSError as e: raise wrap_oserror(space, e) - at unwrap_spec(arg=c_gid_t) -def setegid(space, arg): - """ setegid(gid) + at unwrap_spec(egid=c_gid_t) +def setegid(space, egid): + """ setegid(egid) Set the current process's effective group id. """ try: - os.setegid(arg) + os.setegid(egid) except OSError as e: raise wrap_oserror(space, e) @@ -1649,13 +1649,13 @@ raise wrap_oserror(space, e) return space.newlist([wrap_gid(space, e) for e in list]) -def setgroups(space, w_list): - """ setgroups(list) +def setgroups(space, w_groups): + """ setgroups(groups) Set the groups of the current process to list. """ list = [] - for w_gid in space.unpackiterable(w_list): + for w_gid in space.unpackiterable(w_groups): list.append(space.c_uid_t_w(w_gid)) try: os.setgroups(list[:]) @@ -2025,24 +2025,25 @@ result = os.minor(intmask(device)) return space.wrap(result) - at unwrap_spec(inc=c_int) -def nice(space, inc): - "Decrease the priority of process by inc and return the new priority." 
+ at unwrap_spec(increment=c_int) +def nice(space, increment): + """Decrease the priority of process by 'increment' + and return the new priority.""" try: - res = os.nice(inc) + res = os.nice(increment) except OSError as e: raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(n=int) -def urandom(space, n): - """urandom(n) -> str + at unwrap_spec(size=int) +def urandom(space, size): + """urandom(size) -> str - Return a string of n random bytes suitable for cryptographic use. + Return a string of 'size' random bytes suitable for cryptographic use. """ context = get(space).random_context try: - return space.newbytes(rurandom.urandom(context, n)) + return space.newbytes(rurandom.urandom(context, size)) except OSError as e: raise wrap_oserror(space, e) From pypy.commits at gmail.com Fri Dec 2 12:38:53 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 09:38:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5: test and fix Message-ID: <5841b1ad.313ac20a.8a7be.698e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88834:064673746af4 Date: 2016-12-02 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/064673746af4/ Log: test and fix diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1037,7 +1037,7 @@ else: dispatch_filename_2(rposix.replace)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT)) def mkfifo(space, w_path, mode=0666, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1231,6 +1231,8 @@ assert str(e.value).endswith(": 'nonexistentfile1' -> 'bok'") e = raises(OSError, self.posix.rename, 
'nonexistentfile1', 'bok') assert str(e.value).endswith(": 'nonexistentfile1' -> 'bok'") + e = raises(OSError, self.posix.replace, 'nonexistentfile1', 'bok') + assert str(e.value).endswith(": 'nonexistentfile1' -> 'bok'") e = raises(OSError, self.posix.symlink, 'bok', '/nonexistentdir/boz') assert str(e.value).endswith(": 'bok' -> '/nonexistentdir/boz'") From pypy.commits at gmail.com Fri Dec 2 12:38:51 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 09:38:51 -0800 (PST) Subject: [pypy-commit] pypy py3.5: stick rposix.O_CLOEXEC inside posix Message-ID: <5841b1ab.aaa3c20a.711ce.67a4@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88833:0b3bb5547935 Date: 2016-12-02 18:35 +0100 http://bitbucket.org/pypy/pypy/changeset/0b3bb5547935/ Log: stick rposix.O_CLOEXEC inside posix diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -206,6 +206,10 @@ interpleveldefs['get_blocking'] = 'interp_posix.get_blocking' interpleveldefs['set_blocking'] = 'interp_posix.set_blocking' + for _name in ["O_CLOEXEC"]: + if getattr(rposix, _name) is not None: + interpleveldefs[_name] = 'space.wrap(%d)' % getattr(rposix, _name) + def startup(self, space): from pypy.module.posix import interp_posix from pypy.module.imp import importing From pypy.commits at gmail.com Sat Dec 3 00:57:53 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 02 Dec 2016 21:57:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Add a failing test for the issue that prevents pip from working on pypy3 nightly Message-ID: <58425ee1.542e1c0a.730e4.35a3@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88835:a92e564e1432 Date: 2016-12-03 05:57 +0000 http://bitbucket.org/pypy/pypy/changeset/a92e564e1432/ Log: Add a failing test for the issue that prevents pip from working on pypy3 nightly Since it doesn't seem to fit in any of the existing test categories, I'm putting it in a new 
top-level directory, 'extra_tests/'. diff --git a/extra_tests/README.txt b/extra_tests/README.txt new file mode 100644 --- /dev/null +++ b/extra_tests/README.txt @@ -0,0 +1,5 @@ +The tests in this directory are a complement to lib-python/3/test/. + +They are meant to run on top of a compiled pypy3 or CPython3.5 in an +environment containing at least pytest and hypothesis, using a command like +'pytest extra_tests/'. diff --git a/extra_tests/pytest.ini b/extra_tests/pytest.ini new file mode 100644 diff --git a/extra_tests/test_bufferedreader.py b/extra_tests/test_bufferedreader.py new file mode 100644 --- /dev/null +++ b/extra_tests/test_bufferedreader.py @@ -0,0 +1,99 @@ +import io +from cffi import FFI + +import pytest +from hypothesis import strategies as st +from hypothesis import given, assume, settings +from hypothesis.stateful import ( + RuleBasedStateMachine, Bundle, rule, run_state_machine_as_test, precondition) +ffi = FFI() + +MAX_READ_SIZE = 1024 +MIN_READ_SIZE = 1 +MAX_SIZE = 0xffff + + at st.composite +def data_and_sizes(draw, reads=st.lists(st.integers(MIN_READ_SIZE, MAX_READ_SIZE))): + reads = draw(reads) + total_size = sum(reads) + assume(0 < total_size < MAX_SIZE) + data = draw(st.binary(min_size=total_size, max_size=total_size)) + return data, reads + +class Stream(io.RawIOBase): + def __init__(self, data, read_sizes): + assert sum(read_sizes) == len(data) + self.data = data + self.n = 0 + self.read_sizes = iter(read_sizes) + self.partial_read = 0 + + def readinto(self, buf): + if self.n == len(self.data): + return 0 + if self.partial_read: + read_size = self.partial_read + else: + read_size = next(self.read_sizes) + if len(buf) < read_size: + self.partial_read = read_size - len(buf) + read_size = len(buf) + else: + self.partial_read = 0 + self.update_buffer(buf, self.data[self.n:self.n + read_size]) + self.n += read_size + return read_size + + def update_buffer(self, buf, data): + n = len(data) + buf[:n] = data + + def readable(self): + return 
True + +class StreamCFFI(Stream): + def update_buffer(self, buf, data): + n = len(data) + ffi.buffer(ffi.from_buffer(buf), n)[:] = data + + + at pytest.mark.parametrize('StreamCls', [Stream, StreamCFFI]) + at given(params=data_and_sizes(), chunk_size=st.integers(MIN_READ_SIZE, 8192)) +def test_buf(params, chunk_size, StreamCls): + data, sizes = params + stream = StreamCls(data, sizes) + assert io.BufferedReader(stream, chunk_size).read(len(data)) == data + +class StateMachine(RuleBasedStateMachine): + def __init__(self, stream, reference): + super().__init__() + self.stream = stream + self.reference = reference + + @rule(size=st.integers(MIN_READ_SIZE, MAX_READ_SIZE)) + def read(self, size): + expected = self.reference.read(size) + assert self.stream.read(size) == expected + + @rule(size=st.integers(MIN_READ_SIZE, MAX_READ_SIZE)) + def readinto(self, size): + expected = self.reference.read(size) + buf = bytearray(size) + n = self.stream.readinto(buf) + assert buf[:n] == expected + + @rule() + def readline(self): + expected = self.reference.readline(80) + assert self.stream.readline(80) == expected + + at pytest.mark.parametrize('StreamCls', [Stream, StreamCFFI]) + at settings(max_examples=50) + at given(params=data_and_sizes(), chunk_size=st.integers(MIN_READ_SIZE, 8192)) +def test_stateful(params, chunk_size, StreamCls): + data, sizes = params + raw_stream = StreamCls(data, sizes) + reference = io.BytesIO(data) + stream = io.BufferedReader(raw_stream, chunk_size) + sm = StateMachine(stream, reference) + run_state_machine_as_test(lambda: sm) From pypy.commits at gmail.com Sat Dec 3 02:37:14 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 02 Dec 2016 23:37:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Attempt to work around rare cases where sys.exc_info() returns non-None Message-ID: <5842762a.52301c0a.42568.5020@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88836:dc7c0fe0f3cb Date: 2016-12-03 08:36 +0100 
http://bitbucket.org/pypy/pypy/changeset/dc7c0fe0f3cb/ Log: Attempt to work around rare cases where sys.exc_info() returns non- None because it was not correctly cleared diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -248,20 +248,21 @@ """Start this frame's execution.""" if self._is_generator_or_coroutine(): return self.initialize_as_generator(name, qualname) - elif we_are_translated(): - return self.execute_frame() else: # untranslated: check that sys_exc_info is exactly - # restored after running any Python function + # restored after running any Python function. + # Translated: actually save and restore it, as an attempt to + # work around rare cases that can occur if RecursionError or + # MemoryError is raised at just the wrong place executioncontext = self.space.getexecutioncontext() exc_on_enter = executioncontext.sys_exc_info() try: - w_res = self.execute_frame() - except OperationError: - assert exc_on_enter is executioncontext.sys_exc_info() - raise - assert exc_on_enter is executioncontext.sys_exc_info() - return w_res + return self.execute_frame() + finally: + if we_are_translated(): + executioncontext.set_sys_exc_info(exc_on_enter) + else: + assert exc_on_enter is executioncontext.sys_exc_info() run._always_inline_ = True def initialize_as_generator(self, name, qualname): From pypy.commits at gmail.com Sat Dec 3 03:04:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 00:04:50 -0800 (PST) Subject: [pypy-commit] pypy py3.5: __annotations__ takes keys that are mangled parameter names Message-ID: <58427ca2.05bd1c0a.ef06e.57ab@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88838:5eca897303d3 Date: 2016-12-03 09:04 +0100 http://bitbucket.org/pypy/pypy/changeset/5eca897303d3/ Log: __annotations__ takes keys that are mangled parameter names diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py 
--- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -345,7 +345,7 @@ def _visit_arg_annotation(self, name, ann, names): if ann: ann.walkabout(self) - names.append(name) + names.append(self.scope.mangle(name)) def _visit_arg_annotations(self, args, names): if args: diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -60,6 +60,13 @@ f.__annotations__ = ann assert f.__annotations__ is ann + def test_annotations_mangle(self): """ + class X: + def foo(self, __a:5, b:6): + pass + assert X.foo.__annotations__ == {'_X__a': 5, 'b': 6} + """ + def test_kwdefaults(self): """ def f(*, kw=3): return kw From pypy.commits at gmail.com Sat Dec 3 03:04:48 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 00:04:48 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix Message-ID: <58427ca0.913fc20a.ca4e8.4959@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88837:b4167fbd295e Date: 2016-12-03 08:54 +0100 http://bitbucket.org/pypy/pypy/changeset/b4167fbd295e/ Log: fix diff --git a/lib-python/3/test/test_inspect.py b/lib-python/3/test/test_inspect.py --- a/lib-python/3/test/test_inspect.py +++ b/lib-python/3/test/test_inspect.py @@ -2791,11 +2791,11 @@ self.assertNotEqual(hash(foo_sig), hash(inspect.signature(bar))) def foo(a={}): pass - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(inspect.signature(foo)) def foo(a) -> {}: pass - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(inspect.signature(foo)) def test_signature_str(self): @@ -3252,7 +3252,7 @@ def foo(a): pass ba = inspect.signature(foo).bind(1) - with self.assertRaisesRegex(TypeError, 'unhashable type'): + with self.assertRaisesRegex(TypeError, 'unhashable'): hash(ba) def 
test_signature_bound_arguments_equality(self): From pypy.commits at gmail.com Sat Dec 3 03:47:49 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 00:47:49 -0800 (PST) Subject: [pypy-commit] pypy py3.5: test and fix Message-ID: <584286b5.0a4cc20a.cf0fa.5384@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88839:5ff985c7735b Date: 2016-12-03 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/5ff985c7735b/ Log: test and fix diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -270,6 +270,37 @@ assert 0, "exception not propagated" """ + def test_mutable_bases_with_failing_mro_2(self): """ + class E(Exception): + pass + class M(type): + def mro(cls): + if cls.__name__ == 'Sub' and A.__bases__ == (Base1,): + A.__bases__ = (Base2,) + raise E + return type.mro(cls) + + class Base0: + pass + class Base1: + pass + class Base2: + pass + class A(Base0, metaclass=M): + pass + class Sub(A): + pass + + try: + A.__bases__ = (Base1,) + except E: + assert A.__bases__ == (Base2,) + assert A.__mro__ == (A, Base2, object) + assert Sub.__mro__ == (Sub, A, Base2, object) + else: + assert 0 + """ + def test_mutable_bases_catch_mro_conflict(self): class A(object): pass diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -802,8 +802,9 @@ return space.newtuple(w_type.bases_w) def mro_subclasses(space, w_type, temp): - temp.append((w_type, w_type.mro_w)) + old_mro_w = w_type.mro_w compute_mro(w_type) + temp.append((w_type, old_mro_w, w_type.mro_w)) for w_sc in w_type.get_subclasses(): assert isinstance(w_sc, W_TypeObject) mro_subclasses(space, w_sc, temp) @@ -856,9 +857,11 @@ # try to recompute all MROs mro_subclasses(space, w_type, temp) except: - for cls, old_mro in temp: - cls.mro_w = old_mro - w_type.bases_w = 
saved_bases_w + for cls, old_mro, new_mro in temp: + if cls.mro_w is new_mro: # don't revert if it changed again + cls.mro_w = old_mro + if w_type.bases_w is newbases_w: # don't revert if it changed again + w_type.bases_w = saved_bases_w raise if (w_type.version_tag() is not None and not is_mro_purely_of_types(w_type.mro_w)): From pypy.commits at gmail.com Sat Dec 3 03:48:06 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 00:48:06 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <584286c6.c89cc20a.df59e.4aca@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r827:c8fe3e4c9140 Date: 2016-12-03 09:47 +0100 http://bitbucket.org/pypy/pypy.org/changeset/c8fe3e4c9140/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66348 of $105000 (63.2%) + $66367 of $105000 (63.2%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sat Dec 3 06:40:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:30 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: in-progress Message-ID: <5842af2e.c19d1c0a.f1734.966f@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88842:abe549159b0d Date: 2016-12-03 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/abe549159b0d/ Log: in-progress diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rgc import (resizable_list_supporting_raw_ptr, nonmoving_raw_ptr_for_resizable_list) +from rpython.rlib import jit from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -35,6 +36,7 @@ self._data = resizable_list_supporting_raw_ptr(data) self._offset = 0 # NOTE: the bytearray data is in 'self._data[self._offset:]' + _tweak_for_tests(self) def getdata(self): if self._offset > 0: @@ -48,13 +50,13 @@ ''.join(self._data[self._offset:])) def buffer_w(self, space, flags): - return BytearrayBuffer(self._data, self._offset) + return BytearrayBuffer(self) def bytearray_list_of_chars_w(self, space): return self.getdata() def nonmovable_carray(self, space): - return BytearrayBuffer(self._data, self._offset).get_raw_address() + return BytearrayBuffer(self).get_raw_address() def _new(self, value): if value is self._data: @@ -73,7 +75,7 @@ def _len(self): return len(self._data) - self._offset - def _fixindex(self, space, index): + def _fixindex(self, space, index, errmsg="bytearray index out of range"): # for getitem/setitem/delitem of a single char if index >= 0: index += self._offset @@ -82,7 +84,7 @@ index += len(self._data) # count from the end oob = index < self._offset if oob: - raise oefmt(space.w_IndexError, "bytearray index out of range") + 
raise OperationError(space.w_IndexError, space.wrap(errmsg)) check_nonneg(index) return index @@ -210,6 +212,7 @@ data = [c for c in newbytesdata_w(space, w_source, encoding, errors)] self._data = resizable_list_supporting_raw_ptr(data) self._offset = 0 + _tweak_for_tests(self) def descr_repr(self, space): s = self.getdata() @@ -374,11 +377,20 @@ def descr_setitem(self, space, w_index, w_other): if isinstance(w_index, W_SliceObject): - XXX - oldsize = len(self.data) + sequence2 = [c for c in makebytesdata_w(space, w_other)] + oldsize = self._len() start, stop, step, slicelength = w_index.indices4(space, oldsize) - sequence2 = [c for c in makebytesdata_w(space, w_other)] - _setitem_slice_helper(space, self.data, start, step, + if start == 0 and step == 1 and len(sequence2) <= slicelength: + self._delete_from_start(slicelength - len(sequence2)) + slicelength = len(sequence2) + if slicelength == 0: + return + data = self._data + start += self._offset + #stop += self._offset---not used + else: + data = self.getdata() + _setitem_slice_helper(space, data, start, step, slicelength, sequence2, empty_elem='\x00') else: idx = space.getindex_w(w_index, space.w_IndexError, "bytearray") @@ -387,14 +399,27 @@ def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): - XXX - start, stop, step, slicelength = w_idx.indices4(space, - len(self.data)) - _delitem_slice_helper(space, self.data, start, step, slicelength) + start, stop, step, slicelength = w_idx.indices4(space, self._len()) + if start == 0 and step == 1: + self._delete_from_start(slicelength) + else: + _delitem_slice_helper(space, self.getdata(), + start, step, slicelength) else: - XXX # case of del b[0] idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray") - del self._data[self._fixindex(space, idx)] + idx = self._fixindex(space, idx) + if idx == self._offset: # fast path for del x[0] or del[-len] + self._delete_from_start(1) + else: + del self._data[idx] + + def _delete_from_start(self, n): + 
self._offset += n + jit.conditional_call(self._offset > len(self._data) / 2 + 15, + self._shrink_after_delete_from_start) + + def _shrink_after_delete_from_start(self): + self.getdata() def descr_append(self, space, w_item): self._data.append(getbytevalue(space, w_item)) @@ -407,32 +432,33 @@ def descr_insert(self, space, w_idx, w_other): where = space.int_w(w_idx) - length = len(self.data) + val = getbytevalue(space, w_other) + data = self.getdata() + length = len(data) index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - self.data.insert(index, val) + data.insert(index, val) @unwrap_spec(w_idx=WrappedDefault(-1)) def descr_pop(self, space, w_idx): index = space.int_w(w_idx) - try: - result = self.data.pop(index) - except IndexError: - if not self.data: - raise oefmt(space.w_IndexError, "pop from empty bytearray") - raise oefmt(space.w_IndexError, "pop index out of range") + if self._len() == 0: + raise oefmt(space.w_IndexError, "pop from empty bytearray") + index = self._fixindex(space, index, "pop index out of range") + result = self._data.pop(index) return space.wrap(ord(result)) def descr_remove(self, space, w_char): char = space.int_w(space.index(w_char)) - try: - self.data.remove(chr(char)) - except ValueError: - raise oefmt(space.w_ValueError, "value not found in bytearray") + _data = self._data + for index in range(self._offset, len(_data)): + if ord(_data[index]) == char: + del _data[index] + return + raise oefmt(space.w_ValueError, "value not found in bytearray") def descr_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return self._new(self.data + w_other.data) + return self._new(self.getdata() + w_other.getdata()) if isinstance(w_other, W_BytesObject): return self._add(self._op_val(space, w_other)) @@ -447,19 +473,21 @@ @specialize.argtype(1) def _add(self, other): - return self._new(self.data + [other[i] for i in range(len(other))]) + return self._new(self.getdata() + [other[i] for i in 
range(len(other))]) def descr_reverse(self, space): - self.data.reverse() + self.getdata().reverse() def descr_clear(self, space): - self.data = [] + self._data = [] + self._offset = 0 def descr_copy(self, space): - return self._new(self.data[:]) + return self._new(self._data[self._offset:]) def descr_hex(self, space): - return _array_to_hexstring(space, self.data, len(self.data), True) + data = self.getdata() + return _array_to_hexstring(space, data, len(data), True) def descr_mod(self, space, w_values): return mod_format(space, self, w_values, fmt_type=FORMAT_BYTEARRAY) @@ -1235,37 +1263,48 @@ class BytearrayBuffer(Buffer): _immutable_ = True + readonly = False - def __init__(self, data, offset): - self._data = data - self._offset = offset + def __init__(self, ba): + self.ba = ba # the W_BytearrayObject def getlength(self): - return len(self.data) + return self.ba._len() def getitem(self, index): - return self.data[index] + ba = self.ba + return ba._data[ba._offset + index] def setitem(self, index, char): - self.data[index] = char + ba = self.ba + ba._data[ba._offset + index] = char def getslice(self, start, stop, step, size): if size == 0: return "" if step == 1: assert 0 <= start <= stop - if start == 0 and stop == len(self.data): - return "".join(self.data) - return "".join(self.data[start:stop]) + ba = self.ba + start += ba._offset + stop += ba._offset + data = ba._data + if start != 0 or stop != len(data): + data = data[start:stop] + return "".join(data) return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): # No bounds checks. 
+ ba = self.ba + start += ba._offset for i in range(len(string)): - self.data[start + i] = string[i] + ba._data[start + i] = string[i] def get_raw_address(self): - return nonmoving_raw_ptr_for_resizable_list(self.data) + ba = self.ba + p = nonmoving_raw_ptr_for_resizable_list(ba._data) + p = rffi.ptradd(p, ba._offset) + return p @specialize.argtype(1) @@ -1276,3 +1315,6 @@ if selfvalue[i] > buffer[i]: return 1 return 0 + +def _tweak_for_tests(w_bytearray): + "Patched in test_bytearray.py" diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -1,10 +1,27 @@ # coding: utf-8 +import random from pypy import conftest +from pypy.objspace.std import bytearrayobject + +class DontAccess(object): + pass +dont_access = DontAccess() + class AppTestBytesArray: def setup_class(cls): cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) + def tweak(w_bytearray): + n = random.randint(-3, 16) + if n > 0: + w_bytearray._data = [dont_access] * n + w_bytearray._data + w_bytearray._offset += n + cls._old_tweak = [bytearrayobject._tweak_for_tests] + bytearrayobject._tweak_for_tests = tweak + + def teardown_class(cls): + [bytearrayobject._tweak_for_tests] = cls._old_tweak def test_basics(self): b = bytearray() @@ -345,6 +362,20 @@ b.reverse() assert b == bytearray(b'olleh') + def test_delitem_from_front(self): + b = bytearray(b'abcdefghij') + del b[0] + del b[0] + assert len(b) == 8 + assert b == bytearray(b'cdefghij') + del b[-8] + del b[-7] + assert len(b) == 6 + assert b == bytearray(b'efghij') + del b[:3] + assert len(b) == 3 + assert b == bytearray(b'hij') + def test_delitem(self): b = bytearray(b'abc') del b[1] @@ -427,6 +458,18 @@ raises(TypeError, b.extend, [object()]) raises(TypeError, b.extend, "unicode") + def test_setitem_from_front(self): + b = bytearray(b'abcdefghij') + b[:2] = b'' + assert len(b) == 
8 + assert b == bytearray(b'cdefghij') + b[:3] = b'X' + assert len(b) == 6 + assert b == bytearray(b'Xfghij') + b[:2] = b'ABC' + assert len(b) == 7 + assert b == bytearray(b'ABCghij') + def test_setslice(self): b = bytearray(b'hello') b[:] = [ord(c) for c in 'world'] From pypy.commits at gmail.com Sat Dec 3 06:40:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:26 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: Bytearrays have amortized constant-time "del a[:n]" in CPython 3.5 Message-ID: <5842af2a.e626c20a.a1dc0.860e@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88840:387fcaa5c779 Date: 2016-12-03 10:07 +0100 http://bitbucket.org/pypy/pypy/changeset/387fcaa5c779/ Log: Bytearrays have amortized constant-time "del a[:n]" in CPython 3.5 From pypy.commits at gmail.com Sat Dec 3 06:40:28 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:28 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: in-progress Message-ID: <5842af2c.542e1c0a.730e4.998b@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88841:a338838c2d41 Date: 2016-12-03 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/a338838c2d41/ Log: in-progress diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -6,7 +6,7 @@ from rpython.rlib.buffer import Buffer from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder, ByteListBuilder -from rpython.rlib.debug import check_list_of_chars +from rpython.rlib.debug import check_list_of_chars, check_nonneg from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rgc import (resizable_list_supporting_raw_ptr, nonmoving_raw_ptr_for_resizable_list) @@ -32,23 +32,32 @@ def __init__(self, data): check_list_of_chars(data) - self.data = resizable_list_supporting_raw_ptr(data) + self._data = 
resizable_list_supporting_raw_ptr(data) + self._offset = 0 + # NOTE: the bytearray data is in 'self._data[self._offset:]' + + def getdata(self): + if self._offset > 0: + self._data = self._data[self._offset:] + self._offset = 0 + return self._data def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, ''.join(self.data)) + return "%s(%s)" % (self.__class__.__name__, + ''.join(self._data[self._offset:])) def buffer_w(self, space, flags): - return BytearrayBuffer(self.data, False) + return BytearrayBuffer(self._data, self._offset) def bytearray_list_of_chars_w(self, space): - return self.data + return self.getdata() def nonmovable_carray(self, space): - return BytearrayBuffer(self.data, False).get_raw_address() + return BytearrayBuffer(self._data, self._offset).get_raw_address() def _new(self, value): - if value is self.data: + if value is self._data: value = value[:] return W_BytearrayObject(value) @@ -62,17 +71,29 @@ return W_BytearrayObject([]) def _len(self): - return len(self.data) + return len(self._data) - self._offset + + def _fixindex(self, space, index): + # for getitem/setitem/delitem of a single char + if index >= 0: + index += self._offset + oob = index >= len(self._data) + else: + index += len(self._data) # count from the end + oob = index < self._offset + if oob: + raise oefmt(space.w_IndexError, "bytearray index out of range") + check_nonneg(index) + return index def _getitem_result(self, space, index): - try: - character = self.data[index] - except IndexError: - raise oefmt(space.w_IndexError, "bytearray index out of range") + character = self._data[self._fixindex(space, index)] return space.wrap(ord(character)) def _val(self, space): - return self.data + # XXX review the calls of _val and think if some of them should + # XXX not force a copy of self._data if _offset > 0 + return self.getdata() @staticmethod def _use_rstr_ops(space, w_other): @@ -151,11 +172,12 @@ return False def ord(self, 
space): - if len(self.data) != 1: + length = self._len() + if length != 1: raise oefmt(space.w_TypeError, "ord() expected a character, but string of length %d " - "found", len(self.data)) - return space.wrap(ord(self.data[0])) + "found", length) + return space.wrap(ord(self._data[self._offset])) @staticmethod def descr_new(space, w_bytearraytype, __args__): @@ -168,7 +190,7 @@ w_dict = space.w_None return space.newtuple([ space.type(self), space.newtuple([ - space.wrap(''.join(self.data).decode('latin-1')), + space.wrap(''.join(self.getdata()).decode('latin-1')), space.wrap('latin-1')]), w_dict]) @@ -186,10 +208,11 @@ def descr_init(self, space, w_source=None, encoding=None, errors=None): assert isinstance(self, W_BytearrayObject) data = [c for c in newbytesdata_w(space, w_source, encoding, errors)] - self.data = resizable_list_supporting_raw_ptr(data) + self._data = resizable_list_supporting_raw_ptr(data) + self._offset = 0 def descr_repr(self, space): - s = self.data + s = self.getdata() # Good default if there are no replacements. 
buf = StringBuilder(len("bytearray(b'')") + len(s)) @@ -237,7 +260,7 @@ def descr_eq(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return space.newbool(self.data == w_other.data) + return space.newbool(self.getdata() == w_other.getdata()) try: buffer = _get_buffer(space, w_other) @@ -257,7 +280,7 @@ def descr_ne(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return space.newbool(self.data != w_other.data) + return space.newbool(self.getdata() != w_other.getdata()) try: buffer = _get_buffer(space, w_other) @@ -279,7 +302,7 @@ value = self._val(space) if isinstance(w_other, W_BytearrayObject): - other = w_other.data + other = w_other.getdata() other_len = len(other) cmp = _memcmp(value, other, min(len(value), len(other))) elif isinstance(w_other, W_BytesObject): @@ -324,7 +347,7 @@ def descr_inplace_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - self.data += w_other.data + self._data += w_other.getdata() return self if isinstance(w_other, W_BytesObject): @@ -336,7 +359,7 @@ @specialize.argtype(1) def _inplace_add(self, other): for i in range(len(other)): - self.data.append(other[i]) + self._data.append(other[i]) def descr_inplace_mul(self, space, w_times): try: @@ -345,11 +368,13 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented raise - self.data *= times + data = self.getdata() + data *= times return self def descr_setitem(self, space, w_index, w_other): if isinstance(w_index, W_SliceObject): + XXX oldsize = len(self.data) start, stop, step, slicelength = w_index.indices4(space, oldsize) sequence2 = [c for c in makebytesdata_w(space, w_other)] @@ -357,33 +382,28 @@ slicelength, sequence2, empty_elem='\x00') else: idx = space.getindex_w(w_index, space.w_IndexError, "bytearray") - try: - self.data[idx] = getbytevalue(space, w_other) - except IndexError: - raise oefmt(space.w_IndexError, "bytearray index out of range") + newvalue = getbytevalue(space, w_other) + 
self._data[self._fixindex(space, idx)] = newvalue def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): + XXX start, stop, step, slicelength = w_idx.indices4(space, len(self.data)) _delitem_slice_helper(space, self.data, start, step, slicelength) else: + XXX # case of del b[0] idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray") - try: - del self.data[idx] - except IndexError: - raise oefmt(space.w_IndexError, - "bytearray deletion index out of range") + del self._data[self._fixindex(space, idx)] def descr_append(self, space, w_item): - self.data.append(getbytevalue(space, w_item)) + self._data.append(getbytevalue(space, w_item)) def descr_extend(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - self.data += w_other.data + self._data += w_other.getdata() else: - self.data += [c for c in makebytesdata_w(space, w_other)] - return self + self._inplace_add(makebytesdata_w(space, w_other)) def descr_insert(self, space, w_idx, w_other): where = space.int_w(w_idx) @@ -391,7 +411,6 @@ index = get_positive_index(where, length) val = getbytevalue(space, w_other) self.data.insert(index, val) - return space.w_None @unwrap_spec(w_idx=WrappedDefault(-1)) def descr_pop(self, space, w_idx): @@ -1217,9 +1236,9 @@ class BytearrayBuffer(Buffer): _immutable_ = True - def __init__(self, data, readonly): - self.data = data - self.readonly = readonly + def __init__(self, data, offset): + self._data = data + self._offset = offset def getlength(self): return len(self.data) From pypy.commits at gmail.com Sat Dec 3 06:40:32 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:32 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: .find, .index, .__contains__ should work without forcing _offset==0 Message-ID: <5842af30.e6b0c20a.2bab1.8244@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88843:a862812d5e11 Date: 2016-12-03 11:54 +0100 http://bitbucket.org/pypy/pypy/changeset/a862812d5e11/ Log: 
.find, .index, .__contains__ should work without forcing _offset==0 diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -18,7 +18,7 @@ getbytevalue, makebytesdata_w, newbytesdata_w) from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.objspace.std.sliceobject import W_SliceObject +from pypy.objspace.std.sliceobject import W_SliceObject, unwrap_start_stop from pypy.objspace.std.stringmethods import StringMethods, _get_buffer from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.util import get_positive_index @@ -498,7 +498,13 @@ return self._getitem_result(space, index) def descr_alloc(self, space): - return space.wrap(self._len() + 1) + return space.wrap(len(self._data) + 1) # includes the _offset part + + def _convert_idx_params(self, space, w_start, w_end): + # optimization: this version doesn't force getdata() + start, end = unwrap_start_stop(space, self._len(), w_start, w_end) + ofs = self._offset + return (self._data, start + ofs, end + ofs, ofs) # ____________________________________________________________ diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -25,7 +25,8 @@ value = self._val(space) lenself = len(value) start, end = unwrap_start_stop(space, lenself, w_start, w_end) - return (value, start, end) + # the None means "no offset"; see bytearrayobject.py + return (value, start, end, None) @staticmethod def descr_maketrans(space, w_type, w_from, w_to): @@ -79,12 +80,12 @@ return W_StringIterObject(self, self._iter_getitem_result) def descr_contains(self, space, w_sub): - value = self._val(space) + value, start, end, _ = self._convert_idx_params(space, None, None) other = self._op_val(space, w_sub, 
allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.find(other) + res = value.find(other, start, end) else: - res = find(value, other, 0, len(value)) + res = find(value, other, start, end) return space.newbool(res >= 0) def descr_add(self, space, w_other): @@ -179,7 +180,7 @@ return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): - value, start, end = self._convert_idx_params(space, w_start, w_end) + value, start, end, _ = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -247,27 +248,31 @@ return distance def descr_find(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): res = value.find(sub, start, end) else: res = find(value, sub, start, end) + if ofs is not None and res >= 0: + res -= ofs return space.wrap(res) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): res = value.rfind(sub, start, end) else: res = rfind(value, sub, start, end) + if ofs is not None and res >= 0: + res -= ofs return space.wrap(res) def descr_index(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -278,10 +283,12 @@ if res < 0: raise oefmt(space.w_ValueError, "substring not found in " + self._KIND2 + ".index") + if ofs is not None: + res -= ofs return 
space.wrap(res) def descr_rindex(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -292,6 +299,8 @@ if res < 0: raise oefmt(space.w_ValueError, "substring not found in " + self._KIND2 + ".rindex") + if ofs is not None: + res -= ofs return space.wrap(res) @specialize.arg(2) @@ -595,7 +604,7 @@ return "bytes" def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, _ = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_prefix, space.w_tuple): return self._startswith_tuple(space, value, w_prefix, start, end) try: @@ -620,7 +629,7 @@ return startswith(value, prefix, start, end) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, _ = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): return self._endswith_tuple(space, value, w_suffix, start, end) try: From pypy.commits at gmail.com Sat Dec 3 06:40:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:34 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: a test to check explicitly for non-forced cases Message-ID: <5842af32.e644c20a.2628e.85b0@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88844:9d0fb0aff02d Date: 2016-12-03 12:20 +0100 http://bitbucket.org/pypy/pypy/changeset/9d0fb0aff02d/ Log: a test to check explicitly for non-forced cases diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -20,6 +20,7 @@ from pypy.interpreter.typedef 
import TypeDef from pypy.objspace.std.sliceobject import W_SliceObject, unwrap_start_stop from pypy.objspace.std.stringmethods import StringMethods, _get_buffer +from pypy.objspace.std.stringmethods import _descr_getslice_slowpath from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.util import get_positive_index from pypy.objspace.std.formatting import mod_format, FORMAT_BYTEARRAY @@ -93,8 +94,6 @@ return space.wrap(ord(character)) def _val(self, space): - # XXX review the calls of _val and think if some of them should - # XXX not force a copy of self._data if _offset > 0 return self.getdata() @staticmethod @@ -377,7 +376,7 @@ def descr_setitem(self, space, w_index, w_other): if isinstance(w_index, W_SliceObject): - sequence2 = [c for c in makebytesdata_w(space, w_other)] + sequence2 = makebytesdata_w(space, w_other) oldsize = self._len() start, stop, step, slicelength = w_index.indices4(space, oldsize) if start == 0 and step == 1 and len(sequence2) <= slicelength: @@ -385,11 +384,8 @@ slicelength = len(sequence2) if slicelength == 0: return - data = self._data - start += self._offset - #stop += self._offset---not used - else: - data = self.getdata() + data = self._data + start += self._offset _setitem_slice_helper(space, data, start, step, slicelength, sequence2, empty_elem='\x00') else: @@ -506,6 +502,24 @@ ofs = self._offset return (self._data, start + ofs, end + ofs, ofs) + def descr_getitem(self, space, w_index): + # optimization: this version doesn't force getdata() + if isinstance(w_index, W_SliceObject): + start, stop, step, sl = w_index.indices4(space, self._len()) + if sl == 0: + return self._empty() + elif step == 1: + assert start >= 0 and stop >= 0 + ofs = self._offset + return self._new(self._data[start + ofs : stop + ofs]) + else: + start += self._offset + ret = _descr_getslice_slowpath(self._data, start, step, sl) + return self._new_from_list(ret) + + index = space.getindex_w(w_index, space.w_IndexError, self._KIND1) + 
return self._getitem_result(space, index) + # ____________________________________________________________ # helpers for slow paths, moved out because they contain loops @@ -1247,21 +1261,6 @@ "attempt to assign sequence of size %d to extended slice " "of size %d", len2, slicelength) - if sequence2 is items: - if step > 0: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. - i = len2 - 1 - start += i*step - while i >= 0: - items[start] = sequence2[i] - start -= step - i -= 1 - return - else: - # Make a shallow copy to more easily handle the reversal case - sequence2 = list(sequence2) for i in range(len2): items[start] = sequence2[i] start += step diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -85,6 +85,7 @@ raises(IndexError, b.__getitem__, 4) assert b[1:5] == bytearray(b'est') assert b[slice(1,5)] == bytearray(b'est') + assert b[1:5:2] == bytearray(b'et') def test_arithmetic(self): b1 = bytearray(b'hello ') @@ -627,3 +628,52 @@ def test_constructor_typeerror(self): raises(TypeError, bytearray, b'', 'ascii') raises(TypeError, bytearray, '') + + def test_dont_force_offset(self): + def make(x=b'abcdefghij', shift=3): + b = bytearray(b'?'*shift + x) + repr(b) # force 'b' + del b[:shift] # add shift to b._offset + return b + assert make(shift=0).__alloc__() == 11 + # + x = make(shift=3) + assert x.__alloc__() == 14 + repr(x) + assert x.__alloc__() == 11 + # + x = make(shift=3) + assert memoryview(x)[1] == ord('b') + assert x.__alloc__() == 14 + assert len(x) == 10 + assert x.__alloc__() == 14 + assert x[3] == ord('d') + assert x[-3] == ord('h') + assert x.__alloc__() == 14 + assert x[3:-3] == b'defg' + assert x[-3:3:-1] == b'hgfe' + assert x.__alloc__() == 14 + # + x = make(shift=3) + x[3] = 
ord('D') + assert x.__alloc__() == 14 + x[4:6] = b'EF' + assert x.__alloc__() == 14 + x[6:8] = b'G' + assert x.__alloc__() == 13 + x[-2:4:-2] = b'*/' + assert x.__alloc__() == 13 + assert x == bytearray(b'abcDE/G*j') + # + x = make(shift=3) + assert x.__alloc__() == 14 + del x[:1] + assert x.__alloc__() == 13 + del x[0:5] + assert x.__alloc__() == 8 + del x[0] + assert len(x) == 4 + assert x.__alloc__() == 7 + del x[1] + assert x.__alloc__() == 4 # forced + assert x == bytearray(b'gij') From pypy.commits at gmail.com Sat Dec 3 06:40:35 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:40:35 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: More tests Message-ID: <5842af33.624fc20a.7231f.859b@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88845:4af53ab59362 Date: 2016-12-03 12:39 +0100 http://bitbucket.org/pypy/pypy/changeset/4af53ab59362/ Log: More tests diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -214,14 +214,15 @@ _tweak_for_tests(self) def descr_repr(self, space): - s = self.getdata() + s, start, end, _ = self._convert_idx_params(space, None, None) # Good default if there are no replacements. 
- buf = StringBuilder(len("bytearray(b'')") + len(s)) + buf = StringBuilder(len("bytearray(b'')") + (end - start)) buf.append("bytearray(b") quote = "'" - for c in s: + for i in range(start, end): + c = s[i] if c == '"': quote = "'" break @@ -229,7 +230,7 @@ quote = '"' buf.append(quote) - for i in range(len(s)): + for i in range(start, end): c = s[i] if c == '\\' or c == "'": @@ -399,8 +400,8 @@ if start == 0 and step == 1: self._delete_from_start(slicelength) else: - _delitem_slice_helper(space, self.getdata(), - start, step, slicelength) + _delitem_slice_helper(space, self._data, + start + self._offset, step, slicelength) else: idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray") idx = self._fixindex(space, idx) @@ -411,7 +412,7 @@ def _delete_from_start(self, n): self._offset += n - jit.conditional_call(self._offset > len(self._data) / 2 + 15, + jit.conditional_call(self._offset > len(self._data) / 2, self._shrink_after_delete_from_start) def _shrink_after_delete_from_start(self): diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -632,17 +632,13 @@ def test_dont_force_offset(self): def make(x=b'abcdefghij', shift=3): b = bytearray(b'?'*shift + x) - repr(b) # force 'b' + b + b'' # force 'b' del b[:shift] # add shift to b._offset return b assert make(shift=0).__alloc__() == 11 # x = make(shift=3) assert x.__alloc__() == 14 - repr(x) - assert x.__alloc__() == 11 - # - x = make(shift=3) assert memoryview(x)[1] == ord('b') assert x.__alloc__() == 14 assert len(x) == 10 @@ -653,6 +649,8 @@ assert x[3:-3] == b'defg' assert x[-3:3:-1] == b'hgfe' assert x.__alloc__() == 14 + assert repr(x) == "bytearray(b'abcdefghij')" + assert x.__alloc__() == 14 # x = make(shift=3) x[3] = ord('D') @@ -665,15 +663,31 @@ assert x.__alloc__() == 13 assert x == bytearray(b'abcDE/G*j') # - x = make(shift=3) - 
assert x.__alloc__() == 14 + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) + assert len(x) == 26 + assert x.__alloc__() == 38 del x[:1] - assert x.__alloc__() == 13 + assert len(x) == 25 + assert x.__alloc__() == 38 del x[0:5] - assert x.__alloc__() == 8 + assert len(x) == 20 + assert x.__alloc__() == 38 del x[0] - assert len(x) == 4 - assert x.__alloc__() == 7 + assert len(x) == 19 + assert x.__alloc__() == 38 + del x[0] # too much emptiness, forces now + assert len(x) == 18 + assert x.__alloc__() == 19 + # + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) + del x[:9] # too much emptiness, forces now + assert len(x) == 17 + assert x.__alloc__() == 18 + # + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) + assert x.__alloc__() == 38 del x[1] - assert x.__alloc__() == 4 # forced - assert x == bytearray(b'gij') + assert x.__alloc__() == 37 # not forced, but the list shrank + del x[3:10:2] + assert x.__alloc__() == 33 + assert x == bytearray(b'acdfhjlmnopqrstuvwxyz') From pypy.commits at gmail.com Sat Dec 3 06:46:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:46:22 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: passing test Message-ID: <5842b08e.c5311c0a.28c18.9f5c@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88846:e4d0a0ab66d2 Date: 2016-12-03 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/e4d0a0ab66d2/ Log: passing test diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -229,6 +229,10 @@ assert bytearray(b'ab').endswith(bytearray(b''), 2) is True assert bytearray(b'ab').endswith(bytearray(b''), 3) is False + def test_startswith_self(self): + b = bytearray(b'abcd') + assert b.startswith(b) + def test_stringlike_conversions(self): # methods that should return bytearray (and not str) def check(result, expected): From pypy.commits 
at gmail.com Sat Dec 3 06:59:53 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 03:59:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: translation fixes Message-ID: <5842b3b9.0a74c20a.f20e7.8b22@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88847:701ac5e8d4dc Date: 2016-12-03 12:56 +0100 http://bitbucket.org/pypy/pypy/changeset/701ac5e8d4dc/ Log: translation fixes diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -37,6 +37,7 @@ self._data = resizable_list_supporting_raw_ptr(data) self._offset = 0 # NOTE: the bytearray data is in 'self._data[self._offset:]' + check_nonneg(self._offset) _tweak_for_tests(self) def getdata(self): @@ -80,12 +81,12 @@ # for getitem/setitem/delitem of a single char if index >= 0: index += self._offset - oob = index >= len(self._data) + if index >= len(self._data): + raise OperationError(space.w_IndexError, space.wrap(errmsg)) else: index += len(self._data) # count from the end - oob = index < self._offset - if oob: - raise OperationError(space.w_IndexError, space.wrap(errmsg)) + if index < self._offset: + raise OperationError(space.w_IndexError, space.wrap(errmsg)) check_nonneg(index) return index @@ -411,6 +412,7 @@ del self._data[idx] def _delete_from_start(self, n): + assert n >= 0 self._offset += n jit.conditional_call(self._offset > len(self._data) / 2, self._shrink_after_delete_from_start) From pypy.commits at gmail.com Sat Dec 3 07:28:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 04:28:00 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: fix jit.conditional_call Message-ID: <5842ba50.313ac20a.8a7be.9ab6@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88848:93e5ab1a4ea7 Date: 2016-12-03 13:27 +0100 http://bitbucket.org/pypy/pypy/changeset/93e5ab1a4ea7/ Log: fix jit.conditional_call diff --git 
a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -415,10 +415,7 @@ assert n >= 0 self._offset += n jit.conditional_call(self._offset > len(self._data) / 2, - self._shrink_after_delete_from_start) - - def _shrink_after_delete_from_start(self): - self.getdata() + _shrink_after_delete_from_start, self) def descr_append(self, space, w_item): self._data.append(getbytevalue(space, w_item)) @@ -1326,3 +1323,6 @@ def _tweak_for_tests(w_bytearray): "Patched in test_bytearray.py" + +def _shrink_after_delete_from_start(w_bytearray): + w_bytearray.getdata() From pypy.commits at gmail.com Sat Dec 3 09:51:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 06:51:34 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge py3.5-bytearray Message-ID: <5842dbf6.cf3fc20a.2de93.c768@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88851:ea1f6159d661 Date: 2016-12-03 15:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ea1f6159d661/ Log: hg merge py3.5-bytearray The bytearray type now supports an offset from the start, which makes a few operations like "del b[:10]" amortized constant-time. Only common operations support that; other operations force a copy (once) if there is an offset. The motivation is that CPython 3.5 does the same: there is app-level code around that relies on that. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -6,10 +6,11 @@ from rpython.rlib.buffer import Buffer from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder, ByteListBuilder -from rpython.rlib.debug import check_list_of_chars +from rpython.rlib.debug import check_list_of_chars, check_nonneg from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rgc import (resizable_list_supporting_raw_ptr, nonmoving_raw_ptr_for_resizable_list) +from rpython.rlib import jit from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -17,8 +18,9 @@ getbytevalue, makebytesdata_w, newbytesdata_w) from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.objspace.std.sliceobject import W_SliceObject +from pypy.objspace.std.sliceobject import W_SliceObject, unwrap_start_stop from pypy.objspace.std.stringmethods import StringMethods, _get_buffer +from pypy.objspace.std.stringmethods import _descr_getslice_slowpath from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.util import get_positive_index from pypy.objspace.std.formatting import mod_format, FORMAT_BYTEARRAY @@ -32,23 +34,34 @@ def __init__(self, data): check_list_of_chars(data) - self.data = resizable_list_supporting_raw_ptr(data) + self._data = resizable_list_supporting_raw_ptr(data) + self._offset = 0 + # NOTE: the bytearray data is in 'self._data[self._offset:]' + check_nonneg(self._offset) + _tweak_for_tests(self) + + def getdata(self): + if self._offset > 0: + self._data = self._data[self._offset:] + self._offset = 0 + return self._data def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (self.__class__.__name__, ''.join(self.data)) + return "%s(%s)" % 
(self.__class__.__name__, + ''.join(self._data[self._offset:])) def buffer_w(self, space, flags): - return BytearrayBuffer(self.data, False) + return BytearrayBuffer(self) def bytearray_list_of_chars_w(self, space): - return self.data + return self.getdata() def nonmovable_carray(self, space): - return BytearrayBuffer(self.data, False).get_raw_address() + return BytearrayBuffer(self).get_raw_address() def _new(self, value): - if value is self.data: + if value is self._data: value = value[:] return W_BytearrayObject(value) @@ -62,17 +75,27 @@ return W_BytearrayObject([]) def _len(self): - return len(self.data) + return len(self._data) - self._offset + + def _fixindex(self, space, index, errmsg="bytearray index out of range"): + # for getitem/setitem/delitem of a single char + if index >= 0: + index += self._offset + if index >= len(self._data): + raise OperationError(space.w_IndexError, space.wrap(errmsg)) + else: + index += len(self._data) # count from the end + if index < self._offset: + raise OperationError(space.w_IndexError, space.wrap(errmsg)) + check_nonneg(index) + return index def _getitem_result(self, space, index): - try: - character = self.data[index] - except IndexError: - raise oefmt(space.w_IndexError, "bytearray index out of range") + character = self._data[self._fixindex(space, index)] return space.wrap(ord(character)) def _val(self, space): - return self.data + return self.getdata() @staticmethod def _use_rstr_ops(space, w_other): @@ -151,11 +174,12 @@ return False def ord(self, space): - if len(self.data) != 1: + length = self._len() + if length != 1: raise oefmt(space.w_TypeError, "ord() expected a character, but string of length %d " - "found", len(self.data)) - return space.wrap(ord(self.data[0])) + "found", length) + return space.wrap(ord(self._data[self._offset])) @staticmethod def descr_new(space, w_bytearraytype, __args__): @@ -168,7 +192,7 @@ w_dict = space.w_None return space.newtuple([ space.type(self), space.newtuple([ - 
space.wrap(''.join(self.data).decode('latin-1')), + space.wrap(''.join(self.getdata()).decode('latin-1')), space.wrap('latin-1')]), w_dict]) @@ -186,17 +210,20 @@ def descr_init(self, space, w_source=None, encoding=None, errors=None): assert isinstance(self, W_BytearrayObject) data = [c for c in newbytesdata_w(space, w_source, encoding, errors)] - self.data = resizable_list_supporting_raw_ptr(data) + self._data = resizable_list_supporting_raw_ptr(data) + self._offset = 0 + _tweak_for_tests(self) def descr_repr(self, space): - s = self.data + s, start, end, _ = self._convert_idx_params(space, None, None) # Good default if there are no replacements. - buf = StringBuilder(len("bytearray(b'')") + len(s)) + buf = StringBuilder(len("bytearray(b'')") + (end - start)) buf.append("bytearray(b") quote = "'" - for c in s: + for i in range(start, end): + c = s[i] if c == '"': quote = "'" break @@ -204,7 +231,7 @@ quote = '"' buf.append(quote) - for i in range(len(s)): + for i in range(start, end): c = s[i] if c == '\\' or c == "'": @@ -237,7 +264,7 @@ def descr_eq(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return space.newbool(self.data == w_other.data) + return space.newbool(self.getdata() == w_other.getdata()) try: buffer = _get_buffer(space, w_other) @@ -257,7 +284,7 @@ def descr_ne(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return space.newbool(self.data != w_other.data) + return space.newbool(self.getdata() != w_other.getdata()) try: buffer = _get_buffer(space, w_other) @@ -279,7 +306,7 @@ value = self._val(space) if isinstance(w_other, W_BytearrayObject): - other = w_other.data + other = w_other.getdata() other_len = len(other) cmp = _memcmp(value, other, min(len(value), len(other))) elif isinstance(w_other, W_BytesObject): @@ -324,7 +351,7 @@ def descr_inplace_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - self.data += w_other.data + self._data += w_other.getdata() return self if 
isinstance(w_other, W_BytesObject): @@ -336,7 +363,7 @@ @specialize.argtype(1) def _inplace_add(self, other): for i in range(len(other)): - self.data.append(other[i]) + self._data.append(other[i]) def descr_inplace_mul(self, space, w_times): try: @@ -345,75 +372,89 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented raise - self.data *= times + data = self.getdata() + data *= times return self def descr_setitem(self, space, w_index, w_other): if isinstance(w_index, W_SliceObject): - oldsize = len(self.data) + sequence2 = makebytesdata_w(space, w_other) + oldsize = self._len() start, stop, step, slicelength = w_index.indices4(space, oldsize) - sequence2 = [c for c in makebytesdata_w(space, w_other)] - _setitem_slice_helper(space, self.data, start, step, + if start == 0 and step == 1 and len(sequence2) <= slicelength: + self._delete_from_start(slicelength - len(sequence2)) + slicelength = len(sequence2) + if slicelength == 0: + return + data = self._data + start += self._offset + _setitem_slice_helper(space, data, start, step, slicelength, sequence2, empty_elem='\x00') else: idx = space.getindex_w(w_index, space.w_IndexError, "bytearray") - try: - self.data[idx] = getbytevalue(space, w_other) - except IndexError: - raise oefmt(space.w_IndexError, "bytearray index out of range") + newvalue = getbytevalue(space, w_other) + self._data[self._fixindex(space, idx)] = newvalue def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): - start, stop, step, slicelength = w_idx.indices4(space, - len(self.data)) - _delitem_slice_helper(space, self.data, start, step, slicelength) + start, stop, step, slicelength = w_idx.indices4(space, self._len()) + if start == 0 and step == 1: + self._delete_from_start(slicelength) + else: + _delitem_slice_helper(space, self._data, + start + self._offset, step, slicelength) else: idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray") - try: - del self.data[idx] - except IndexError: - raise 
oefmt(space.w_IndexError, - "bytearray deletion index out of range") + idx = self._fixindex(space, idx) + if idx == self._offset: # fast path for del x[0] or del[-len] + self._delete_from_start(1) + else: + del self._data[idx] + + def _delete_from_start(self, n): + assert n >= 0 + self._offset += n + jit.conditional_call(self._offset > len(self._data) / 2, + _shrink_after_delete_from_start, self) def descr_append(self, space, w_item): - self.data.append(getbytevalue(space, w_item)) + self._data.append(getbytevalue(space, w_item)) def descr_extend(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - self.data += w_other.data + self._data += w_other.getdata() else: - self.data += [c for c in makebytesdata_w(space, w_other)] - return self + self._inplace_add(makebytesdata_w(space, w_other)) def descr_insert(self, space, w_idx, w_other): where = space.int_w(w_idx) - length = len(self.data) + val = getbytevalue(space, w_other) + data = self.getdata() + length = len(data) index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - self.data.insert(index, val) - return space.w_None + data.insert(index, val) @unwrap_spec(w_idx=WrappedDefault(-1)) def descr_pop(self, space, w_idx): index = space.int_w(w_idx) - try: - result = self.data.pop(index) - except IndexError: - if not self.data: - raise oefmt(space.w_IndexError, "pop from empty bytearray") - raise oefmt(space.w_IndexError, "pop index out of range") + if self._len() == 0: + raise oefmt(space.w_IndexError, "pop from empty bytearray") + index = self._fixindex(space, index, "pop index out of range") + result = self._data.pop(index) return space.wrap(ord(result)) def descr_remove(self, space, w_char): char = space.int_w(space.index(w_char)) - try: - self.data.remove(chr(char)) - except ValueError: - raise oefmt(space.w_ValueError, "value not found in bytearray") + _data = self._data + for index in range(self._offset, len(_data)): + if ord(_data[index]) == char: + del _data[index] + 
return + raise oefmt(space.w_ValueError, "value not found in bytearray") def descr_add(self, space, w_other): if isinstance(w_other, W_BytearrayObject): - return self._new(self.data + w_other.data) + return self._new(self.getdata() + w_other.getdata()) if isinstance(w_other, W_BytesObject): return self._add(self._op_val(space, w_other)) @@ -428,19 +469,21 @@ @specialize.argtype(1) def _add(self, other): - return self._new(self.data + [other[i] for i in range(len(other))]) + return self._new(self.getdata() + [other[i] for i in range(len(other))]) def descr_reverse(self, space): - self.data.reverse() + self.getdata().reverse() def descr_clear(self, space): - self.data = [] + self._data = [] + self._offset = 0 def descr_copy(self, space): - return self._new(self.data[:]) + return self._new(self._data[self._offset:]) def descr_hex(self, space): - return _array_to_hexstring(space, self.data, len(self.data), True) + data = self.getdata() + return _array_to_hexstring(space, data, len(data), True) def descr_mod(self, space, w_values): return mod_format(space, self, w_values, fmt_type=FORMAT_BYTEARRAY) @@ -451,7 +494,31 @@ return self._getitem_result(space, index) def descr_alloc(self, space): - return space.wrap(self._len() + 1) + return space.wrap(len(self._data) + 1) # includes the _offset part + + def _convert_idx_params(self, space, w_start, w_end): + # optimization: this version doesn't force getdata() + start, end = unwrap_start_stop(space, self._len(), w_start, w_end) + ofs = self._offset + return (self._data, start + ofs, end + ofs, ofs) + + def descr_getitem(self, space, w_index): + # optimization: this version doesn't force getdata() + if isinstance(w_index, W_SliceObject): + start, stop, step, sl = w_index.indices4(space, self._len()) + if sl == 0: + return self._empty() + elif step == 1: + assert start >= 0 and stop >= 0 + ofs = self._offset + return self._new(self._data[start + ofs : stop + ofs]) + else: + start += self._offset + ret = 
_descr_getslice_slowpath(self._data, start, step, sl) + return self._new_from_list(ret) + + index = space.getindex_w(w_index, space.w_IndexError, self._KIND1) + return self._getitem_result(space, index) # ____________________________________________________________ @@ -1194,21 +1261,6 @@ "attempt to assign sequence of size %d to extended slice " "of size %d", len2, slicelength) - if sequence2 is items: - if step > 0: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. - i = len2 - 1 - start += i*step - while i >= 0: - items[start] = sequence2[i] - start -= step - i -= 1 - return - else: - # Make a shallow copy to more easily handle the reversal case - sequence2 = list(sequence2) for i in range(len2): items[start] = sequence2[i] start += step @@ -1216,37 +1268,48 @@ class BytearrayBuffer(Buffer): _immutable_ = True + readonly = False - def __init__(self, data, readonly): - self.data = data - self.readonly = readonly + def __init__(self, ba): + self.ba = ba # the W_BytearrayObject def getlength(self): - return len(self.data) + return self.ba._len() def getitem(self, index): - return self.data[index] + ba = self.ba + return ba._data[ba._offset + index] def setitem(self, index, char): - self.data[index] = char + ba = self.ba + ba._data[ba._offset + index] = char def getslice(self, start, stop, step, size): if size == 0: return "" if step == 1: assert 0 <= start <= stop - if start == 0 and stop == len(self.data): - return "".join(self.data) - return "".join(self.data[start:stop]) + ba = self.ba + start += ba._offset + stop += ba._offset + data = ba._data + if start != 0 or stop != len(data): + data = data[start:stop] + return "".join(data) return Buffer.getslice(self, start, stop, step, size) def setslice(self, start, string): # No bounds checks. 
+ ba = self.ba + start += ba._offset for i in range(len(string)): - self.data[start + i] = string[i] + ba._data[start + i] = string[i] def get_raw_address(self): - return nonmoving_raw_ptr_for_resizable_list(self.data) + ba = self.ba + p = nonmoving_raw_ptr_for_resizable_list(ba._data) + p = rffi.ptradd(p, ba._offset) + return p @specialize.argtype(1) @@ -1257,3 +1320,9 @@ if selfvalue[i] > buffer[i]: return 1 return 0 + +def _tweak_for_tests(w_bytearray): + "Patched in test_bytearray.py" + +def _shrink_after_delete_from_start(w_bytearray): + w_bytearray.getdata() diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -25,7 +25,8 @@ value = self._val(space) lenself = len(value) start, end = unwrap_start_stop(space, lenself, w_start, w_end) - return (value, start, end) + # the None means "no offset"; see bytearrayobject.py + return (value, start, end, None) @staticmethod def descr_maketrans(space, w_type, w_from, w_to): @@ -79,12 +80,12 @@ return W_StringIterObject(self, self._iter_getitem_result) def descr_contains(self, space, w_sub): - value = self._val(space) + value, start, end, _ = self._convert_idx_params(space, None, None) other = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): - res = value.find(other) + res = value.find(other, start, end) else: - res = find(value, other, 0, len(value)) + res = find(value, other, start, end) return space.newbool(res >= 0) def descr_add(self, space, w_other): @@ -179,7 +180,7 @@ return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): - value, start, end = self._convert_idx_params(space, w_start, w_end) + value, start, end, _ = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -247,27 +248,31 @@ return distance def descr_find(self, space, w_sub, w_start=None, 
w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): res = value.find(sub, start, end) else: res = find(value, sub, start, end) + if ofs is not None and res >= 0: + res -= ofs return space.wrap(res) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): res = value.rfind(sub, start, end) else: res = rfind(value, sub, start, end) + if ofs is not None and res >= 0: + res -= ofs return space.wrap(res) def descr_index(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -278,10 +283,12 @@ if res < 0: raise oefmt(space.w_ValueError, "substring not found in " + self._KIND2 + ".index") + if ofs is not None: + res -= ofs return space.wrap(res) def descr_rindex(self, space, w_sub, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) sub = self._op_val(space, w_sub, allow_char=True) if self._use_rstr_ops(space, w_sub): @@ -292,6 +299,8 @@ if res < 0: raise oefmt(space.w_ValueError, "substring not found in " + self._KIND2 + ".rindex") + if ofs is not None: + res -= ofs return space.wrap(res) @specialize.arg(2) @@ -595,7 +604,7 @@ return "bytes" def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, 
end, _ = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_prefix, space.w_tuple): return self._startswith_tuple(space, value, w_prefix, start, end) try: @@ -620,7 +629,7 @@ return startswith(value, prefix, start, end) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, w_end) + value, start, end, _ = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): return self._endswith_tuple(space, value, w_suffix, start, end) try: diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -1,10 +1,27 @@ # coding: utf-8 +import random from pypy import conftest +from pypy.objspace.std import bytearrayobject + +class DontAccess(object): + pass +dont_access = DontAccess() + class AppTestBytesArray: def setup_class(cls): cls.w_runappdirect = cls.space.wrap(conftest.option.runappdirect) + def tweak(w_bytearray): + n = random.randint(-3, 16) + if n > 0: + w_bytearray._data = [dont_access] * n + w_bytearray._data + w_bytearray._offset += n + cls._old_tweak = [bytearrayobject._tweak_for_tests] + bytearrayobject._tweak_for_tests = tweak + + def teardown_class(cls): + [bytearrayobject._tweak_for_tests] = cls._old_tweak def test_basics(self): b = bytearray() @@ -68,6 +85,7 @@ raises(IndexError, b.__getitem__, 4) assert b[1:5] == bytearray(b'est') assert b[slice(1,5)] == bytearray(b'est') + assert b[1:5:2] == bytearray(b'et') def test_arithmetic(self): b1 = bytearray(b'hello ') @@ -211,6 +229,10 @@ assert bytearray(b'ab').endswith(bytearray(b''), 2) is True assert bytearray(b'ab').endswith(bytearray(b''), 3) is False + def test_startswith_self(self): + b = bytearray(b'abcd') + assert b.startswith(b) + def test_stringlike_conversions(self): # methods that should return bytearray (and not 
str) def check(result, expected): @@ -345,6 +367,20 @@ b.reverse() assert b == bytearray(b'olleh') + def test_delitem_from_front(self): + b = bytearray(b'abcdefghij') + del b[0] + del b[0] + assert len(b) == 8 + assert b == bytearray(b'cdefghij') + del b[-8] + del b[-7] + assert len(b) == 6 + assert b == bytearray(b'efghij') + del b[:3] + assert len(b) == 3 + assert b == bytearray(b'hij') + def test_delitem(self): b = bytearray(b'abc') del b[1] @@ -427,6 +463,18 @@ raises(TypeError, b.extend, [object()]) raises(TypeError, b.extend, "unicode") + def test_setitem_from_front(self): + b = bytearray(b'abcdefghij') + b[:2] = b'' + assert len(b) == 8 + assert b == bytearray(b'cdefghij') + b[:3] = b'X' + assert len(b) == 6 + assert b == bytearray(b'Xfghij') + b[:2] = b'ABC' + assert len(b) == 7 + assert b == bytearray(b'ABCghij') + def test_setslice(self): b = bytearray(b'hello') b[:] = [ord(c) for c in 'world'] @@ -584,3 +632,78 @@ def test_constructor_typeerror(self): raises(TypeError, bytearray, b'', 'ascii') raises(TypeError, bytearray, '') + + def test_dont_force_offset(self): + def make(x=b'abcdefghij', shift=3): + b = bytearray(b'?'*shift + x) + b + b'' # force 'b' + del b[:shift] # add shift to b._offset + return b + assert make(shift=0).__alloc__() == 11 + # + x = make(shift=3) + assert x.__alloc__() == 14 + assert memoryview(x)[1] == ord('b') + assert x.__alloc__() == 14 + assert len(x) == 10 + assert x.__alloc__() == 14 + assert x[3] == ord('d') + assert x[-3] == ord('h') + assert x.__alloc__() == 14 + assert x[3:-3] == b'defg' + assert x[-3:3:-1] == b'hgfe' + assert x.__alloc__() == 14 + assert repr(x) == "bytearray(b'abcdefghij')" + assert x.__alloc__() == 14 + # + x = make(shift=3) + x[3] = ord('D') + assert x.__alloc__() == 14 + x[4:6] = b'EF' + assert x.__alloc__() == 14 + x[6:8] = b'G' + assert x.__alloc__() == 13 + x[-2:4:-2] = b'*/' + assert x.__alloc__() == 13 + assert x == bytearray(b'abcDE/G*j') + # + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) 
+ assert len(x) == 26 + assert x.__alloc__() == 38 + del x[:1] + assert len(x) == 25 + assert x.__alloc__() == 38 + del x[0:5] + assert len(x) == 20 + assert x.__alloc__() == 38 + del x[0] + assert len(x) == 19 + assert x.__alloc__() == 38 + del x[0] # too much emptiness, forces now + assert len(x) == 18 + assert x.__alloc__() == 19 + # + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) + del x[:9] # too much emptiness, forces now + assert len(x) == 17 + assert x.__alloc__() == 18 + # + x = make(b'abcdefghijklmnopqrstuvwxyz', shift=11) + assert x.__alloc__() == 38 + del x[1] + assert x.__alloc__() == 37 # not forced, but the list shrank + del x[3:10:2] + assert x.__alloc__() == 33 + assert x == bytearray(b'acdfhjlmnopqrstuvwxyz') + # + x = make(shift=3) + assert b'f' in x + assert b'ef' in x + assert b'efx' not in x + assert b'very long string longer than the original' not in x + assert x.__alloc__() == 14 + assert x.find(b'f') == 5 + assert x.rfind(b'f', 2, 11) == 5 + assert x.find(b'fe') == -1 + assert x.index(b'f', 2, 11) == 5 + assert x.__alloc__() == 14 From pypy.commits at gmail.com Sat Dec 3 09:51:30 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 06:51:30 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: extra test Message-ID: <5842dbf2.542e1c0a.730e4.d3e2@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88849:5827553866d2 Date: 2016-12-03 15:46 +0100 http://bitbucket.org/pypy/pypy/changeset/5827553866d2/ Log: extra test diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -695,3 +695,15 @@ del x[3:10:2] assert x.__alloc__() == 33 assert x == bytearray(b'acdfhjlmnopqrstuvwxyz') + # + x = make(shift=3) + assert b'f' in x + assert b'ef' in x + assert b'efx' not in x + assert b'very long string longer than the original' not in x + assert 
x.__alloc__() == 14 + assert x.find(b'f') == 5 + assert x.rfind(b'f', 2, 11) == 5 + assert x.find(b'fe') == -1 + assert x.index(b'f', 2, 11) == 5 + assert x.__alloc__() == 14 From pypy.commits at gmail.com Sat Dec 3 09:51:32 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 06:51:32 -0800 (PST) Subject: [pypy-commit] pypy py3.5-bytearray: close branch, ready to merge Message-ID: <5842dbf4.c89cc20a.df59e.bd03@mx.google.com> Author: Armin Rigo Branch: py3.5-bytearray Changeset: r88850:7fe4287f177e Date: 2016-12-03 15:46 +0100 http://bitbucket.org/pypy/pypy/changeset/7fe4287f177e/ Log: close branch, ready to merge From pypy.commits at gmail.com Sat Dec 3 10:13:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 07:13:34 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: done, branch py3.5-bytearray Message-ID: <5842e11e.0209c20a.75a7.c5fb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5757:996ca1cb997a Date: 2016-12-03 16:13 +0100 http://bitbucket.org/pypy/extradoc/changeset/996ca1cb997a/ Log: done, branch py3.5-bytearray diff --git a/planning/py3.5/milestone-1-progress.rst b/planning/py3.5/milestone-1-progress.rst --- a/planning/py3.5/milestone-1-progress.rst +++ b/planning/py3.5/milestone-1-progress.rst @@ -25,7 +25,7 @@ * Windows: issue 2310: kill WindowsError -* bytearray: 'del x[:10]' is now amortized constant-time +* bytearray: 'del x[:10]' is now amortized constant-time (DONE) * check that 'import array', say, finds and loads a file array.py, whereas 'import gc' does not ('gc' is a built-in module in CPython but From pypy.commits at gmail.com Sat Dec 3 11:27:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 08:27:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: Start a branch for PEP 475: Retry system calls failing with EINTR Message-ID: <5842f268.c9b3c20a.3c7fe.e0cc@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88852:7fd14fd04bce Date: 
2016-12-03 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/7fd14fd04bce/ Log: Start a branch for PEP 475: Retry system calls failing with EINTR From pypy.commits at gmail.com Sat Dec 3 11:27:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 08:27:21 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: pep475-style test and fix for os.read() Message-ID: <5842f269.88711c0a.65c47.f511@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88853:1ddec1199da6 Date: 2016-12-03 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/1ddec1199da6/ Log: pep475-style test and fix for os.read() diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -561,7 +561,19 @@ @specialize.arg(3) def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError', - w_exception_class=None, w_filename2=None): + w_exception_class=None, w_filename2=None, eintr_retry=False): + """A double API here: + + * if eintr_retry is False, always return the OperationError to + be raised by the caller, which might be about EINTR + (checksignals() is still called here). + + * if eintr_retry is True (PEP 475 compliant API for retrying + system calls failing with EINTR), then this function raises + the OperationError directly, or for EINTR it calls + checksignals() and returns None in case the original + operation should be retried. 
+ """ assert isinstance(e, OSError) if _WINDOWS and isinstance(e, WindowsError): @@ -571,6 +583,8 @@ if errno == EINTR: space.getexecutioncontext().checksignals() + if eintr_retry: + return None try: msg = strerror(errno) @@ -591,11 +605,14 @@ else: w_error = space.call_function(exc, space.wrap(errno), space.wrap(msg)) - return OperationError(exc, w_error) + operr = OperationError(exc, w_error) + if eintr_retry: + raise operr + return operr @specialize.arg(3) def wrap_oserror(space, e, filename=None, exception_name='w_OSError', - w_exception_class=None, filename2=None): + w_exception_class=None, filename2=None, eintr_retry=False): w_filename = None w_filename2 = None if filename is not None: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -263,12 +263,13 @@ @unwrap_spec(fd=c_int, length=int) def read(space, fd, length): """Read data from a file descriptor.""" - try: - s = os.read(fd, length) - except OSError as e: - raise wrap_oserror(space, e) - else: - return space.newbytes(s) + while True: + try: + s = os.read(fd, length) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return space.newbytes(s) @unwrap_spec(fd=c_int) def write(space, fd, w_data): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -7,6 +7,7 @@ from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace +from pypy.interpreter.gateway import interp2app from rpython.translator.c.test.test_extfunc import need_sparse_files from rpython.rlib import rposix @@ -1365,3 +1366,40 @@ if os.name == 'posix': assert os.open in os.supports_dir_fd # openat() + +class AppTestPep475Retry: + spaceconfig = {'usemodules': USEMODULES} + + def setup_class(cls): + if os.name != 'posix': + skip("xxx tests are posix-only") 
+ if cls.runappdirect: + skip("xxx does not work with -A") + + def fd_data_after_delay(space): + g = os.popen("sleep 5 && echo hello", "r") + cls._keepalive_g = g + return space.wrap(g.fileno()) + + cls.w_posix = space.appexec([], GET_POSIX) + cls.w_fd_data_after_delay = cls.space.wrap( + interp2app(fd_data_after_delay)) + + def test_pep475_retry_read(self): + import _signal as signal + signalled = [] + + def foo(*args): + signalled.append("ALARM") + + signal.signal(signal.SIGALRM, foo) + try: + fd = self.fd_data_after_delay() + signal.alarm(1) + got = self.posix.read(fd, 100) + self.posix.close(fd) + finally: + signal.signal(signal.SIGALRM, signal.SIG_DFL) + + assert signalled != [] + assert got.startswith(b'h') From pypy.commits at gmail.com Sat Dec 3 11:53:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 08:53:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: More os functions Message-ID: <5842f87a.c64bc20a.dbbb2.f258@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88855:85ec8b34fd75 Date: 2016-12-03 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/85ec8b34fd75/ Log: More os functions diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -333,19 +333,23 @@ def fsync(space, w_fd): """Force write of file with filedescriptor to disk.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fsync(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fsync(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def fdatasync(space, w_fd): """Force write of file with filedescriptor to disk. 
Does not force update of metadata.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fdatasync(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fdatasync(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def sync(space): """Force write of everything to disk.""" @@ -355,10 +359,12 @@ """Change to the directory of the given file descriptor. fildes must be opened on a directory, not a file.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fchdir(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fchdir(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) # ____________________________________________________________ @@ -423,12 +429,13 @@ def fstat(space, fd): """Perform a stat system call on the file referenced to by an open file descriptor.""" - try: - st = rposix_stat.fstat(fd) - except OSError as e: - raise wrap_oserror(space, e) - else: - return build_stat_result(space, st) + while True: + try: + st = rposix_stat.fstat(fd) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return build_stat_result(space, st) @unwrap_spec( path=path_or_fd(allow_fd=True), @@ -511,12 +518,13 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): - try: - st = rposix_stat.fstatvfs(fd) - except OSError as e: - raise wrap_oserror(space, e) - else: - return build_statvfs_result(space, st) + while True: + try: + st = rposix_stat.fstatvfs(fd) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return build_statvfs_result(space, st) def statvfs(space, w_path): @@ -991,10 +999,12 @@ rposix.chmod(path, mode) def _chmod_fd(space, fd, mode): - try: - os.fchmod(fd, mode) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fchmod(fd, mode) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) @unwrap_spec(fd=c_int, mode=c_int) @@ -2009,10 +2019,12 @@ Change the owner and group id of 
the file given by file descriptor fd to the numeric uid and gid. Equivalent to os.chown(fd, uid, gid).""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fchown(fd, uid, gid) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fchown(fd, uid, gid) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def getloadavg(space): try: From pypy.commits at gmail.com Sat Dec 3 11:53:12 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 08:53:12 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: open(), write()---I think I'm already no longer trying to make up test Message-ID: <5842f878.90a81c0a.c000.0538@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88854:c845268b64bf Date: 2016-12-03 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/c845268b64bf/ Log: open(), write()---I think I'm already no longer trying to make up test cases, which is a mess diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -226,15 +226,21 @@ If it is unavailable, using it will raise a NotImplementedError.""" if rposix.O_CLOEXEC is not None: flags |= rposix.O_CLOEXEC + while True: + try: + if rposix.HAVE_OPENAT and dir_fd != DEFAULT_DIR_FD: + path = space.fsencode_w(w_path) + fd = rposix.openat(path, flags, mode, dir_fd) + else: + fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) try: - if rposix.HAVE_OPENAT and dir_fd != DEFAULT_DIR_FD: - path = space.fsencode_w(w_path) - fd = rposix.openat(path, flags, mode, dir_fd) - else: - fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) _open_inhcache.set_non_inheritable(fd) except OSError as e: - raise wrap_oserror2(space, e, w_path) + rposix.c_close(fd) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.wrap(fd) 
@unwrap_spec(fd=c_int, position=r_longlong, how=c_int) @@ -276,12 +282,13 @@ """Write a string to a file descriptor. Return the number of bytes actually written, which may be smaller than len(data).""" data = space.getarg_w('y*', w_data) - try: - res = os.write(fd, data.as_str()) - except OSError as e: - raise wrap_oserror(space, e) - else: - return space.wrap(res) + while True: + try: + res = os.write(fd, data.as_str()) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return space.wrap(res) @unwrap_spec(fd=c_int) def close(space, fd): @@ -911,9 +918,14 @@ "Create a pipe. Returns (read_end, write_end)." try: fd1, fd2 = rposix.pipe(rposix.O_CLOEXEC or 0) + except OSError as e: + raise wrap_oserror(space, e) + try: _pipe_inhcache.set_non_inheritable(fd1) _pipe_inhcache.set_non_inheritable(fd2) except OSError as e: + rposix.c_close(fd2) + rposix.c_close(fd1) raise wrap_oserror(space, e) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) From pypy.commits at gmail.com Sat Dec 3 11:53:15 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 08:53:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix for some app-level untranslated tests, e.g. skipped ones Message-ID: <5842f87b.c11d1c0a.a900f.fd79@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88856:eb6adc5edd60 Date: 2016-12-03 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/eb6adc5edd60/ Log: fix for some app-level untranslated tests, e.g. 
skipped ones diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -256,13 +256,22 @@ # MemoryError is raised at just the wrong place executioncontext = self.space.getexecutioncontext() exc_on_enter = executioncontext.sys_exc_info() - try: - return self.execute_frame() - finally: - if we_are_translated(): + if we_are_translated(): + try: + return self.execute_frame() + finally: executioncontext.set_sys_exc_info(exc_on_enter) - else: + else: + # untranslated, we check consistency, but not in case of + # interp-level exceptions different than OperationError + # (e.g. a random failing test, or a pytest Skipped exc.) + try: + w_res = self.execute_frame() assert exc_on_enter is executioncontext.sys_exc_info() + except OperationError: + assert exc_on_enter is executioncontext.sys_exc_info() + raise + return w_res run._always_inline_ = True def initialize_as_generator(self, name, qualname): From pypy.commits at gmail.com Sat Dec 3 12:14:12 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:14:12 -0800 (PST) Subject: [pypy-commit] pypy default: os.wait3() and os.wait4() need to raise OSError somewhere Message-ID: <5842fd64.8f95c20a.505c6.f780@mx.google.com> Author: Armin Rigo Branch: Changeset: r88857:034b40a26b6b Date: 2016-12-03 18:13 +0100 http://bitbucket.org/pypy/pypy/changeset/034b40a26b6b/ Log: os.wait3() and os.wait4() need to raise OSError somewhere diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,4 @@ +import os from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,6 +8,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait3(status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -16,6 +20,9 @@ status = ffi.new("int *") ru = ffi.new("struct 
rusage *") pid = lib.wait4(pid, status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -34,3 +34,7 @@ assert os.WEXITSTATUS(status) == exit_status assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) + +def test_errors(): + py.test.raises(OSError, _pypy_wait.wait3, -999) + py.test.raises(OSError, _pypy_wait.wait4, -999, -999) From pypy.commits at gmail.com Sat Dec 3 12:20:18 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:20:18 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: some more funcs Message-ID: <5842fed2.aaa3c20a.711ce.fb13@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88858:a1b55ed2014f Date: 2016-12-03 18:14 +0100 http://bitbucket.org/pypy/pypy/changeset/a1b55ed2014f/ Log: some more funcs diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -306,10 +306,12 @@ @unwrap_spec(fd=c_int, length=r_longlong) def ftruncate(space, fd, length): """Truncate a file (by file descriptor) to a specified length.""" - try: - os.ftruncate(fd, length) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.ftruncate(fd, length) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def truncate(space, w_path, w_length): """Truncate a file to a specified length.""" @@ -1072,14 +1074,16 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. 
If it is unavailable, using it will raise a NotImplementedError.""" - try: - if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: - path = space.fsencode_w(w_path) - rposix.mkfifoat(path, mode, dir_fd) - else: - dispatch_filename(rposix.mkfifo)(space, w_path, mode) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + while True: + try: + if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: + path = space.fsencode_w(w_path) + rposix.mkfifoat(path, mode, dir_fd) + else: + dispatch_filename(rposix.mkfifo)(space, w_path, mode) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) @unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD(rposix.HAVE_MKNODAT)) def mknod(space, w_path, mode=0600, device=0, @@ -1097,14 +1101,16 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" - try: - if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: - fname = space.fsencode_w(w_path) - rposix.mknodat(fname, mode, device, dir_fd) - else: - dispatch_filename(rposix.mknod)(space, w_path, mode, device) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + while True: + try: + if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: + fname = space.fsencode_w(w_path) + rposix.mknodat(fname, mode, device, dir_fd) + else: + dispatch_filename(rposix.mknod)(space, w_path, mode, device) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) @unwrap_spec(mask=c_int) def umask(space, mask): From pypy.commits at gmail.com Sat Dec 3 12:20:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:20:22 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: adapt os.wait3(), os.wait4() Message-ID: <5842fed6.d5091c0a.a6dc6.041d@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88860:0dc7003fdff5 Date: 2016-12-03 18:19 +0100 
http://bitbucket.org/pypy/pypy/changeset/0dc7003fdff5/ Log: adapt os.wait3(), os.wait4() diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,4 +1,5 @@ import os +from errno import EINTR from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,10 +8,13 @@ def wait3(options): status = ffi.new("int *") ru = ffi.new("struct rusage *") - pid = lib.wait3(status, options, ru) - if pid == -1: + while True: + pid = lib.wait3(status, options, ru) + if pid != -1: + break errno = ffi.errno - raise OSError(errno, os.strerror(errno)) + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -19,10 +23,13 @@ def wait4(pid, options): status = ffi.new("int *") ru = ffi.new("struct rusage *") - pid = lib.wait4(pid, status, options, ru) - if pid == -1: + while True: + pid = lib.wait4(pid, status, options, ru) + if pid != -1: + break errno = ffi.errno - raise OSError(errno, os.strerror(errno)) + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) From pypy.commits at gmail.com Sat Dec 3 12:20:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:20:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: hg merge default Message-ID: <5842fed4.6737c20a.352bb.f722@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88859:8474d821a0d7 Date: 2016-12-03 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/8474d821a0d7/ Log: hg merge default diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,4 @@ +import os from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,6 +8,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait3(status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = 
_make_struct_rusage(ru) @@ -16,6 +20,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait4(pid, status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -34,3 +34,7 @@ assert os.WEXITSTATUS(status) == exit_status assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) + +def test_errors(): + py.test.raises(OSError, _pypy_wait.wait3, -999) + py.test.raises(OSError, _pypy_wait.wait4, -999, -999) From pypy.commits at gmail.com Sat Dec 3 12:23:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:23:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: last os function Message-ID: <5842ff88.96a61c0a.ed422.1004@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88861:5772ffc179ab Date: 2016-12-03 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/5772ffc179ab/ Log: last os function diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1306,12 +1306,16 @@ Wait for completion of a given child process. 
""" - try: - pid, status = os.waitpid(pid, options) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + pid, status = os.waitpid(pid, options) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) return space.newtuple([space.wrap(pid), space.wrap(status)]) +# missing: waitid() + @unwrap_spec(status=c_int) def _exit(space, status): os._exit(status) From pypy.commits at gmail.com Sat Dec 3 12:28:53 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 03 Dec 2016 09:28:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: actually nothing to do on os.close() Message-ID: <584300d5.876ec20a.35ac6.f63c@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88862:75d80872f12a Date: 2016-12-03 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/75d80872f12a/ Log: actually nothing to do on os.close() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -293,6 +293,10 @@ @unwrap_spec(fd=c_int) def close(space, fd): """Close a file descriptor (for low level IO).""" + # PEP 475 note: os.close() must not retry upon EINTR. Like in + # previous versions of Python it raises OSError in this case. + # The text of PEP 475 seems to suggest that EINTR is eaten and + # hidden from app-level, but it is not the case in CPython 3.5.2. try: os.close(fd) except OSError as e: From pypy.commits at gmail.com Sat Dec 3 15:26:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 12:26:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Use SubBuffer to ensure correct behaviour wrt. cffi, and simplify RawBuffer Message-ID: <58432a67.54b31c0a.70faf.4892@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88863:620222c0c183 Date: 2016-12-03 20:25 +0000 http://bitbucket.org/pypy/pypy/changeset/620222c0c183/ Log: Use SubBuffer to ensure correct behaviour wrt. 
cffi, and simplify RawBuffer diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,7 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from rpython.rlib.buffer import Buffer +from rpython.rlib.buffer import Buffer, SubBuffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib import rposix @@ -149,20 +149,18 @@ class RawBuffer(Buffer): _immutable_ = True - def __init__(self, buf, start, length): - self.buf = buf - self.start = start - self.length = length + def __init__(self, data): + self.buf = data self.readonly = False def getlength(self): - return self.length + return len(self.buf) def getitem(self, index): return self.buf[index] def setitem(self, index, char): - self.buf[self.start + index] = char + self.buf[index] = char def get_raw_address(self): return nonmoving_raw_ptr_for_resizable_list(self.buf) @@ -577,7 +575,8 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) - w_buf = space.newbuffer(RawBuffer(buffer, start, length)) + start = intmask(start) + w_buf = space.newbuffer(SubBuffer(RawBuffer(buffer), start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) From pypy.commits at gmail.com Sat Dec 3 20:33:40 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 17:33:40 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Simplify RawBuffer again and use it more Message-ID: <58437274.43e61c0a.04f2.988c@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88864:5e7a2ffdceb2 Date: 2016-12-04 01:33 +0000 http://bitbucket.org/pypy/pypy/changeset/5e7a2ffdceb2/ Log: Simplify RawBuffer again and use it more diff --git a/pypy/module/_io/interp_bufferedio.py 
b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -149,12 +149,13 @@ class RawBuffer(Buffer): _immutable_ = True - def __init__(self, data): - self.buf = data + def __init__(self, n): + self.length = n + self.buf = ['\0'] * n self.readonly = False def getlength(self): - return len(self.buf) + return self.length def getitem(self, index): return self.buf[index] @@ -203,7 +204,7 @@ raise oefmt(space.w_ValueError, "buffer size must be strictly positive") - self.buffer = ['\0'] * self.buffer_size + self.buffer = RawBuffer(self.buffer_size) self.lock = TryLock(space) @@ -424,11 +425,7 @@ return written def _raw_write(self, space, start, end): - # XXX inefficient - l = [] - for i in range(start, end): - l.append(self.buffer[i]) - return self._write(space, ''.join(l)) + return self._write(space, self.buffer[start:end]) def detach_w(self, space): self._check_init(space) @@ -489,7 +486,7 @@ # buffer. have = self._readahead() if have > 0: - data = ''.join(self.buffer[self.pos:self.pos+have]) + data = self.buffer[self.pos:self.pos+have] return space.newbytes(data) # Fill the buffer from the raw stream, and copy it to the result @@ -499,7 +496,7 @@ except BlockingIOError: size = 0 self.pos = 0 - data = ''.join(self.buffer[:size]) + data = self.buffer[0:size] return space.newbytes(data) @unwrap_spec(size=int) @@ -536,7 +533,7 @@ if size > have: size = have endpos = self.pos + size - data = ''.join(self.buffer[self.pos:endpos]) + data = self.buffer[self.pos:endpos] self.pos = endpos return space.newbytes(data) @@ -548,7 +545,7 @@ current_size = self._readahead() data = None if current_size: - data = ''.join(self.buffer[self.pos:self.pos + current_size]) + data = self.buffer[self.pos:self.pos + current_size] builder.append(data) self.pos += current_size # We're going past the buffer's bounds, flush it @@ -576,7 +573,7 @@ def _raw_read(self, space, buffer, start, length): length = intmask(length) start = 
intmask(start) - w_buf = space.newbuffer(SubBuffer(RawBuffer(buffer), start, length)) + w_buf = space.newbuffer(SubBuffer(buffer, start, length)) while True: try: w_size = space.call_method(self.w_raw, "readinto", w_buf) @@ -616,12 +613,12 @@ if n <= current_size: return self._read_fast(n) - result_buffer = ['\0'] * n + result_buffer = RawBuffer(n) remaining = n written = 0 if current_size: - for i in range(current_size): - result_buffer[written + i] = self.buffer[self.pos + i] + result_buffer.setslice( + written, self.buffer[self.pos:self.pos + current_size]) remaining -= current_size written += current_size self.pos += current_size @@ -643,7 +640,7 @@ return None size = 0 if size == 0: - return ''.join(result_buffer[:written]) + return result_buffer[0:written] remaining -= size written += size @@ -665,14 +662,13 @@ if remaining > 0: if size > remaining: size = remaining - for i in range(size): - result_buffer[written + i] = self.buffer[self.pos + i] + result_buffer.setslice( + written, self.buffer[self.pos:self.pos + size]) self.pos += size - written += size remaining -= size - return ''.join(result_buffer[:written]) + return result_buffer[0:written] def _read_fast(self, n): """Read n bytes from the buffer if it can, otherwise return None. 
@@ -680,7 +676,7 @@ current_size = self._readahead() if n <= current_size: endpos = self.pos + n - res = ''.join(self.buffer[self.pos:endpos]) + res = self.buffer[self.pos:endpos] self.pos = endpos return res return None @@ -703,11 +699,11 @@ else: pos = -1 if pos >= 0: - w_res = space.newbytes(''.join(self.buffer[self.pos:pos+1])) + w_res = space.newbytes(self.buffer[self.pos:pos+1]) self.pos = pos + 1 return w_res if have == limit: - w_res = space.newbytes(''.join(self.buffer[self.pos:self.pos+have])) + w_res = space.newbytes(self.buffer[self.pos:self.pos+have]) self.pos += have return w_res @@ -716,7 +712,7 @@ # Now we try to get some more from the raw stream chunks = [] if have > 0: - chunks.extend(self.buffer[self.pos:self.pos+have]) + chunks.append(self.buffer[self.pos:self.pos+have]) written += have self.pos += have if limit >= 0: @@ -734,13 +730,14 @@ pos = 0 found = False while pos < have: - c = self.buffer[pos] + # 'buffer.buf[]' instead of 'buffer[]' because RPython... + c = self.buffer.buf[pos] pos += 1 if c == '\n': self.pos = pos found = True break - chunks.extend(self.buffer[0:pos]) + chunks.append(self.buffer[0:pos]) if found: break if have == limit: @@ -767,7 +764,6 @@ size = len(data) with self.lock: - if (not (self.readable and self.read_end != -1) and not (self.writable and self.write_end != -1)): self.pos = 0 @@ -794,7 +790,8 @@ self._reader_reset_buf() # Make some place by shifting the buffer for i in range(self.write_pos, self.write_end): - self.buffer[i - self.write_pos] = self.buffer[i] + # XXX: messing with buffer internals + self.buffer.buf[i - self.write_pos] = self.buffer.buf[i] self.write_end -= self.write_pos self.raw_pos -= self.write_pos newpos = self.pos - self.write_pos From pypy.commits at gmail.com Sat Dec 3 22:49:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 19:49:02 -0800 (PST) Subject: [pypy-commit] pypy py3.5: backout d267f0decb98: SSL works now Message-ID: 
<5843922e.c9b3c20a.3c7fe.9b71@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88865:b19db5975aaa Date: 2016-12-04 03:23 +0000 http://bitbucket.org/pypy/pypy/changeset/b19db5975aaa/ Log: backout d267f0decb98: SSL works now diff --git a/lib-python/3/test/test_logging.py b/lib-python/3/test/test_logging.py --- a/lib-python/3/test/test_logging.py +++ b/lib-python/3/test/test_logging.py @@ -1665,7 +1665,7 @@ logger = logging.getLogger("http") root_logger = self.root_logger root_logger.removeHandler(self.root_logger.handlers[0]) - for secure in (False,): # XXX: disable SSL tests until it works + for secure in (False, True): addr = ('localhost', 0) if secure: try: From pypy.commits at gmail.com Sat Dec 3 22:49:04 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 19:49:04 -0800 (PST) Subject: [pypy-commit] pypy py3.5: unskip passing tests Message-ID: <58439230.e6b0c20a.2bab1.982b@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88866:0b14b7b2521b Date: 2016-12-04 03:31 +0000 http://bitbucket.org/pypy/pypy/changeset/0b14b7b2521b/ Log: unskip passing tests diff --git a/lib-python/3/test/test_urllib2_localnet.py b/lib-python/3/test/test_urllib2_localnet.py --- a/lib-python/3/test/test_urllib2_localnet.py +++ b/lib-python/3/test/test_urllib2_localnet.py @@ -544,7 +544,6 @@ self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"]) def test_https(self): - self.skipTest('Segfaults on PyPy') handler = self.start_https_server() context = ssl.create_default_context(cafile=CERT_localhost) data = self.urlopen("https://localhost:%s/bizarre" % handler.port, context=context) @@ -574,7 +573,6 @@ cadefault=True) def test_https_sni(self): - self.skipTest('Segfaults on PyPy') if ssl is None: self.skipTest("ssl module required") if not ssl.HAS_SNI: From pypy.commits at gmail.com Sat Dec 3 22:49:06 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 19:49:06 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Unskip tests that 
don't hang any more Message-ID: <58439232.272cc20a.faf1f.9ad3@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88867:421ca854cd86 Date: 2016-12-04 03:48 +0000 http://bitbucket.org/pypy/pypy/changeset/421ca854cd86/ Log: Unskip tests that don't hang any more diff --git a/lib-python/3/test/test_asyncio/test_events.py b/lib-python/3/test/test_asyncio/test_events.py --- a/lib-python/3/test/test_asyncio/test_events.py +++ b/lib-python/3/test/test_asyncio/test_events.py @@ -909,7 +909,6 @@ sslcontext = self._create_ssl_context(certfile, keyfile) return self._make_unix_server(factory, ssl=sslcontext) - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') def test_create_server_ssl(self): proto = MyProto(loop=self.loop) @@ -946,7 +945,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_server_ssl() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_create_unix_server_ssl(self): @@ -982,7 +980,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_unix_server_ssl() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') def test_create_server_ssl_verify_failed(self): proto = MyProto(loop=self.loop) @@ -1016,7 +1013,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_server_ssl_verify_failed() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_create_unix_server_ssl_verify_failed(self): @@ -1052,7 +1048,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_unix_server_ssl_verify_failed() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') def 
test_create_server_ssl_match_failed(self): proto = MyProto(loop=self.loop) @@ -1085,7 +1080,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_server_ssl_match_failed() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_create_unix_server_ssl_verified(self): @@ -1116,7 +1110,6 @@ with test_utils.force_legacy_ssl_support(): self.test_create_unix_server_ssl_verified() - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') @unittest.skipIf(ssl is None, 'No ssl module') def test_create_server_ssl_verified(self): proto = MyProto(loop=self.loop) @@ -1146,7 +1139,6 @@ server.close() self.loop.run_until_complete(proto.done) - @unittest.skipIf('__pypy__' in sys.modules, 'XXX: broken ssl') def test_legacy_create_server_ssl_verified(self): with test_utils.force_legacy_ssl_support(): self.test_create_server_ssl_verified() From pypy.commits at gmail.com Sun Dec 4 01:43:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 22:43:46 -0800 (PST) Subject: [pypy-commit] pypy vendor/stdlib-3.5: Import stdlib 3.5.2 (CPython commit 4def2a2901a5) Message-ID: <5843bb22.a351c20a.59eaa.ba3d@mx.google.com> Author: Ronan Lamy Branch: vendor/stdlib-3.5 Changeset: r88868:c3b14617fbd7 Date: 2016-12-04 05:10 +0000 http://bitbucket.org/pypy/pypy/changeset/c3b14617fbd7/ Log: Import stdlib 3.5.2 (CPython commit 4def2a2901a5) diff too long, truncating to 2000 out of 28601 lines diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -156,7 +156,7 @@ __slots__ = () @abstractmethod - async def __aiter__(self): + def __aiter__(self): return AsyncIterator() @classmethod @@ -176,7 +176,7 @@ """Return the next item or raise StopAsyncIteration when exhausted.""" raise StopAsyncIteration - async def __aiter__(self): + def 
__aiter__(self): return self @classmethod diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -177,6 +177,13 @@ 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', }) REVERSE_IMPORT_MAPPING.update({ diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -151,13 +151,13 @@ # can only be found inside Xcode.app if the "Command Line Tools" # are not installed. # - # Futhermore, the compiler that can be used varies between + # Furthermore, the compiler that can be used varies between # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -193,7 +193,7 @@ if cc != oldcc: # Found a replacement compiler. # Modify config vars using new compiler, if not already explicitly - # overriden by an env variable, preserving additional arguments. + # overridden by an env variable, preserving additional arguments. 
for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() @@ -207,7 +207,7 @@ """Remove all universal build arguments from config vars""" for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) @@ -228,7 +228,7 @@ # build extensions on OSX 10.7 and later with the prebuilt # 32-bit installer on the python.org website. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -244,7 +244,7 @@ # across Xcode and compiler versions, there is no reliable way # to be sure why it failed. Assume here it was due to lack of # PPC support and remove the related '-arch' flags from each - # config variables not explicitly overriden by an environment + # config variables not explicitly overridden by an environment # variable. If the error was for some other reason, we hope the # failure will show up again when trying to compile an extension # module. @@ -292,7 +292,7 @@ sdk = m.group(1) if not os.path.exists(sdk): for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py --- a/lib-python/3/_pydecimal.py +++ b/lib-python/3/_pydecimal.py @@ -252,7 +252,7 @@ class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. 
- This occurs and signals invalid-operation if an string is being + This occurs and signals invalid-operation if a string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ @@ -1102,7 +1102,7 @@ def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. - Rounds the number (if more then precision digits) + Rounds the number (if more than precision digits) """ if self._is_special: ans = self._check_nans(context=context) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -296,8 +296,9 @@ called. The basic type used for binary data read from or written to a file is - bytes. bytearrays are accepted too, and in some cases (such as - readinto) needed. Text I/O classes work with str data. + bytes. Other bytes-like objects are accepted as method arguments too. In + some cases (such as readinto), a writable object is required. Text I/O + classes work with str data. Note that calling any method (even inquiries) on a closed stream is undefined. Implementations may raise OSError in this case. @@ -390,7 +391,7 @@ def seekable(self): """Return a bool indicating whether object supports random access. - If False, seek(), tell() and truncate() will raise UnsupportedOperation. + If False, seek(), tell() and truncate() will raise OSError. This method may need to do a test seek(). """ return False @@ -405,7 +406,7 @@ def readable(self): """Return a bool indicating whether object was opened for reading. - If False, read() will raise UnsupportedOperation. + If False, read() will raise OSError. """ return False @@ -419,7 +420,7 @@ def writable(self): """Return a bool indicating whether object was opened for writing. - If False, write() and truncate() will raise UnsupportedOperation. + If False, write() and truncate() will raise OSError. 
""" return False @@ -439,7 +440,7 @@ return self.__closed def _checkClosed(self, msg=None): - """Internal: raise an ValueError if file is closed + """Internal: raise a ValueError if file is closed """ if self.closed: raise ValueError("I/O operation on closed file." @@ -596,7 +597,7 @@ return data def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Returns an int representing the number of bytes read (0 for EOF), or None if the object is set not to block and has no data to read. @@ -606,7 +607,8 @@ def write(self, b): """Write the given buffer to the IO stream. - Returns the number of bytes written, which may be less than len(b). + Returns the number of bytes written, which may be less than the + length of b in bytes. """ self._unsupported("write") @@ -659,7 +661,7 @@ self._unsupported("read1") def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Like read(), this may issue multiple reads to the underlying raw stream, unless the latter is 'interactive'. @@ -673,7 +675,7 @@ return self._readinto(b, read1=False) def readinto1(self, b): - """Read up to len(b) bytes into *b*, using at most one system call + """Read bytes into buffer *b*, using at most one system call Returns an int representing the number of bytes read (0 for EOF). @@ -701,8 +703,8 @@ def write(self, b): """Write the given bytes buffer to the IO stream. - Return the number of bytes written, which is never less than - len(b). + Return the number of bytes written, which is always the length of b + in bytes. Raises BlockingIOError if the buffer is full and the underlying raw stream cannot accept more data at the moment. 
@@ -787,12 +789,6 @@ def seekable(self): return self.raw.seekable() - def readable(self): - return self.raw.readable() - - def writable(self): - return self.raw.writable() - @property def raw(self): return self._raw @@ -890,7 +886,8 @@ raise ValueError("write to closed file") if isinstance(b, str): raise TypeError("can't write str to binary stream") - n = len(b) + with memoryview(b) as view: + n = view.nbytes # Size of any bytes-like object if n == 0: return 0 pos = self._pos @@ -982,6 +979,9 @@ self._reset_read_buf() self._read_lock = Lock() + def readable(self): + return self.raw.readable() + def _reset_read_buf(self): self._read_buf = b"" self._read_pos = 0 @@ -1043,7 +1043,7 @@ break avail += len(chunk) chunks.append(chunk) - # n is more then avail only when an EOF occurred or when + # n is more than avail only when an EOF occurred or when # read() would have blocked. n = min(n, avail) out = b"".join(chunks) @@ -1093,14 +1093,13 @@ def _readinto(self, buf, read1): """Read data into *buf* with at most one system call.""" - if len(buf) == 0: - return 0 - # Need to create a memoryview object of type 'b', otherwise # we may not be able to assign bytes to it, and slicing it # would create a new object. 
if not isinstance(buf, memoryview): buf = memoryview(buf) + if buf.nbytes == 0: + return 0 buf = buf.cast('B') written = 0 @@ -1170,6 +1169,9 @@ self._write_buf = bytearray() self._write_lock = Lock() + def writable(self): + return self.raw.writable() + def write(self, b): if self.closed: raise ValueError("write to closed file") diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -307,13 +311,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: @@ -456,6 +462,10 @@ week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) + if julian <= 0: + year -= 1 + yday = 366 if calendar.isleap(year) else 365 + julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py --- a/lib-python/3/asyncio/base_events.py +++ b/lib-python/3/asyncio/base_events.py @@ -52,6 +52,12 @@ # before cleanup of cancelled handles is performed. 
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 +# Exceptions which must not call the exception handler in fatal error +# methods (_fatal_error()) +_FATAL_ERROR_IGNORE = (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError) + + def _format_handle(handle): cb = handle._callback if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): @@ -70,49 +76,89 @@ return repr(fd) -def _check_resolved_address(sock, address): - # Ensure that the address is already resolved to avoid the trap of hanging - # the entire event loop when the address requires doing a DNS lookup. - # - # getaddrinfo() is slow (around 10 us per call): this function should only - # be called in debug mode - family = sock.family +# Linux's sock.type is a bitmask that can include extra info about socket. +_SOCKET_TYPE_MASK = 0 +if hasattr(socket, 'SOCK_NONBLOCK'): + _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK +if hasattr(socket, 'SOCK_CLOEXEC'): + _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC - if family == socket.AF_INET: - host, port = address - elif family == socket.AF_INET6: - host, port = address[:2] - else: + +def _ipaddr_info(host, port, family, type, proto): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): return - # On Windows, socket.inet_pton() is only available since Python 3.4 - if hasattr(socket, 'inet_pton'): - # getaddrinfo() is slow and has known issue: prefer inet_pton() - # if available + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + type &= ~_SOCKET_TYPE_MASK + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes): + if port == b'': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like b"http". 
+ port = socket.getservbyname(port.decode('ascii')) + elif isinstance(port, str): + if port == '': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like "http". + port = socket.getservbyname(port) + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET, socket.AF_INET6] + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: try: - socket.inet_pton(family, host) - except OSError as exc: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, exc)) + socket.inet_pton(af, host) + # The host has already been resolved. + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0, + flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto) + if info is not None: + # "host" is already a resolved IP. + fut = loop.create_future() + fut.set_result([info]) + return fut else: - # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is - # already resolved. 
- type_mask = 0 - if hasattr(socket, 'SOCK_NONBLOCK'): - type_mask |= socket.SOCK_NONBLOCK - if hasattr(socket, 'SOCK_CLOEXEC'): - type_mask |= socket.SOCK_CLOEXEC - try: - socket.getaddrinfo(host, port, - family=family, - type=(sock.type & ~type_mask), - proto=sock.proto, - flags=socket.AI_NUMERICHOST) - except socket.gaierror as err: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, err)) + return loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) def _run_until_complete_cb(fut): @@ -167,7 +213,7 @@ def wait_closed(self): if self.sockets is None or self._waiters is None: return - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._waiters.append(waiter) yield from waiter @@ -201,6 +247,10 @@ % (self.__class__.__name__, self.is_running(), self.is_closed(), self.get_debug())) + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + def create_task(self, coro): """Schedule a coroutine object. 
@@ -494,7 +544,7 @@ assert not args assert not isinstance(func, events.TimerHandle) if func._cancelled: - f = futures.Future(loop=self) + f = self.create_future() f.set_result(None) return f func, args = func._callback, func._args @@ -584,14 +634,14 @@ raise ValueError( 'host/port and sock can not be specified at the same time') - f1 = self.getaddrinfo( - host, port, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f1 = _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs = [f1] if local_addr is not None: - f2 = self.getaddrinfo( - *local_addr, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f2 = _ensure_resolved(local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs.append(f2) else: f2 = None @@ -673,7 +723,7 @@ def _create_connection_transport(self, sock, protocol_factory, ssl, server_hostname): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( @@ -726,9 +776,9 @@ assert isinstance(addr, tuple) and len(addr) == 2, ( '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) + infos = yield from _ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') @@ -793,7 +843,7 @@ raise exceptions[0] protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_datagram_transport( sock, protocol, r_addr, waiter) if self._debug: @@ -816,9 +866,9 @@ @coroutine def _create_server_getaddrinfo(self, host, port, family, flags): - infos = yield from self.getaddrinfo(host, port, family=family, + infos = yield from _ensure_resolved((host, port), 
family=family, type=socket.SOCK_STREAM, - flags=flags) + flags=flags, loop=self) if not infos: raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) return infos @@ -839,7 +889,10 @@ to host and port. The host parameter can also be a sequence of strings and in that case - the TCP server is bound to all hosts of the sequence. + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. Return a Server object which can be used to stop the service. @@ -868,7 +921,7 @@ flags=flags) for host in hosts] infos = yield from tasks.gather(*fs, loop=self) - infos = itertools.chain.from_iterable(infos) + infos = set(itertools.chain.from_iterable(infos)) completed = False try: @@ -929,7 +982,7 @@ @coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_read_pipe_transport(pipe, protocol, waiter) try: @@ -946,7 +999,7 @@ @coroutine def connect_write_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_write_pipe_transport(pipe, protocol, waiter) try: @@ -1028,6 +1081,11 @@ logger.info('%s: %r' % (debug_log, transport)) return transport, protocol + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + def set_exception_handler(self, handler): """Set handler as the new event loop exception handler. 
diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py --- a/lib-python/3/asyncio/base_subprocess.py +++ b/lib-python/3/asyncio/base_subprocess.py @@ -210,6 +210,10 @@ logger.info('%r exited with return code %r', self, returncode) self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode self._call(self._protocol.process_exited) self._try_finish() @@ -227,7 +231,7 @@ if self._returncode is not None: return self._returncode - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._exit_waiters.append(waiter) return (yield from waiter) diff --git a/lib-python/3/asyncio/compat.py b/lib-python/3/asyncio/compat.py --- a/lib-python/3/asyncio/compat.py +++ b/lib-python/3/asyncio/compat.py @@ -4,6 +4,7 @@ PY34 = sys.version_info >= (3, 4) PY35 = sys.version_info >= (3, 5) +PY352 = sys.version_info >= (3, 5, 2) def flatten_list_bytes(list_of_data): diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py --- a/lib-python/3/asyncio/coroutines.py +++ b/lib-python/3/asyncio/coroutines.py @@ -27,8 +27,8 @@ # before you define your coroutines. A downside of using this feature # is that tracebacks show entries for the CoroWrapper.__next__ method # when _DEBUG is true. 
-_DEBUG = (not sys.flags.ignore_environment - and bool(os.environ.get('PYTHONASYNCIODEBUG'))) +_DEBUG = (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) try: @@ -86,7 +86,7 @@ def __init__(self, gen, func=None): assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen self.gen = gen - self.func = func # Used to unwrap @coroutine decorator + self.func = func # Used to unwrap @coroutine decorator self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.__name__ = getattr(gen, '__name__', None) self.__qualname__ = getattr(gen, '__qualname__', None) @@ -204,7 +204,8 @@ @functools.wraps(func) def coro(*args, **kw): res = func(*args, **kw) - if isinstance(res, futures.Future) or inspect.isgenerator(res): + if isinstance(res, futures.Future) or inspect.isgenerator(res) or \ + isinstance(res, CoroWrapper): res = yield from res elif _AwaitableABC is not None: # If 'func' returns an Awaitable (new in 3.5) we @@ -283,10 +284,13 @@ coro_frame = coro.cr_frame filename = coro_code.co_filename - if (isinstance(coro, CoroWrapper) - and not inspect.isgeneratorfunction(coro.func) - and coro.func is not None): - filename, lineno = events._get_function_source(coro.func) + lineno = 0 + if (isinstance(coro, CoroWrapper) and + not inspect.isgeneratorfunction(coro.func) and + coro.func is not None): + source = events._get_function_source(coro.func) + if source is not None: + filename, lineno = source if coro_frame is None: coro_repr = ('%s done, defined at %s:%s' % (coro_name, filename, lineno)) diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -266,6 +266,9 @@ def time(self): raise NotImplementedError + def create_future(self): + raise NotImplementedError + # Method scheduling a coroutine object: create a task. def create_task(self, coro): @@ -484,6 +487,9 @@ # Error handlers. 
+ def get_exception_handler(self): + raise NotImplementedError + def set_exception_handler(self, handler): raise NotImplementedError diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py --- a/lib-python/3/asyncio/futures.py +++ b/lib-python/3/asyncio/futures.py @@ -142,7 +142,7 @@ def __init__(self, *, loop=None): """Initialize the future. - The optional event_loop argument allows to explicitly set the event + The optional event_loop argument allows explicitly setting the event loop object used by the future. If it's not provided, the future uses the default event loop. """ @@ -341,6 +341,9 @@ raise InvalidStateError('{}: {!r}'.format(self._state, self)) if isinstance(exception, type): exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") self._exception = exception self._state = _FINISHED self._schedule_callbacks() @@ -448,6 +451,8 @@ return future assert isinstance(future, concurrent.futures.Future), \ 'concurrent.futures.Future is expected, got {!r}'.format(future) - new_future = Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() _chain_future(future, new_future) return new_future diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py --- a/lib-python/3/asyncio/locks.py +++ b/lib-python/3/asyncio/locks.py @@ -111,7 +111,7 @@ acquire() is a coroutine and should be called with 'yield from'. Locks also support the context management protocol. '(yield from lock)' - should be used as context manager expression. + should be used as the context manager expression. 
Usage: @@ -170,7 +170,7 @@ self._locked = True return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -258,7 +258,7 @@ if self._value: return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -320,7 +320,7 @@ self.release() try: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -329,7 +329,13 @@ self._waiters.remove(fut) finally: - yield from self.acquire() + # Must reacquire lock even if wait is cancelled + while True: + try: + yield from self.acquire() + break + except futures.CancelledError: + pass @coroutine def wait_for(self, predicate): @@ -433,7 +439,7 @@ True. """ while self._value <= 0: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut diff --git a/lib-python/3/asyncio/proactor_events.py b/lib-python/3/asyncio/proactor_events.py --- a/lib-python/3/asyncio/proactor_events.py +++ b/lib-python/3/asyncio/proactor_events.py @@ -90,7 +90,7 @@ self.close() def _fatal_error(self, exc, message='Fatal error on pipe transport'): - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -440,15 +440,7 @@ return self._proactor.send(sock, data) def sock_connect(self, sock, address): - try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut = futures.Future(loop=self) - fut.set_exception(err) - return fut - else: - return self._proactor.connect(sock, address) + return self._proactor.connect(sock, address) def sock_accept(self, sock): return self._proactor.accept(sock) diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py --- 
a/lib-python/3/asyncio/queues.py +++ b/lib-python/3/asyncio/queues.py @@ -128,7 +128,7 @@ This method is a coroutine. """ while self.full(): - putter = futures.Future(loop=self._loop) + putter = self._loop.create_future() self._putters.append(putter) try: yield from putter @@ -162,7 +162,7 @@ This method is a coroutine. """ while self.empty(): - getter = futures.Future(loop=self._loop) + getter = self._loop.create_future() self._getters.append(getter) try: yield from getter diff --git a/lib-python/3/asyncio/selector_events.py b/lib-python/3/asyncio/selector_events.py --- a/lib-python/3/asyncio/selector_events.py +++ b/lib-python/3/asyncio/selector_events.py @@ -196,7 +196,7 @@ transport = None try: protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if sslcontext: transport = self._make_ssl_transport( conn, protocol, sslcontext, waiter=waiter, @@ -314,7 +314,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_recv(fut, False, sock, n) return fut @@ -352,7 +352,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() if data: self._sock_sendall(fut, False, sock, data) else: @@ -385,25 +385,28 @@ def sock_connect(self, sock, address): """Connect to a remote socket at address. - The address must be already resolved to avoid the trap of hanging the - entire event loop when the address requires doing a DNS lookup. For - example, it must be an IP address, not an hostname, for AF_INET and - AF_INET6 address families. Use getaddrinfo() to resolve the hostname - asynchronously. - This method is a coroutine. 
""" if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + + fut = self.create_future() + if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX: + self._sock_connect(fut, sock, address) + else: + resolved = base_events._ensure_resolved(address, loop=self) + resolved.add_done_callback( + lambda resolved: self._on_resolved(fut, sock, resolved)) + + return fut + + def _on_resolved(self, fut, sock, resolved): try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut.set_exception(err) + _, _, _, _, address = resolved.result()[0] + except Exception as exc: + fut.set_exception(exc) else: self._sock_connect(fut, sock, address) - return fut def _sock_connect(self, fut, sock, address): fd = sock.fileno() @@ -454,7 +457,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_accept(fut, False, sock) return fut @@ -566,6 +569,7 @@ self._loop.remove_reader(self._sock_fd) if not self._buffer: self._conn_lost += 1 + self._loop.remove_writer(self._sock_fd) self._loop.call_soon(self._call_connection_lost, None) # On Python 3.3 and older, objects with a destructor part of a reference @@ -579,8 +583,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. 
- if isinstance(exc, (BrokenPipeError, - ConnectionResetError, ConnectionAbortedError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -660,6 +663,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return try: data = self._sock.recv(self.max_size) except (BlockingIOError, InterruptedError): @@ -683,8 +688,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if not data: @@ -719,6 +724,8 @@ def _write_ready(self): assert self._buffer, 'Data should not be empty' + if self._conn_lost: + return try: n = self._sock.send(self._buffer) except (BlockingIOError, InterruptedError): @@ -889,6 +896,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return if self._write_wants_read: self._write_wants_read = False self._write_ready() @@ -921,6 +930,8 @@ self.close() def _write_ready(self): + if self._conn_lost: + return if self._read_wants_write: self._read_wants_write = False self._read_ready() @@ -955,8 +966,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return @@ -998,6 +1009,8 @@ return sum(len(data) for data, _ in self._buffer) def _read_ready(self): + if self._conn_lost: + return try: data, addr = self._sock.recvfrom(self.max_size) except (BlockingIOError, InterruptedError): @@ -1011,8 +1024,8 @@ def sendto(self, data, addr=None): if not isinstance(data, (bytes, bytearray, memoryview)): - raise 
TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py --- a/lib-python/3/asyncio/sslproto.py +++ b/lib-python/3/asyncio/sslproto.py @@ -603,7 +603,7 @@ self._wakeup_waiter() self._session_established = True # In case transport.write() was already called. Don't call - # immediatly _process_write_backlog(), but schedule it: + # immediately _process_write_backlog(), but schedule it: # _on_handshake_complete() can be called indirectly from # _process_write_backlog(), and _process_write_backlog() is not # reentrant. @@ -655,7 +655,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py --- a/lib-python/3/asyncio/streams.py +++ b/lib-python/3/asyncio/streams.py @@ -3,6 +3,7 @@ __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', 'open_connection', 'start_server', 'IncompleteReadError', + 'LimitOverrunError', ] import socket @@ -13,13 +14,12 @@ from . import coroutines from . import compat from . import events -from . import futures from . import protocols from .coroutines import coroutine from .log import logger -_DEFAULT_LIMIT = 2**16 +_DEFAULT_LIMIT = 2 ** 16 class IncompleteReadError(EOFError): @@ -27,15 +27,26 @@ Incomplete read error. 
Attributes: - partial: read bytes string before the end of stream was reached - - expected: total number of expected bytes + - expected: total number of expected bytes (or None if unknown) """ def __init__(self, partial, expected): - EOFError.__init__(self, "%s bytes read on a total of %s expected bytes" - % (len(partial), expected)) + super().__init__("%d bytes read on a total of %r expected bytes" + % (len(partial), expected)) self.partial = partial self.expected = expected +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + @coroutine def open_connection(host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -118,7 +129,6 @@ writer = StreamWriter(transport, protocol, reader, loop) return reader, writer - @coroutine def start_unix_server(client_connected_cb, path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -196,7 +206,7 @@ return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._drain_waiter = waiter yield from waiter @@ -215,9 +225,11 @@ self._stream_reader = stream_reader self._stream_writer = None self._client_connected_cb = client_connected_cb + self._over_ssl = False def connection_made(self, transport): self._stream_reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None if self._client_connected_cb is not None: self._stream_writer = StreamWriter(transport, self, self._stream_reader, @@ -228,17 +240,25 @@ self._loop.create_task(res) def connection_lost(self, exc): - if exc is None: - self._stream_reader.feed_eof() - else: - self._stream_reader.set_exception(exc) + if self._stream_reader is not None: + if exc is None: + self._stream_reader.feed_eof() + else: + 
self._stream_reader.set_exception(exc) super().connection_lost(exc) + self._stream_reader = None + self._stream_writer = None def data_received(self, data): self._stream_reader.feed_data(data) def eof_received(self): self._stream_reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False return True @@ -318,6 +338,10 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None): # The line length limit is a security feature; # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + self._limit = limit if loop is None: self._loop = events.get_event_loop() @@ -361,7 +385,7 @@ waiter.set_exception(exc) def _wakeup_waiter(self): - """Wakeup read() or readline() function waiting for data or EOF.""" + """Wakeup read*() functions waiting for data or EOF.""" waiter = self._waiter if waiter is not None: self._waiter = None @@ -395,8 +419,8 @@ self._wakeup_waiter() if (self._transport is not None and - not self._paused and - len(self._buffer) > 2*self._limit): + not self._paused and + len(self._buffer) > 2 * self._limit): try: self._transport.pause_reading() except NotImplementedError: @@ -409,7 +433,10 @@ @coroutine def _wait_for_data(self, func_name): - """Wait until feed_data() or feed_eof() is called.""" + """Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + """ # StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know @@ -418,7 +445,14 @@ raise RuntimeError('%s() called while another coroutine is ' 'already waiting for incoming data' % func_name) - self._waiter = futures.Future(loop=self._loop) + assert not self._eof, '_wait_for_data after EOF' + + # Waiting for data while paused will make deadlock, so prevent it. 
+ if self._paused: + self._paused = False + self._transport.resume_reading() + + self._waiter = self._loop.create_future() try: yield from self._waiter finally: @@ -426,43 +460,154 @@ @coroutine def readline(self): + """Read chunk of data from the stream until newline (b'\n') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + """ + sep = b'\n' + seplen = len(sep) + try: + line = yield from self.readuntil(sep) + except IncompleteReadError as e: + return e.partial + except LimitOverrunError as e: + if self._buffer.startswith(sep, e.consumed): + del self._buffer[:e.consumed + seplen] + else: + self._buffer.clear() + self._maybe_resume_transport() + raise ValueError(e.args[0]) + return line + + @coroutine + def readuntil(self, separator=b'\n'): + """Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. 
+ + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + """ + seplen = len(separator) + if seplen == 0: + raise ValueError('Separator should be at least one-byte string') + if self._exception is not None: raise self._exception - line = bytearray() - not_enough = True + # Consume whole buffer except last bytes, which length is + # one less than seplen. Let's check corner cases with + # separator='SEPARATOR': + # * we have received almost complete separator (without last + # byte). i.e buffer='some textSEPARATO'. In this case we + # can safely consume len(separator) - 1 bytes. + # * last byte of buffer is first byte of separator, i.e. + # buffer='abcdefghijklmnopqrS'. We may safely consume + # everything except that last byte, but this require to + # analyze bytes of buffer that match partial separator. + # This is slow and/or require FSM. For this case our + # implementation is not optimal, since require rescanning + # of data that is known to not belong to separator. In + # real world, separator will not be so long to notice + # performance problems. Even when reading MIME-encoded + # messages :) - while not_enough: - while self._buffer and not_enough: - ichar = self._buffer.find(b'\n') - if ichar < 0: - line.extend(self._buffer) - self._buffer.clear() - else: - ichar += 1 - line.extend(self._buffer[:ichar]) - del self._buffer[:ichar] - not_enough = False + # `offset` is the number of bytes from the beginning of the buffer + # where there is no occurrence of `separator`. + offset = 0 - if len(line) > self._limit: - self._maybe_resume_transport() - raise ValueError('Line is too long') + # Loop until we find `separator` in the buffer, exceed the buffer size, + # or an EOF has happened. + while True: + buflen = len(self._buffer) + # Check if we now have enough data in the buffer for `separator` to + # fit. 
+ if buflen - offset >= seplen: + isep = self._buffer.find(separator, offset) + + if isep != -1: + # `separator` is in the buffer. `isep` will be used later + # to retrieve the data. + break + + # see upper comment for explanation. + offset = buflen + 1 - seplen + if offset > self._limit: + raise LimitOverrunError( + 'Separator is not found, and chunk exceed the limit', + offset) + + # Complete message (with full separator) may be present in buffer + # even when EOF flag is set. This may happen when the last chunk + # adds data which makes separator be found. That's why we check for + # EOF *ater* inspecting the buffer. if self._eof: - break + chunk = bytes(self._buffer) + self._buffer.clear() + raise IncompleteReadError(chunk, None) - if not_enough: - yield from self._wait_for_data('readline') + # _wait_for_data() will resume reading if stream was paused. + yield from self._wait_for_data('readuntil') + if isep > self._limit: + raise LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] self._maybe_resume_transport() - return bytes(line) + return bytes(chunk) @coroutine def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediatelly. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. 
+ """ + if self._exception is not None: raise self._exception - if not n: + if n == 0: return b'' if n < 0: @@ -477,26 +622,42 @@ break blocks.append(block) return b''.join(blocks) - else: - if not self._buffer and not self._eof: - yield from self._wait_for_data('read') - if n < 0 or len(self._buffer) <= n: - data = bytes(self._buffer) - self._buffer.clear() - else: - # n > 0 and len(self._buffer) > n - data = bytes(self._buffer[:n]) - del self._buffer[:n] + if not self._buffer and not self._eof: + yield from self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] self._maybe_resume_transport() return data @coroutine def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + if self._exception is not None: raise self._exception + if n == 0: + return b'' + # There used to be "optimized" code here. It created its own # Future and waited until self._buffer had at least the n # bytes, then called read(n). Unfortunately, this could pause @@ -513,6 +674,8 @@ blocks.append(block) n -= len(block) + assert n == 0 + return b''.join(blocks) if compat.PY35: @@ -526,3 +689,9 @@ if val == b'': raise StopAsyncIteration return val + + if compat.PY352: + # In Python 3.5.2 and greater, __aiter__ should return + # the asynchronous iterator directly. 
+ def __aiter__(self): + return self diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -166,7 +166,7 @@ @coroutine def communicate(self, input=None): - if input: + if input is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -251,7 +251,13 @@ else: if isinstance(result, futures.Future): # Yielded Future must come from Future.__iter__(). - if result._blocking: + if result._loop is not self._loop: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task {!r} got Future {!r} attached to a ' + 'different loop'.format(self, result))) + elif result._blocking: result._blocking = False result.add_done_callback(self._wakeup) self._fut_waiter = result @@ -366,7 +372,7 @@ if timeout is None: return (yield from fut) - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) @@ -394,12 +400,12 @@ @coroutine def _wait(fs, timeout, return_when, loop): - """Internal helper for wait() and _wait_for(). + """Internal helper for wait() and wait_for(). The fs argument must be a collection of Futures. """ assert fs, 'Set of Futures is empty.' - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = None if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) @@ -500,7 +506,9 @@ yield return result - future = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + future = loop.create_future() h = future._loop.call_later(delay, futures._set_result_unless_cancelled, future, result) @@ -597,7 +605,9 @@ be cancelled.) 
""" if not coros_or_futures: - outer = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + outer = loop.create_future() outer.set_result([]) return outer @@ -685,7 +695,7 @@ # Shortcut. return inner loop = inner._loop - outer = futures.Future(loop=loop) + outer = loop.create_future() def _done_callback(inner): if outer.cancelled(): diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -446,9 +446,14 @@ finally: logger.setLevel(old_level) -def mock_nonblocking_socket(): + +def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, + family=socket.AF_INET): """Create a mock of a non-blocking socket.""" - sock = mock.Mock(socket.socket) + sock = mock.MagicMock(socket.socket) + sock.proto = proto + sock.type = type + sock.family = family sock.gettimeout.return_value = 0.0 return sock diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py --- a/lib-python/3/asyncio/unix_events.py +++ b/lib-python/3/asyncio/unix_events.py @@ -177,7 +177,7 @@ stdin, stdout, stderr, bufsize, extra=None, **kwargs): with events.get_child_watcher() as watcher: - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _UnixSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -329,14 +329,17 @@ elif self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_READ) if polling: info.append('polling') else: info.append('idle') + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -453,9 +456,10 @@ elif 
self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_WRITE) if polling: info.append('polling') @@ -464,6 +468,8 @@ bufsize = self.get_write_buffer_size() info.append('bufsize=%s' % bufsize) + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -575,7 +581,7 @@ def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/windows_events.py b/lib-python/3/asyncio/windows_events.py --- a/lib-python/3/asyncio/windows_events.py +++ b/lib-python/3/asyncio/windows_events.py @@ -197,7 +197,7 @@ # # If the IocpProactor already received the event, it's safe to call # _unregister() because we kept a reference to the Overlapped object - # which is used as an unique key. + # which is used as a unique key. 
self._proactor._unregister(self._ov) self._proactor = None @@ -366,7 +366,7 @@ def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _WindowsSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -417,7 +417,7 @@ return tmp def _result(self, value): - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() fut.set_result(value) return fut diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -12,7 +12,7 @@ __all__ = [ - # Legacy interface exports traditional RFC 1521 Base64 encodings + # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', @@ -49,14 +49,11 @@ # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): - """Encode a byte string using Base64. + """Encode the bytes-like object s using Base64 and return a bytes object. - s is the byte string to encode. Optional altchars must be a byte - string of length 2 which specifies an alternative alphabet for the - '+' and '/' characters. This allows an application to - e.g. generate url or filesystem safe Base64 strings. - - The encoded byte string is returned. + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. """ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] @@ -67,18 +64,19 @@ def b64decode(s, altchars=None, validate=False): - """Decode a Base64 encoded byte string. + """Decode the Base64 encoded bytes-like object or ASCII string s. - s is the byte string to decode. 
Optional altchars must be a - string of length 2 which specifies the alternative alphabet used - instead of the '+' and '/' characters. + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. - The decoded string is returned. A binascii.Error is raised if s is - incorrectly padded. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. - If validate is False (the default), non-base64-alphabet characters are - discarded prior to the padding check. If validate is True, - non-base64-alphabet characters in the input result in a binascii.Error. + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. """ s = _bytes_from_decode_data(s) if altchars is not None: @@ -91,19 +89,19 @@ def standard_b64encode(s): - """Encode a byte string using the standard Base64 alphabet. + """Encode bytes-like object s using the standard Base64 alphabet. - s is the byte string to encode. The encoded byte string is returned. + The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes encoded with the standard Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. 
""" return b64decode(s) @@ -112,21 +110,22 @@ _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): - """Encode a byte string using a url-safe Base64 alphabet. + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to encode. The encoded byte string is - returned. The alphabet uses '-' instead of '+' and '_' instead of + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ @@ -142,9 +141,7 @@ _b32rev = None def b32encode(s): - """Encode a byte string using Base32. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base32 and return a bytes object. """ global _b32tab2 # Delay the initialization of the table to not waste memory @@ -182,11 +179,10 @@ return bytes(encoded) def b32decode(s, casefold=False, map01=None): - """Decode a Base32 encoded byte string. + """Decode the Base32 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. 
- For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to @@ -196,7 +192,7 @@ the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. - The decoded byte string is returned. binascii.Error is raised if + The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. """ @@ -257,23 +253,20 @@ # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): - """Encode a byte string using Base16. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): - """Decode a Base16 encoded byte string. + """Decode the Base16 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. - For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. - The decoded byte string is returned. binascii.Error is raised if - s were incorrectly padded or if there are non-alphabet characters - present in the string. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. 
""" s = _bytes_from_decode_data(s) if casefold: @@ -316,19 +309,17 @@ return b''.join(chunks) def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): - """Encode a byte string using Ascii85. - - b is the byte string to encode. The encoded byte string is returned. + """Encode bytes-like object b using Ascii85 and return a bytes object. foldspaces is an optional flag that uses the special short sequence 'y' instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This feature is not supported by the "standard" Adobe encoding. - wrapcol controls whether the output should have newline ('\\n') characters + wrapcol controls whether the output should have newline (b'\\n') characters added to it. If this is non-zero, each output line will be at most this many characters long. - pad controls whether the input string is padded to a multiple of 4 before + pad controls whether the input is padded to a multiple of 4 before encoding. Note that the btoa implementation always pads. adobe controls whether the encoded byte sequence is framed with <~ and ~>, @@ -359,9 +350,7 @@ return result def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): - """Decode an Ascii85 encoded byte string. - - s is the byte string to decode. + """Decode the Ascii85 encoded bytes-like object or ASCII string b. foldspaces is a flag that specifies whether the 'y' short sequence should be accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is @@ -373,13 +362,20 @@ ignorechars should be a byte string containing characters to ignore from the input. This should only contain whitespace characters, and by default contains all whitespace characters in ASCII. + + The result is returned as a bytes object. 
""" b = _bytes_from_decode_data(b) if adobe: - if not (b.startswith(_A85START) and b.endswith(_A85END)): - raise ValueError("Ascii85 encoded byte sequences must be bracketed " - "by {!r} and {!r}".format(_A85START, _A85END)) - b = b[2:-2] # Strip off start/end markers + if not b.endswith(_A85END): + raise ValueError( + "Ascii85 encoded byte sequences must end " + "with {!r}".format(_A85END) + ) + if b.startswith(_A85START): + b = b[2:-2] # Strip off start/end markers + else: + b = b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences @@ -432,10 +428,10 @@ _b85dec = None def b85encode(b, pad=False): - """Encode an ASCII-encoded byte array in base85 format. + """Encode bytes-like object b in base85 format and return a bytes object. - If pad is true, the input is padded with "\\0" so its length is a multiple of - 4 characters before encoding. + If pad is true, the input is padded with b'\\0' so its length is a multiple of + 4 bytes before encoding. """ global _b85chars, _b85chars2 # Delay the initialization of tables to not waste memory @@ -446,7 +442,10 @@ return _85encode(b, _b85chars, _b85chars2, pad) def b85decode(b): - """Decode base85-encoded byte array""" + """Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. 
+ """ global _b85dec # Delay the initialization of tables to not waste memory # if the function is never called @@ -531,7 +530,7 @@ def encodebytes(s): - """Encode a bytestring into a bytestring containing multiple lines + """Encode a bytestring into a bytes object containing multiple lines of base-64 data.""" _input_type_check(s) pieces = [] @@ -549,7 +548,7 @@ def decodebytes(s): - """Decode a bytestring of base-64 data into a bytestring.""" + """Decode a bytestring of base-64 data into a bytes object.""" _input_type_check(s) return binascii.a2b_base64(s) From pypy.commits at gmail.com Sun Dec 4 01:43:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 22:43:50 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Update lib-python/conftest.py Message-ID: <5843bb26.448e1c0a.b23f5.d21a@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88870:42a4c3d75bea Date: 2016-12-04 06:41 +0000 http://bitbucket.org/pypy/pypy/changeset/42a4c3d75bea/ Log: Update lib-python/conftest.py diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -317,6 +317,7 @@ RegrTest('test_openpty.py'), RegrTest('test_operator.py', core=True), RegrTest('test_optparse.py'), + RegrTest('test_ordered_dict.py'), RegrTest('test_os.py', core=True), RegrTest('test_ossaudiodev.py'), RegrTest('test_osx_env.py'), @@ -326,7 +327,6 @@ RegrTest('test_peepholer.py'), RegrTest('test_pep247.py'), RegrTest('test_pep277.py'), - RegrTest('test_pep292.py'), RegrTest('test_pep3120.py'), RegrTest('test_pep3131.py'), RegrTest('test_pep3151.py'), @@ -446,6 +446,7 @@ RegrTest('test_ttk_guionly.py'), RegrTest('test_ttk_textonly.py'), RegrTest('test_tuple.py', core=True), + RegrTest('test_turtle.py'), RegrTest('test_typechecks.py'), RegrTest('test_types.py', core=True), RegrTest('test_typing.py'), From pypy.commits at gmail.com Sun Dec 4 01:43:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 03 Dec 2016 22:43:48 -0800 (PST) 
Subject: [pypy-commit] pypy py3.5: hg merge vendor/stdlib-3.5: upgrade stdlib to 3.5.2 Message-ID: <5843bb24.c6bdc20a.bad00.bc73@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88869:215771f42f81 Date: 2016-12-04 06:32 +0000 http://bitbucket.org/pypy/pypy/changeset/215771f42f81/ Log: hg merge vendor/stdlib-3.5: upgrade stdlib to 3.5.2 diff too long, truncating to 2000 out of 27757 lines diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -156,7 +156,7 @@ __slots__ = () @abstractmethod - async def __aiter__(self): + def __aiter__(self): return AsyncIterator() @classmethod @@ -176,7 +176,7 @@ """Return the next item or raise StopAsyncIteration when exhausted.""" raise StopAsyncIteration - async def __aiter__(self): + def __aiter__(self): return self @classmethod diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -177,6 +177,13 @@ 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', }) REVERSE_IMPORT_MAPPING.update({ diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -151,13 +151,13 @@ # can only be found inside Xcode.app if the "Command Line Tools" # are not installed. # - # Futhermore, the compiler that can be used varies between + # Furthermore, the compiler that can be used varies between # Xcode releases. 
Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -193,7 +193,7 @@ if cc != oldcc: # Found a replacement compiler. # Modify config vars using new compiler, if not already explicitly - # overriden by an env variable, preserving additional arguments. + # overridden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() @@ -207,7 +207,7 @@ """Remove all universal build arguments from config vars""" for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) @@ -228,7 +228,7 @@ # build extensions on OSX 10.7 and later with the prebuilt # 32-bit installer on the python.org website. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -244,7 +244,7 @@ # across Xcode and compiler versions, there is no reliable way # to be sure why it failed. Assume here it was due to lack of # PPC support and remove the related '-arch' flags from each - # config variables not explicitly overriden by an environment + # config variables not explicitly overridden by an environment # variable. If the error was for some other reason, we hope the # failure will show up again when trying to compile an extension # module. 
@@ -292,7 +292,7 @@ sdk = m.group(1) if not os.path.exists(sdk): for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py --- a/lib-python/3/_pydecimal.py +++ b/lib-python/3/_pydecimal.py @@ -252,7 +252,7 @@ class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. - This occurs and signals invalid-operation if an string is being + This occurs and signals invalid-operation if a string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ @@ -1102,7 +1102,7 @@ def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. - Rounds the number (if more then precision digits) + Rounds the number (if more than precision digits) """ if self._is_special: ans = self._check_nans(context=context) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -296,8 +296,9 @@ called. The basic type used for binary data read from or written to a file is - bytes. bytearrays are accepted too, and in some cases (such as - readinto) needed. Text I/O classes work with str data. + bytes. Other bytes-like objects are accepted as method arguments too. In + some cases (such as readinto), a writable object is required. Text I/O + classes work with str data. Note that calling any method (even inquiries) on a closed stream is undefined. Implementations may raise OSError in this case. @@ -390,7 +391,7 @@ def seekable(self): """Return a bool indicating whether object supports random access. - If False, seek(), tell() and truncate() will raise UnsupportedOperation. + If False, seek(), tell() and truncate() will raise OSError. 
This method may need to do a test seek(). """ return False @@ -405,7 +406,7 @@ def readable(self): """Return a bool indicating whether object was opened for reading. - If False, read() will raise UnsupportedOperation. + If False, read() will raise OSError. """ return False @@ -419,7 +420,7 @@ def writable(self): """Return a bool indicating whether object was opened for writing. - If False, write() and truncate() will raise UnsupportedOperation. + If False, write() and truncate() will raise OSError. """ return False @@ -439,7 +440,7 @@ return self.__closed def _checkClosed(self, msg=None): - """Internal: raise an ValueError if file is closed + """Internal: raise a ValueError if file is closed """ if self.closed: raise ValueError("I/O operation on closed file." @@ -596,7 +597,7 @@ return data def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Returns an int representing the number of bytes read (0 for EOF), or None if the object is set not to block and has no data to read. @@ -606,7 +607,8 @@ def write(self, b): """Write the given buffer to the IO stream. - Returns the number of bytes written, which may be less than len(b). + Returns the number of bytes written, which may be less than the + length of b in bytes. """ self._unsupported("write") @@ -659,7 +661,7 @@ self._unsupported("read1") def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Like read(), this may issue multiple reads to the underlying raw stream, unless the latter is 'interactive'. @@ -673,7 +675,7 @@ return self._readinto(b, read1=False) def readinto1(self, b): - """Read up to len(b) bytes into *b*, using at most one system call + """Read bytes into buffer *b*, using at most one system call Returns an int representing the number of bytes read (0 for EOF). @@ -701,8 +703,8 @@ def write(self, b): """Write the given bytes buffer to the IO stream. 
- Return the number of bytes written, which is never less than - len(b). + Return the number of bytes written, which is always the length of b + in bytes. Raises BlockingIOError if the buffer is full and the underlying raw stream cannot accept more data at the moment. @@ -787,12 +789,6 @@ def seekable(self): return self.raw.seekable() - def readable(self): - return self.raw.readable() - - def writable(self): - return self.raw.writable() - @property def raw(self): return self._raw @@ -890,7 +886,8 @@ raise ValueError("write to closed file") if isinstance(b, str): raise TypeError("can't write str to binary stream") - n = len(b) + with memoryview(b) as view: + n = view.nbytes # Size of any bytes-like object if n == 0: return 0 pos = self._pos @@ -982,6 +979,9 @@ self._reset_read_buf() self._read_lock = Lock() + def readable(self): + return self.raw.readable() + def _reset_read_buf(self): self._read_buf = b"" self._read_pos = 0 @@ -1043,7 +1043,7 @@ break avail += len(chunk) chunks.append(chunk) - # n is more then avail only when an EOF occurred or when + # n is more than avail only when an EOF occurred or when # read() would have blocked. n = min(n, avail) out = b"".join(chunks) @@ -1093,14 +1093,13 @@ def _readinto(self, buf, read1): """Read data into *buf* with at most one system call.""" - if len(buf) == 0: - return 0 - # Need to create a memoryview object of type 'b', otherwise # we may not be able to assign bytes to it, and slicing it # would create a new object. 
if not isinstance(buf, memoryview): buf = memoryview(buf) + if buf.nbytes == 0: + return 0 buf = buf.cast('B') written = 0 @@ -1170,6 +1169,9 @@ self._write_buf = bytearray() self._write_lock = Lock() + def writable(self): + return self.raw.writable() + def write(self, b): if self.closed: raise ValueError("write to closed file") diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -307,13 +311,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: @@ -456,6 +462,10 @@ week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) + if julian <= 0: + year -= 1 + yday = 366 if calendar.isleap(year) else 365 + julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py --- a/lib-python/3/asyncio/base_events.py +++ b/lib-python/3/asyncio/base_events.py @@ -52,6 +52,12 @@ # before cleanup of cancelled handles is performed. 
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 +# Exceptions which must not call the exception handler in fatal error +# methods (_fatal_error()) +_FATAL_ERROR_IGNORE = (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError) + + def _format_handle(handle): cb = handle._callback if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): @@ -70,49 +76,89 @@ return repr(fd) -def _check_resolved_address(sock, address): - # Ensure that the address is already resolved to avoid the trap of hanging - # the entire event loop when the address requires doing a DNS lookup. - # - # getaddrinfo() is slow (around 10 us per call): this function should only - # be called in debug mode - family = sock.family +# Linux's sock.type is a bitmask that can include extra info about socket. +_SOCKET_TYPE_MASK = 0 +if hasattr(socket, 'SOCK_NONBLOCK'): + _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK +if hasattr(socket, 'SOCK_CLOEXEC'): + _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC - if family == socket.AF_INET: - host, port = address - elif family == socket.AF_INET6: - host, port = address[:2] - else: + +def _ipaddr_info(host, port, family, type, proto): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): return - # On Windows, socket.inet_pton() is only available since Python 3.4 - if hasattr(socket, 'inet_pton'): - # getaddrinfo() is slow and has known issue: prefer inet_pton() - # if available + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + type &= ~_SOCKET_TYPE_MASK + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes): + if port == b'': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like b"http". 
+ port = socket.getservbyname(port.decode('ascii')) + elif isinstance(port, str): + if port == '': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like "http". + port = socket.getservbyname(port) + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET, socket.AF_INET6] + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: try: - socket.inet_pton(family, host) - except OSError as exc: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, exc)) + socket.inet_pton(af, host) + # The host has already been resolved. + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0, + flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto) + if info is not None: + # "host" is already a resolved IP. + fut = loop.create_future() + fut.set_result([info]) + return fut else: - # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is - # already resolved. 
- type_mask = 0 - if hasattr(socket, 'SOCK_NONBLOCK'): - type_mask |= socket.SOCK_NONBLOCK - if hasattr(socket, 'SOCK_CLOEXEC'): - type_mask |= socket.SOCK_CLOEXEC - try: - socket.getaddrinfo(host, port, - family=family, - type=(sock.type & ~type_mask), - proto=sock.proto, - flags=socket.AI_NUMERICHOST) - except socket.gaierror as err: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, err)) + return loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) def _run_until_complete_cb(fut): @@ -167,7 +213,7 @@ def wait_closed(self): if self.sockets is None or self._waiters is None: return - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._waiters.append(waiter) yield from waiter @@ -201,6 +247,10 @@ % (self.__class__.__name__, self.is_running(), self.is_closed(), self.get_debug())) + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + def create_task(self, coro): """Schedule a coroutine object. 
@@ -494,7 +544,7 @@ assert not args assert not isinstance(func, events.TimerHandle) if func._cancelled: - f = futures.Future(loop=self) + f = self.create_future() f.set_result(None) return f func, args = func._callback, func._args @@ -584,14 +634,14 @@ raise ValueError( 'host/port and sock can not be specified at the same time') - f1 = self.getaddrinfo( - host, port, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f1 = _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs = [f1] if local_addr is not None: - f2 = self.getaddrinfo( - *local_addr, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f2 = _ensure_resolved(local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs.append(f2) else: f2 = None @@ -673,7 +723,7 @@ def _create_connection_transport(self, sock, protocol_factory, ssl, server_hostname): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( @@ -726,9 +776,9 @@ assert isinstance(addr, tuple) and len(addr) == 2, ( '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) + infos = yield from _ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') @@ -793,7 +843,7 @@ raise exceptions[0] protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_datagram_transport( sock, protocol, r_addr, waiter) if self._debug: @@ -816,9 +866,9 @@ @coroutine def _create_server_getaddrinfo(self, host, port, family, flags): - infos = yield from self.getaddrinfo(host, port, family=family, + infos = yield from _ensure_resolved((host, port), 
family=family, type=socket.SOCK_STREAM, - flags=flags) + flags=flags, loop=self) if not infos: raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) return infos @@ -839,7 +889,10 @@ to host and port. The host parameter can also be a sequence of strings and in that case - the TCP server is bound to all hosts of the sequence. + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. Return a Server object which can be used to stop the service. @@ -868,7 +921,7 @@ flags=flags) for host in hosts] infos = yield from tasks.gather(*fs, loop=self) - infos = itertools.chain.from_iterable(infos) + infos = set(itertools.chain.from_iterable(infos)) completed = False try: @@ -929,7 +982,7 @@ @coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_read_pipe_transport(pipe, protocol, waiter) try: @@ -946,7 +999,7 @@ @coroutine def connect_write_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_write_pipe_transport(pipe, protocol, waiter) try: @@ -1028,6 +1081,11 @@ logger.info('%s: %r' % (debug_log, transport)) return transport, protocol + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + def set_exception_handler(self, handler): """Set handler as the new event loop exception handler. 
diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py --- a/lib-python/3/asyncio/base_subprocess.py +++ b/lib-python/3/asyncio/base_subprocess.py @@ -210,6 +210,10 @@ logger.info('%r exited with return code %r', self, returncode) self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode self._call(self._protocol.process_exited) self._try_finish() @@ -227,7 +231,7 @@ if self._returncode is not None: return self._returncode - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._exit_waiters.append(waiter) return (yield from waiter) diff --git a/lib-python/3/asyncio/compat.py b/lib-python/3/asyncio/compat.py --- a/lib-python/3/asyncio/compat.py +++ b/lib-python/3/asyncio/compat.py @@ -4,6 +4,7 @@ PY34 = sys.version_info >= (3, 4) PY35 = sys.version_info >= (3, 5) +PY352 = sys.version_info >= (3, 5, 2) def flatten_list_bytes(list_of_data): diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py --- a/lib-python/3/asyncio/coroutines.py +++ b/lib-python/3/asyncio/coroutines.py @@ -27,8 +27,8 @@ # before you define your coroutines. A downside of using this feature # is that tracebacks show entries for the CoroWrapper.__next__ method # when _DEBUG is true. 
-_DEBUG = (not sys.flags.ignore_environment - and bool(os.environ.get('PYTHONASYNCIODEBUG'))) +_DEBUG = (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) try: @@ -86,7 +86,7 @@ def __init__(self, gen, func=None): assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen self.gen = gen - self.func = func # Used to unwrap @coroutine decorator + self.func = func # Used to unwrap @coroutine decorator self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.__name__ = getattr(gen, '__name__', None) self.__qualname__ = getattr(gen, '__qualname__', None) @@ -204,7 +204,8 @@ @functools.wraps(func) def coro(*args, **kw): res = func(*args, **kw) - if isinstance(res, futures.Future) or inspect.isgenerator(res): + if isinstance(res, futures.Future) or inspect.isgenerator(res) or \ + isinstance(res, CoroWrapper): res = yield from res elif _AwaitableABC is not None: # If 'func' returns an Awaitable (new in 3.5) we @@ -283,10 +284,13 @@ coro_frame = coro.cr_frame filename = coro_code.co_filename - if (isinstance(coro, CoroWrapper) - and not inspect.isgeneratorfunction(coro.func) - and coro.func is not None): - filename, lineno = events._get_function_source(coro.func) + lineno = 0 + if (isinstance(coro, CoroWrapper) and + not inspect.isgeneratorfunction(coro.func) and + coro.func is not None): + source = events._get_function_source(coro.func) + if source is not None: + filename, lineno = source if coro_frame is None: coro_repr = ('%s done, defined at %s:%s' % (coro_name, filename, lineno)) diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -266,6 +266,9 @@ def time(self): raise NotImplementedError + def create_future(self): + raise NotImplementedError + # Method scheduling a coroutine object: create a task. def create_task(self, coro): @@ -484,6 +487,9 @@ # Error handlers. 
+ def get_exception_handler(self): + raise NotImplementedError + def set_exception_handler(self, handler): raise NotImplementedError diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py --- a/lib-python/3/asyncio/futures.py +++ b/lib-python/3/asyncio/futures.py @@ -142,7 +142,7 @@ def __init__(self, *, loop=None): """Initialize the future. - The optional event_loop argument allows to explicitly set the event + The optional event_loop argument allows explicitly setting the event loop object used by the future. If it's not provided, the future uses the default event loop. """ @@ -341,6 +341,9 @@ raise InvalidStateError('{}: {!r}'.format(self._state, self)) if isinstance(exception, type): exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") self._exception = exception self._state = _FINISHED self._schedule_callbacks() @@ -448,6 +451,8 @@ return future assert isinstance(future, concurrent.futures.Future), \ 'concurrent.futures.Future is expected, got {!r}'.format(future) - new_future = Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() _chain_future(future, new_future) return new_future diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py --- a/lib-python/3/asyncio/locks.py +++ b/lib-python/3/asyncio/locks.py @@ -111,7 +111,7 @@ acquire() is a coroutine and should be called with 'yield from'. Locks also support the context management protocol. '(yield from lock)' - should be used as context manager expression. + should be used as the context manager expression. 
Usage: @@ -170,7 +170,7 @@ self._locked = True return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -258,7 +258,7 @@ if self._value: return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -320,7 +320,7 @@ self.release() try: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -329,7 +329,13 @@ self._waiters.remove(fut) finally: - yield from self.acquire() + # Must reacquire lock even if wait is cancelled + while True: + try: + yield from self.acquire() + break + except futures.CancelledError: + pass @coroutine def wait_for(self, predicate): @@ -433,7 +439,7 @@ True. """ while self._value <= 0: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut diff --git a/lib-python/3/asyncio/proactor_events.py b/lib-python/3/asyncio/proactor_events.py --- a/lib-python/3/asyncio/proactor_events.py +++ b/lib-python/3/asyncio/proactor_events.py @@ -90,7 +90,7 @@ self.close() def _fatal_error(self, exc, message='Fatal error on pipe transport'): - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -440,15 +440,7 @@ return self._proactor.send(sock, data) def sock_connect(self, sock, address): - try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut = futures.Future(loop=self) - fut.set_exception(err) - return fut - else: - return self._proactor.connect(sock, address) + return self._proactor.connect(sock, address) def sock_accept(self, sock): return self._proactor.accept(sock) diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py --- 
a/lib-python/3/asyncio/queues.py +++ b/lib-python/3/asyncio/queues.py @@ -128,7 +128,7 @@ This method is a coroutine. """ while self.full(): - putter = futures.Future(loop=self._loop) + putter = self._loop.create_future() self._putters.append(putter) try: yield from putter @@ -162,7 +162,7 @@ This method is a coroutine. """ while self.empty(): - getter = futures.Future(loop=self._loop) + getter = self._loop.create_future() self._getters.append(getter) try: yield from getter diff --git a/lib-python/3/asyncio/selector_events.py b/lib-python/3/asyncio/selector_events.py --- a/lib-python/3/asyncio/selector_events.py +++ b/lib-python/3/asyncio/selector_events.py @@ -196,7 +196,7 @@ transport = None try: protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if sslcontext: transport = self._make_ssl_transport( conn, protocol, sslcontext, waiter=waiter, @@ -314,7 +314,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_recv(fut, False, sock, n) return fut @@ -352,7 +352,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() if data: self._sock_sendall(fut, False, sock, data) else: @@ -385,25 +385,28 @@ def sock_connect(self, sock, address): """Connect to a remote socket at address. - The address must be already resolved to avoid the trap of hanging the - entire event loop when the address requires doing a DNS lookup. For - example, it must be an IP address, not an hostname, for AF_INET and - AF_INET6 address families. Use getaddrinfo() to resolve the hostname - asynchronously. - This method is a coroutine. 
""" if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + + fut = self.create_future() + if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX: + self._sock_connect(fut, sock, address) + else: + resolved = base_events._ensure_resolved(address, loop=self) + resolved.add_done_callback( + lambda resolved: self._on_resolved(fut, sock, resolved)) + + return fut + + def _on_resolved(self, fut, sock, resolved): try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut.set_exception(err) + _, _, _, _, address = resolved.result()[0] + except Exception as exc: + fut.set_exception(exc) else: self._sock_connect(fut, sock, address) - return fut def _sock_connect(self, fut, sock, address): fd = sock.fileno() @@ -454,7 +457,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_accept(fut, False, sock) return fut @@ -566,6 +569,7 @@ self._loop.remove_reader(self._sock_fd) if not self._buffer: self._conn_lost += 1 + self._loop.remove_writer(self._sock_fd) self._loop.call_soon(self._call_connection_lost, None) # On Python 3.3 and older, objects with a destructor part of a reference @@ -579,8 +583,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. 
- if isinstance(exc, (BrokenPipeError, - ConnectionResetError, ConnectionAbortedError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -660,6 +663,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return try: data = self._sock.recv(self.max_size) except (BlockingIOError, InterruptedError): @@ -683,8 +688,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if not data: @@ -719,6 +724,8 @@ def _write_ready(self): assert self._buffer, 'Data should not be empty' + if self._conn_lost: + return try: n = self._sock.send(self._buffer) except (BlockingIOError, InterruptedError): @@ -889,6 +896,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return if self._write_wants_read: self._write_wants_read = False self._write_ready() @@ -921,6 +930,8 @@ self.close() def _write_ready(self): + if self._conn_lost: + return if self._read_wants_write: self._read_wants_write = False self._read_ready() @@ -955,8 +966,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return @@ -998,6 +1009,8 @@ return sum(len(data) for data, _ in self._buffer) def _read_ready(self): + if self._conn_lost: + return try: data, addr = self._sock.recvfrom(self.max_size) except (BlockingIOError, InterruptedError): @@ -1011,8 +1024,8 @@ def sendto(self, data, addr=None): if not isinstance(data, (bytes, bytearray, memoryview)): - raise 
TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py --- a/lib-python/3/asyncio/sslproto.py +++ b/lib-python/3/asyncio/sslproto.py @@ -603,7 +603,7 @@ self._wakeup_waiter() self._session_established = True # In case transport.write() was already called. Don't call - # immediatly _process_write_backlog(), but schedule it: + # immediately _process_write_backlog(), but schedule it: # _on_handshake_complete() can be called indirectly from # _process_write_backlog(), and _process_write_backlog() is not # reentrant. @@ -655,7 +655,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py --- a/lib-python/3/asyncio/streams.py +++ b/lib-python/3/asyncio/streams.py @@ -3,6 +3,7 @@ __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', 'open_connection', 'start_server', 'IncompleteReadError', + 'LimitOverrunError', ] import socket @@ -13,13 +14,12 @@ from . import coroutines from . import compat from . import events -from . import futures from . import protocols from .coroutines import coroutine from .log import logger -_DEFAULT_LIMIT = 2**16 +_DEFAULT_LIMIT = 2 ** 16 class IncompleteReadError(EOFError): @@ -27,15 +27,26 @@ Incomplete read error. 
Attributes: - partial: read bytes string before the end of stream was reached - - expected: total number of expected bytes + - expected: total number of expected bytes (or None if unknown) """ def __init__(self, partial, expected): - EOFError.__init__(self, "%s bytes read on a total of %s expected bytes" - % (len(partial), expected)) + super().__init__("%d bytes read on a total of %r expected bytes" + % (len(partial), expected)) self.partial = partial self.expected = expected +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + @coroutine def open_connection(host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -118,7 +129,6 @@ writer = StreamWriter(transport, protocol, reader, loop) return reader, writer - @coroutine def start_unix_server(client_connected_cb, path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -196,7 +206,7 @@ return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._drain_waiter = waiter yield from waiter @@ -215,9 +225,11 @@ self._stream_reader = stream_reader self._stream_writer = None self._client_connected_cb = client_connected_cb + self._over_ssl = False def connection_made(self, transport): self._stream_reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None if self._client_connected_cb is not None: self._stream_writer = StreamWriter(transport, self, self._stream_reader, @@ -228,17 +240,25 @@ self._loop.create_task(res) def connection_lost(self, exc): - if exc is None: - self._stream_reader.feed_eof() - else: - self._stream_reader.set_exception(exc) + if self._stream_reader is not None: + if exc is None: + self._stream_reader.feed_eof() + else: + 
self._stream_reader.set_exception(exc) super().connection_lost(exc) + self._stream_reader = None + self._stream_writer = None def data_received(self, data): self._stream_reader.feed_data(data) def eof_received(self): self._stream_reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False return True @@ -318,6 +338,10 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None): # The line length limit is a security feature; # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + self._limit = limit if loop is None: self._loop = events.get_event_loop() @@ -361,7 +385,7 @@ waiter.set_exception(exc) def _wakeup_waiter(self): - """Wakeup read() or readline() function waiting for data or EOF.""" + """Wakeup read*() functions waiting for data or EOF.""" waiter = self._waiter if waiter is not None: self._waiter = None @@ -395,8 +419,8 @@ self._wakeup_waiter() if (self._transport is not None and - not self._paused and - len(self._buffer) > 2*self._limit): + not self._paused and + len(self._buffer) > 2 * self._limit): try: self._transport.pause_reading() except NotImplementedError: @@ -409,7 +433,10 @@ @coroutine def _wait_for_data(self, func_name): - """Wait until feed_data() or feed_eof() is called.""" + """Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + """ # StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know @@ -418,7 +445,14 @@ raise RuntimeError('%s() called while another coroutine is ' 'already waiting for incoming data' % func_name) - self._waiter = futures.Future(loop=self._loop) + assert not self._eof, '_wait_for_data after EOF' + + # Waiting for data while paused will make deadlock, so prevent it. 
+ if self._paused: + self._paused = False + self._transport.resume_reading() + + self._waiter = self._loop.create_future() try: yield from self._waiter finally: @@ -426,43 +460,154 @@ @coroutine def readline(self): + """Read chunk of data from the stream until newline (b'\n') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + """ + sep = b'\n' + seplen = len(sep) + try: + line = yield from self.readuntil(sep) + except IncompleteReadError as e: + return e.partial + except LimitOverrunError as e: + if self._buffer.startswith(sep, e.consumed): + del self._buffer[:e.consumed + seplen] + else: + self._buffer.clear() + self._maybe_resume_transport() + raise ValueError(e.args[0]) + return line + + @coroutine + def readuntil(self, separator=b'\n'): + """Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. 
+ + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + """ + seplen = len(separator) + if seplen == 0: + raise ValueError('Separator should be at least one-byte string') + if self._exception is not None: raise self._exception - line = bytearray() - not_enough = True + # Consume whole buffer except last bytes, which length is + # one less than seplen. Let's check corner cases with + # separator='SEPARATOR': + # * we have received almost complete separator (without last + # byte). i.e buffer='some textSEPARATO'. In this case we + # can safely consume len(separator) - 1 bytes. + # * last byte of buffer is first byte of separator, i.e. + # buffer='abcdefghijklmnopqrS'. We may safely consume + # everything except that last byte, but this require to + # analyze bytes of buffer that match partial separator. + # This is slow and/or require FSM. For this case our + # implementation is not optimal, since require rescanning + # of data that is known to not belong to separator. In + # real world, separator will not be so long to notice + # performance problems. Even when reading MIME-encoded + # messages :) - while not_enough: - while self._buffer and not_enough: - ichar = self._buffer.find(b'\n') - if ichar < 0: - line.extend(self._buffer) - self._buffer.clear() - else: - ichar += 1 - line.extend(self._buffer[:ichar]) - del self._buffer[:ichar] - not_enough = False + # `offset` is the number of bytes from the beginning of the buffer + # where there is no occurrence of `separator`. + offset = 0 - if len(line) > self._limit: - self._maybe_resume_transport() - raise ValueError('Line is too long') + # Loop until we find `separator` in the buffer, exceed the buffer size, + # or an EOF has happened. + while True: + buflen = len(self._buffer) + # Check if we now have enough data in the buffer for `separator` to + # fit. 
+ if buflen - offset >= seplen: + isep = self._buffer.find(separator, offset) + + if isep != -1: + # `separator` is in the buffer. `isep` will be used later + # to retrieve the data. + break + + # see upper comment for explanation. + offset = buflen + 1 - seplen + if offset > self._limit: + raise LimitOverrunError( + 'Separator is not found, and chunk exceed the limit', + offset) + + # Complete message (with full separator) may be present in buffer + # even when EOF flag is set. This may happen when the last chunk + # adds data which makes separator be found. That's why we check for + # EOF *ater* inspecting the buffer. if self._eof: - break + chunk = bytes(self._buffer) + self._buffer.clear() + raise IncompleteReadError(chunk, None) - if not_enough: - yield from self._wait_for_data('readline') + # _wait_for_data() will resume reading if stream was paused. + yield from self._wait_for_data('readuntil') + if isep > self._limit: + raise LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] self._maybe_resume_transport() - return bytes(line) + return bytes(chunk) @coroutine def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediatelly. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. 
+ """ + if self._exception is not None: raise self._exception - if not n: + if n == 0: return b'' if n < 0: @@ -477,26 +622,42 @@ break blocks.append(block) return b''.join(blocks) - else: - if not self._buffer and not self._eof: - yield from self._wait_for_data('read') - if n < 0 or len(self._buffer) <= n: - data = bytes(self._buffer) - self._buffer.clear() - else: - # n > 0 and len(self._buffer) > n - data = bytes(self._buffer[:n]) - del self._buffer[:n] + if not self._buffer and not self._eof: + yield from self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] self._maybe_resume_transport() return data @coroutine def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + if self._exception is not None: raise self._exception + if n == 0: + return b'' + # There used to be "optimized" code here. It created its own # Future and waited until self._buffer had at least the n # bytes, then called read(n). Unfortunately, this could pause @@ -513,6 +674,8 @@ blocks.append(block) n -= len(block) + assert n == 0 + return b''.join(blocks) if compat.PY35: @@ -526,3 +689,9 @@ if val == b'': raise StopAsyncIteration return val + + if compat.PY352: + # In Python 3.5.2 and greater, __aiter__ should return + # the asynchronous iterator directly. 
+ def __aiter__(self): + return self diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -166,7 +166,7 @@ @coroutine def communicate(self, input=None): - if input: + if input is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -251,7 +251,13 @@ else: if isinstance(result, futures.Future): # Yielded Future must come from Future.__iter__(). - if result._blocking: + if result._loop is not self._loop: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task {!r} got Future {!r} attached to a ' + 'different loop'.format(self, result))) + elif result._blocking: result._blocking = False result.add_done_callback(self._wakeup) self._fut_waiter = result @@ -366,7 +372,7 @@ if timeout is None: return (yield from fut) - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) @@ -394,12 +400,12 @@ @coroutine def _wait(fs, timeout, return_when, loop): - """Internal helper for wait() and _wait_for(). + """Internal helper for wait() and wait_for(). The fs argument must be a collection of Futures. """ assert fs, 'Set of Futures is empty.' - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = None if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) @@ -500,7 +506,9 @@ yield return result - future = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + future = loop.create_future() h = future._loop.call_later(delay, futures._set_result_unless_cancelled, future, result) @@ -597,7 +605,9 @@ be cancelled.) 
""" if not coros_or_futures: - outer = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + outer = loop.create_future() outer.set_result([]) return outer @@ -685,7 +695,7 @@ # Shortcut. return inner loop = inner._loop - outer = futures.Future(loop=loop) + outer = loop.create_future() def _done_callback(inner): if outer.cancelled(): diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -446,9 +446,14 @@ finally: logger.setLevel(old_level) -def mock_nonblocking_socket(): + +def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, + family=socket.AF_INET): """Create a mock of a non-blocking socket.""" - sock = mock.Mock(socket.socket) + sock = mock.MagicMock(socket.socket) + sock.proto = proto + sock.type = type + sock.family = family sock.gettimeout.return_value = 0.0 return sock diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py --- a/lib-python/3/asyncio/unix_events.py +++ b/lib-python/3/asyncio/unix_events.py @@ -177,7 +177,7 @@ stdin, stdout, stderr, bufsize, extra=None, **kwargs): with events.get_child_watcher() as watcher: - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _UnixSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -329,14 +329,17 @@ elif self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_READ) if polling: info.append('polling') else: info.append('idle') + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -453,9 +456,10 @@ elif 
self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_WRITE) if polling: info.append('polling') @@ -464,6 +468,8 @@ bufsize = self.get_write_buffer_size() info.append('bufsize=%s' % bufsize) + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -575,7 +581,7 @@ def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/windows_events.py b/lib-python/3/asyncio/windows_events.py --- a/lib-python/3/asyncio/windows_events.py +++ b/lib-python/3/asyncio/windows_events.py @@ -197,7 +197,7 @@ # # If the IocpProactor already received the event, it's safe to call # _unregister() because we kept a reference to the Overlapped object - # which is used as an unique key. + # which is used as a unique key. 
self._proactor._unregister(self._ov) self._proactor = None @@ -366,7 +366,7 @@ def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _WindowsSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -417,7 +417,7 @@ return tmp def _result(self, value): - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() fut.set_result(value) return fut diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -12,7 +12,7 @@ __all__ = [ - # Legacy interface exports traditional RFC 1521 Base64 encodings + # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', @@ -49,14 +49,11 @@ # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): - """Encode a byte string using Base64. + """Encode the bytes-like object s using Base64 and return a bytes object. - s is the byte string to encode. Optional altchars must be a byte - string of length 2 which specifies an alternative alphabet for the - '+' and '/' characters. This allows an application to - e.g. generate url or filesystem safe Base64 strings. - - The encoded byte string is returned. + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. """ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] @@ -67,18 +64,19 @@ def b64decode(s, altchars=None, validate=False): - """Decode a Base64 encoded byte string. + """Decode the Base64 encoded bytes-like object or ASCII string s. - s is the byte string to decode. 
Optional altchars must be a - string of length 2 which specifies the alternative alphabet used - instead of the '+' and '/' characters. + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. - The decoded string is returned. A binascii.Error is raised if s is - incorrectly padded. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. - If validate is False (the default), non-base64-alphabet characters are - discarded prior to the padding check. If validate is True, - non-base64-alphabet characters in the input result in a binascii.Error. + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. """ s = _bytes_from_decode_data(s) if altchars is not None: @@ -91,19 +89,19 @@ def standard_b64encode(s): - """Encode a byte string using the standard Base64 alphabet. + """Encode bytes-like object s using the standard Base64 alphabet. - s is the byte string to encode. The encoded byte string is returned. + The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes encoded with the standard Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. 
""" return b64decode(s) @@ -112,21 +110,22 @@ _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): - """Encode a byte string using a url-safe Base64 alphabet. + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to encode. The encoded byte string is - returned. The alphabet uses '-' instead of '+' and '_' instead of + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ @@ -142,9 +141,7 @@ _b32rev = None def b32encode(s): - """Encode a byte string using Base32. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base32 and return a bytes object. """ global _b32tab2 # Delay the initialization of the table to not waste memory @@ -182,11 +179,10 @@ return bytes(encoded) def b32decode(s, casefold=False, map01=None): - """Decode a Base32 encoded byte string. + """Decode the Base32 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. 
- For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to @@ -196,7 +192,7 @@ the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. - The decoded byte string is returned. binascii.Error is raised if + The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. """ @@ -257,23 +253,20 @@ # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): - """Encode a byte string using Base16. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): - """Decode a Base16 encoded byte string. + """Decode the Base16 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. - For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. - The decoded byte string is returned. binascii.Error is raised if - s were incorrectly padded or if there are non-alphabet characters - present in the string. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. 
""" s = _bytes_from_decode_data(s) if casefold: @@ -316,19 +309,17 @@ return b''.join(chunks) def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): - """Encode a byte string using Ascii85. - - b is the byte string to encode. The encoded byte string is returned. + """Encode bytes-like object b using Ascii85 and return a bytes object. foldspaces is an optional flag that uses the special short sequence 'y' instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This feature is not supported by the "standard" Adobe encoding. - wrapcol controls whether the output should have newline ('\\n') characters + wrapcol controls whether the output should have newline (b'\\n') characters added to it. If this is non-zero, each output line will be at most this many characters long. - pad controls whether the input string is padded to a multiple of 4 before + pad controls whether the input is padded to a multiple of 4 before encoding. Note that the btoa implementation always pads. adobe controls whether the encoded byte sequence is framed with <~ and ~>, @@ -359,9 +350,7 @@ return result def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): - """Decode an Ascii85 encoded byte string. - - s is the byte string to decode. + """Decode the Ascii85 encoded bytes-like object or ASCII string b. foldspaces is a flag that specifies whether the 'y' short sequence should be accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is @@ -373,13 +362,20 @@ ignorechars should be a byte string containing characters to ignore from the input. This should only contain whitespace characters, and by default contains all whitespace characters in ASCII. + + The result is returned as a bytes object. 
""" b = _bytes_from_decode_data(b) if adobe: - if not (b.startswith(_A85START) and b.endswith(_A85END)): - raise ValueError("Ascii85 encoded byte sequences must be bracketed " - "by {!r} and {!r}".format(_A85START, _A85END)) - b = b[2:-2] # Strip off start/end markers + if not b.endswith(_A85END): + raise ValueError( + "Ascii85 encoded byte sequences must end " + "with {!r}".format(_A85END) + ) + if b.startswith(_A85START): + b = b[2:-2] # Strip off start/end markers + else: + b = b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences @@ -432,10 +428,10 @@ _b85dec = None def b85encode(b, pad=False): - """Encode an ASCII-encoded byte array in base85 format. + """Encode bytes-like object b in base85 format and return a bytes object. - If pad is true, the input is padded with "\\0" so its length is a multiple of - 4 characters before encoding. + If pad is true, the input is padded with b'\\0' so its length is a multiple of + 4 bytes before encoding. """ global _b85chars, _b85chars2 # Delay the initialization of tables to not waste memory @@ -446,7 +442,10 @@ return _85encode(b, _b85chars, _b85chars2, pad) def b85decode(b): - """Decode base85-encoded byte array""" + """Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. 
+ """ global _b85dec # Delay the initialization of tables to not waste memory # if the function is never called @@ -531,7 +530,7 @@ def encodebytes(s): - """Encode a bytestring into a bytestring containing multiple lines + """Encode a bytestring into a bytes object containing multiple lines of base-64 data.""" _input_type_check(s) pieces = [] @@ -549,7 +548,7 @@ def decodebytes(s): - """Decode a bytestring of base-64 data into a bytestring.""" + """Decode a bytestring of base-64 data into a bytes object.""" _input_type_check(s) return binascii.a2b_base64(s) From pypy.commits at gmail.com Sun Dec 4 04:21:53 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 01:21:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: Add eintr_retry=False systematically (the goal would be to remove Message-ID: <5843e031.d5091c0a.a6dc6.eb55@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88871:63df1181a419 Date: 2016-12-04 10:19 +0100 http://bitbucket.org/pypy/pypy/changeset/63df1181a419/ Log: Add eintr_retry=False systematically (the goal would be to remove the default value of the argument, to be sure we're complete) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -251,7 +251,7 @@ try: pos = os.lseek(fd, position, how) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(pos) @@ -262,7 +262,7 @@ try: res = os.isatty(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(res) @@ -300,7 +300,7 @@ try: os.close(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(fd_low=c_int, fd_high=c_int) def closerange(fd_low, fd_high): @@ -487,7 +487,7 @@ raise oefmt(space.w_NotImplementedError, "%s: unsupported argument 
combination", funcname) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) else: return build_stat_result(space, st) @@ -546,7 +546,7 @@ rposix_stat.statvfs, allow_fd_fn=rposix_stat.fstatvfs)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return build_statvfs_result(space, st) @@ -558,17 +558,19 @@ try: newfd = rposix.dup(fd, inheritable=False) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(newfd) @unwrap_spec(fd=c_int, fd2=c_int, inheritable=bool) def dup2(space, fd, fd2, inheritable=1): """Duplicate a file descriptor.""" + # like os.close(), this can still raise EINTR to app-level in + # CPython 3.5.2 try: rposix.dup2(fd, fd2, inheritable) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=bool, @@ -613,7 +615,7 @@ else: ok = dispatch_filename(rposix.access)(space, w_path, mode) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return space.wrap(ok) @@ -627,7 +629,7 @@ try: times = os.times() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.newtuple([space.wrap(times[0]), space.wrap(times[1]), @@ -641,7 +643,7 @@ try: rc = os.system(command) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(rc) @@ -662,7 +664,7 @@ else: dispatch_filename(rposix.unlink)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def remove(space, w_path, __kwonly__, 
dir_fd=DEFAULT_DIR_FD): @@ -681,7 +683,7 @@ else: dispatch_filename(rposix.unlink)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) def _getfullpathname(space, w_path): """helper for ntpath.abspath """ @@ -695,7 +697,7 @@ fullpath = rposix.getfullpathname(path) w_fullpath = space.newbytes(fullpath) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return w_fullpath @@ -704,7 +706,7 @@ try: cur = os.getcwd() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.newbytes(cur) @@ -714,7 +716,7 @@ try: cur = os.getcwdu() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(cur) else: @@ -731,7 +733,7 @@ else: dispatch_filename(rposix.chdir)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) def mkdir(space, w_path, mode=0o777, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): @@ -752,7 +754,7 @@ else: dispatch_filename(rposix.mkdir)(space, w_path, mode) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def rmdir(space, w_path, __kwonly__, dir_fd=DEFAULT_DIR_FD): @@ -771,7 +773,7 @@ else: dispatch_filename(rposix.rmdir)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(code=c_int) def strerror(space, code): @@ -786,7 +788,7 @@ try: cur = os.getlogin() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap_fsdecoded(cur) # 
____________________________________________________________ @@ -839,7 +841,7 @@ try: rwin32._wputenv(name, value) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: def _convertenviron(space, w_env): for key, value in os.environ.items(): @@ -850,7 +852,7 @@ try: dispatch_filename_2(rposix.putenv)(space, w_name, w_value) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def unsetenv(space, w_name): """Delete an environment variable.""" @@ -859,7 +861,7 @@ except KeyError: pass except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def listdir(space, w_path=None): @@ -882,7 +884,7 @@ try: result = rposix.listdir(dirname) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.newlist_bytes(result) try: path = space.fsencode_w(w_path) @@ -896,13 +898,13 @@ try: result = rposix.fdlistdir(os.dup(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: dirname = FileEncoder(space, w_path) try: result = rposix.listdir(dirname) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) len_result = len(result) result_w = [None] * len_result for i in range(len_result): @@ -917,14 +919,14 @@ try: return space.wrap(rposix.get_inheritable(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(fd=c_int, inheritable=int) def set_inheritable(space, fd, inheritable): try: rposix.set_inheritable(fd, inheritable) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) _pipe_inhcache = rposix.SetNonInheritableCache() @@ -933,14 +935,14 @@ try: fd1, fd2 = rposix.pipe(rposix.O_CLOEXEC or 0) except OSError as e: - raise 
wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) try: _pipe_inhcache.set_non_inheritable(fd1) _pipe_inhcache.set_non_inheritable(fd2) except OSError as e: rposix.c_close(fd2) rposix.c_close(fd1) - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) @unwrap_spec(flags=c_int) @@ -948,7 +950,7 @@ try: fd1, fd2 = rposix.pipe2(flags) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FCHMODAT), @@ -978,7 +980,7 @@ dispatch_filename(rposix.chmod)(space, w_path, mode) return except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) try: path = space.fsencode_w(w_path) @@ -987,7 +989,12 @@ raise oefmt(space.w_TypeError, "argument should be string, bytes or integer, not %T", w_path) fd = unwrap_fd(space, w_path) - _chmod_fd(space, fd, mode) + # NB. CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR + # to app-level, but os.fchmod(fd) retries automatically + try: + os.fchmod(fd, mode) + except OSError as e: + raise wrap_oserror(space, e, eintr_retry=False) else: try: _chmod_path(path, mode, dir_fd, follow_symlinks) @@ -996,7 +1003,7 @@ # fchmodat() doesn't actually implement follow_symlinks=False # so raise NotImplementedError in this case raise argument_unavailable(space, "chmod", "follow_symlinks") - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) def _chmod_path(path, mode, dir_fd, follow_symlinks): if dir_fd != DEFAULT_DIR_FD or not follow_symlinks: @@ -1004,7 +1011,13 @@ else: rposix.chmod(path, mode) -def _chmod_fd(space, fd, mode): + at unwrap_spec(fd=c_int, mode=c_int) +def fchmod(space, fd, mode): + """\ + Change the access permissions of the file given by file descriptor fd. 
+ """ + # NB. CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR + # to app-level, but os.fchmod(fd) retries automatically while True: try: os.fchmod(fd, mode) @@ -1012,14 +1025,6 @@ except OSError as e: wrap_oserror(space, e, eintr_retry=True) - - at unwrap_spec(fd=c_int, mode=c_int) -def fchmod(space, fd, mode): - """\ - Change the access permissions of the file given by file descriptor fd. - """ - _chmod_fd(space, fd, mode) - @unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) def rename(space, w_src, w_dst, __kwonly__, @@ -1042,7 +1047,8 @@ else: dispatch_filename_2(rposix.rename)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) @@ -1066,7 +1072,8 @@ else: dispatch_filename_2(rposix.replace)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT)) def mkfifo(space, w_path, mode=0666, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): @@ -1078,6 +1085,8 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" + # CPython 3.5.2: why does os.mkfifo() retry automatically if it + # gets EINTR, but not os.mkdir()? 
while True: try: if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: @@ -1127,7 +1136,7 @@ try: pid = os.getpid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pid) @unwrap_spec(pid=c_int, signal=c_int) @@ -1136,7 +1145,7 @@ try: rposix.kill(pid, signal) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(pgid=c_int, signal=c_int) def killpg(space, pgid, signal): @@ -1144,7 +1153,7 @@ try: os.killpg(pgid, signal) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def abort(space): """Abort the interpreter immediately. This 'dumps core' or otherwise fails @@ -1182,7 +1191,8 @@ else: rposix.link(src, dst) except OSError as e: - raise wrap_oserror(space, e, filename=src, filename2=dst) + raise wrap_oserror(space, e, filename=src, filename2=dst, + eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_SYMLINKAT)) @@ -1209,7 +1219,8 @@ else: dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec( @@ -1230,7 +1241,7 @@ else: result = call_rposix(rposix.readlink, path) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) w_result = space.newbytes(result) if space.isinstance_w(path.w_path, space.w_unicode): return space.fsdecode(w_result) @@ -1278,7 +1289,7 @@ except: # Don't clobber the OSError if the fork failed pass - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) if pid == 0: run_fork_hooks('child', space) else: @@ -1296,7 +1307,7 @@ rposix.set_inheritable(master_fd, False) rposix.set_inheritable(slave_fd, False) except OSError as e: - raise wrap_oserror(space, e) + raise 
wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): @@ -1347,7 +1358,7 @@ try: os.execv(command, args) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def _env2interp(space, w_env): @@ -1392,12 +1403,12 @@ try: rposix.fexecve(fd, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: try: os.execve(path, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(mode=int, path='fsencode') def spawnv(space, mode, path, w_argv): @@ -1405,7 +1416,7 @@ try: ret = os.spawnv(mode, path, args) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(ret) @unwrap_spec(mode=int, path='fsencode') @@ -1415,7 +1426,7 @@ try: ret = os.spawnve(mode, path, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(ret) @@ -1523,7 +1534,7 @@ # something is wrong with the file, when it also # could be the time stamp that gives a problem. 
*/ # so we use wrap_oserror() instead of wrap_oserror2() here - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @specialize.arg(1) def do_utimes(space, func, arg, utime): @@ -1540,7 +1551,7 @@ func(arg, (atime, mtime)) except OSError as e: # see comment above: don't use wrap_oserror2() - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @specialize.argtype(1) def _dispatch_utime(path, times): @@ -1583,7 +1594,7 @@ try: r = os.uname() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) l_w = [space.wrap_fsdecoded(i) for i in [r[0], r[1], r[2], r[3], r[4]]] w_tuple = space.newtuple(l_w) @@ -1607,7 +1618,7 @@ try: os.setuid(uid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(euid=c_uid_t) def seteuid(space, euid): @@ -1618,7 +1629,7 @@ try: os.seteuid(euid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(gid=c_gid_t) def setgid(space, gid): @@ -1629,7 +1640,7 @@ try: os.setgid(gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(egid=c_gid_t) def setegid(space, egid): @@ -1640,7 +1651,7 @@ try: os.setegid(egid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(path='fsencode') def chroot(space, path): @@ -1651,7 +1662,7 @@ try: os.chroot(path) except OSError as e: - raise wrap_oserror(space, e, path) + raise wrap_oserror(space, e, path, eintr_retry=False) return space.w_None def getgid(space): @@ -1683,7 +1694,7 @@ try: list = os.getgroups() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newlist([wrap_gid(space, e) for e in list]) def setgroups(space, w_groups): @@ -1697,7 +1708,7 @@ try: os.setgroups(list[:]) except OSError as e: 
- raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(username=str, gid=c_gid_t) def initgroups(space, username, gid): @@ -1710,7 +1721,7 @@ try: os.initgroups(username, gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def getpgrp(space): """ getpgrp() -> pgrp @@ -1727,7 +1738,7 @@ try: os.setpgrp() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None def getppid(space): @@ -1746,7 +1757,7 @@ try: pgid = os.getpgid(pid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pgid) @unwrap_spec(pid=c_int, pgrp=c_int) @@ -1758,7 +1769,7 @@ try: os.setpgid(pid, pgrp) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None @unwrap_spec(ruid=c_uid_t, euid=c_uid_t) @@ -1770,7 +1781,7 @@ try: os.setreuid(ruid, euid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(rgid=c_gid_t, egid=c_gid_t) def setregid(space, rgid, egid): @@ -1781,7 +1792,7 @@ try: os.setregid(rgid, egid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(pid=c_int) def getsid(space, pid): @@ -1792,7 +1803,7 @@ try: sid = os.getsid(pid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(sid) def setsid(space): @@ -1803,7 +1814,7 @@ try: os.setsid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None @unwrap_spec(fd=c_int) @@ -1815,7 +1826,7 @@ try: pgid = os.tcgetpgrp(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pgid) @unwrap_spec(fd=c_int, pgid=c_gid_t) @@ -1827,7 
+1838,7 @@ try: os.tcsetpgrp(fd, pgid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def getresuid(space): """ getresuid() -> (ruid, euid, suid) @@ -1837,7 +1848,7 @@ try: (ruid, euid, suid) = os.getresuid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([wrap_uid(space, ruid), wrap_uid(space, euid), wrap_uid(space, suid)]) @@ -1850,7 +1861,7 @@ try: (rgid, egid, sgid) = os.getresgid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([wrap_gid(space, rgid), wrap_gid(space, egid), wrap_gid(space, sgid)]) @@ -1864,7 +1875,7 @@ try: os.setresuid(ruid, euid, suid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) def setresgid(space, rgid, egid, sgid): @@ -1875,7 +1886,7 @@ try: os.setresgid(rgid, egid, sgid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def declare_new_w_star(name): if name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'): @@ -1901,7 +1912,7 @@ try: return space.wrap_fsdecoded(os.ttyname(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def confname_w(space, w_name, namespace): @@ -1920,7 +1931,7 @@ try: res = os.sysconf(num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(fd=c_int) @@ -1929,7 +1940,7 @@ try: res = os.fpathconf(fd, num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(path=path_or_fd(allow_fd=hasattr(os, 'fpathconf'))) @@ -1939,12 +1950,12 @@ try: res = os.fpathconf(path.as_fd, num) except OSError as e: - raise wrap_oserror(space, e) + raise 
wrap_oserror(space, e, eintr_retry=False) else: try: res = os.pathconf(path.as_bytes, num) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) return space.wrap(res) def confstr(space, w_name): @@ -1952,7 +1963,7 @@ try: res = os.confstr(num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec( @@ -1996,7 +2007,7 @@ try: os.fchown(fd, uid, gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: # String case try: @@ -2011,7 +2022,7 @@ assert dir_fd == DEFAULT_DIR_FD os.chown(path, uid, gid) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) @@ -2024,7 +2035,7 @@ try: os.lchown(path, uid, gid) except OSError as e: - raise wrap_oserror(space, e, path) + raise wrap_oserror(space, e, path, eintr_retry=False) @unwrap_spec(uid=c_uid_t, gid=c_gid_t) def fchown(space, w_fd, uid, gid): @@ -2032,6 +2043,7 @@ Change the owner and group id of the file given by file descriptor fd to the numeric uid and gid. Equivalent to os.chown(fd, uid, gid).""" + # same comment than about os.chmod(fd) vs. 
os.fchmod(fd) fd = space.c_filedescriptor_w(w_fd) while True: try: @@ -2071,7 +2083,7 @@ try: res = os.nice(increment) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(size=int) @@ -2084,7 +2096,9 @@ try: return space.newbytes(rurandom.urandom(context, size)) except OSError as e: - raise wrap_oserror(space, e) + # 'rurandom' should catch and retry internally if it gets EINTR + # (at least in os.read(), which is probably enough in practice) + raise wrap_oserror(space, e, eintr_retry=False) def ctermid(space): """ctermid() -> string @@ -2122,7 +2136,7 @@ try: info = nt._getfileinformation(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(info[0]), space.wrap(info[1]), space.wrap(info[2])]) @@ -2135,7 +2149,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap(e.msg)) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.wrap(result) @@ -2267,7 +2281,7 @@ try: flags = rposix.get_status_flags(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newbool(flags & rposix.O_NONBLOCK == 0) @unwrap_spec(fd=c_int, blocking=int) @@ -2280,4 +2294,4 @@ flags |= rposix.O_NONBLOCK rposix.set_status_flags(fd, flags) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) diff --git a/pypy/module/posix/interp_scandir.py b/pypy/module/posix/interp_scandir.py --- a/pypy/module/posix/interp_scandir.py +++ b/pypy/module/posix/interp_scandir.py @@ -28,7 +28,7 @@ try: dirp = rposix_scandir.opendir(path_bytes) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) path_prefix = path_bytes if len(path_prefix) > 0 and path_prefix[-1] != '/': path_prefix 
+= '/' @@ -85,7 +85,8 @@ try: entry = rposix_scandir.nextentry(self.dirp) except OSError as e: - raise self.fail(wrap_oserror2(space, e, self.w_path_prefix)) + raise self.fail(wrap_oserror2(space, e, self.w_path_prefix, + eintr_retry=False)) if not entry: raise self.fail() assert rposix_scandir.has_name_bytes(entry) @@ -235,7 +236,8 @@ except OSError as e: if e.errno == ENOENT: # not found return -1 - raise wrap_oserror2(self.space, e, self.fget_path(self.space)) + raise wrap_oserror2(self.space, e, self.fget_path(self.space), + eintr_retry=False) return stat.S_IFMT(st.st_mode) def is_dir(self, follow_symlinks): @@ -287,7 +289,8 @@ try: st = self.get_stat_or_lstat(follow_symlinks) except OSError as e: - raise wrap_oserror2(space, e, self.fget_path(space)) + raise wrap_oserror2(space, e, self.fget_path(space), + eintr_retry=False) return build_stat_result(space, st) def descr_inode(self, space): From pypy.commits at gmail.com Sun Dec 4 04:38:52 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 01:38:52 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: ah bah Message-ID: <5843e42c.0209c20a.75a7.e2dd@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88872:4899480d8be8 Date: 2016-12-04 10:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4899480d8be8/ Log: ah bah diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -622,7 +622,8 @@ return wrap_oserror2(space, e, w_filename, exception_name=exception_name, w_exception_class=w_exception_class, - w_filename2=w_filename2) + w_filename2=w_filename2, + eintr_retry=eintr_retry) def exception_from_saved_errno(space, w_type): from rpython.rlib.rposix import get_saved_errno From pypy.commits at gmail.com Sun Dec 4 05:47:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 02:47:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: first try at the _io module Message-ID: 
<5843f438.0a22c20a.a3d0.fd4f@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88874:c9e018812658 Date: 2016-12-04 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/c9e018812658/ Log: first try at the _io module diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -175,46 +175,48 @@ "Cannot use closefd=False with file name") from pypy.module.posix.interp_posix import dispatch_filename - try: - self.fd = dispatch_filename(rposix.open)( - space, w_name, flags, 0666) - except OSError as e: - raise wrap_oserror2(space, e, w_name, - exception_name='w_IOError') - finally: - fd_is_own = True + while True: + try: + self.fd = dispatch_filename(rposix.open)( + space, w_name, flags, 0666) + fd_is_own = True + break + except OSError as e: + wrap_oserror2(space, e, w_name, + exception_name='w_IOError', + eintr_retry=True) if not rposix._WIN32: try: _open_inhcache.set_non_inheritable(self.fd) except OSError as e: - raise wrap_oserror2(space, e, w_name) + raise wrap_oserror2(space, e, w_name, eintr_retry=False) else: w_fd = space.call_function(w_opener, w_name, space.wrap(flags)) try: self.fd = space.int_w(w_fd) + fd_is_own = True except OperationError as e: if not e.match(space, space.w_TypeError): raise raise oefmt(space.w_TypeError, "expected integer from opener") - finally: - fd_is_own = True if not rposix._WIN32: try: rposix.set_inheritable(self.fd, False) except OSError as e: - raise wrap_oserror2(space, e, w_name) + raise wrap_oserror2(space, e, w_name, eintr_retry=False) try: st = os.fstat(self.fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) # On Unix, fopen will succeed for directories. # In Python, there should be no file objects referring to # directories, so we need a check. 
if stat.S_ISDIR(st.st_mode): raise wrap_oserror2(space, OSError(errno.EISDIR, "fstat"), - w_name, exception_name='w_IOError') + w_name, exception_name='w_IOError', + eintr_retry=False) self.blksize = DEFAULT_BUFFER_SIZE if HAS_BLKSIZE and st.st_blksize > 1: self.blksize = st.st_blksize @@ -227,7 +229,8 @@ try: os.lseek(self.fd, 0, os.SEEK_END) except OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) except: if not fd_is_own: self.fd = -1 @@ -285,7 +288,8 @@ os.close(fd) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) def close_w(self, space): try: @@ -319,7 +323,8 @@ pos = os.lseek(self.fd, pos, whence) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) return space.wrap(pos) def tell_w(self, space): @@ -328,7 +333,8 @@ pos = os.lseek(self.fd, 0, 1) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) return space.wrap(pos) def readable_w(self, space): @@ -361,7 +367,8 @@ try: res = os.isatty(self.fd) except OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) return space.wrap(res) def repr_w(self, space): @@ -387,13 +394,16 @@ self._check_writable(space) data = space.getarg_w('y*', w_data).as_str() - try: - n = os.write(self.fd, data) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + n = os.write(self.fd, data) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) return space.wrap(n) @@ -405,13 +415,16 @@ if size < 0: 
return self.readall_w(space) - try: - s = os.read(self.fd, size) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + s = os.read(self.fd, size) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) return space.newbytes(s) @@ -420,13 +433,16 @@ self._check_readable(space) rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() - try: - buf = os.read(self.fd, length) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + buf = os.read(self.fd, length) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) rwbuffer.setslice(0, buf) return space.wrap(len(buf)) @@ -442,17 +458,12 @@ try: chunk = os.read(self.fd, newsize - total) except OSError as e: - if e.errno == errno.EINTR: - space.getexecutioncontext().checksignals() - continue - if total > 0: - # return what we've got so far - break if e.errno == errno.EAGAIN: + if total > 0: + break # return what we've got so far return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') - + wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=True) if not chunk: break builder.append(chunk) @@ -476,7 +487,8 @@ try: self._truncate(space.r_longlong_w(w_size)) except OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) return w_size From pypy.commits at gmail.com Sun Dec 4 05:47:18 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 02:47:18 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: close fd's in case of error Message-ID: 
<5843f436.ce841c0a.94bbc.0fd6@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88873:de02c97af766 Date: 2016-12-04 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/de02c97af766/ Log: close fd's in case of error diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1302,11 +1302,16 @@ def openpty(space): "Open a pseudo-terminal, returning open fd's for both master and slave end." + master_fd = slave_fd = -1 try: master_fd, slave_fd = os.openpty() rposix.set_inheritable(master_fd, False) rposix.set_inheritable(slave_fd, False) except OSError as e: + if master_fd >= 0: + rposix.c_close(master_fd) + if slave_fd >= 0: + rposix.c_close(slave_fd) raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) From pypy.commits at gmail.com Sun Dec 4 05:47:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 02:47:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: fix Message-ID: <5843f43d.42061c0a.4ee49.0eca@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88876:a5c104036e0e Date: 2016-12-04 11:46 +0100 http://bitbucket.org/pypy/pypy/changeset/a5c104036e0e/ Log: fix diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -464,6 +464,7 @@ return space.w_None wrap_oserror(space, e, exception_name='w_IOError', eintr_retry=True) + continue if not chunk: break builder.append(chunk) From pypy.commits at gmail.com Sun Dec 4 05:47:23 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 02:47:23 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: hg merge py3.5 Message-ID: <5843f43b.4438c20a.2b668.f946@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88875:1613a0094a88 Date: 2016-12-04 
11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/1613a0094a88/ Log: hg merge py3.5 diff too long, truncating to 2000 out of 28117 lines diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -156,7 +156,7 @@ __slots__ = () @abstractmethod - async def __aiter__(self): + def __aiter__(self): return AsyncIterator() @classmethod @@ -176,7 +176,7 @@ """Return the next item or raise StopAsyncIteration when exhausted.""" raise StopAsyncIteration - async def __aiter__(self): + def __aiter__(self): return self @classmethod diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -177,6 +177,13 @@ 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', }) REVERSE_IMPORT_MAPPING.update({ diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -151,13 +151,13 @@ # can only be found inside Xcode.app if the "Command Line Tools" # are not installed. # - # Futhermore, the compiler that can be used varies between + # Furthermore, the compiler that can be used varies between # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -193,7 +193,7 @@ if cc != oldcc: # Found a replacement compiler. 
# Modify config vars using new compiler, if not already explicitly - # overriden by an env variable, preserving additional arguments. + # overridden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() @@ -207,7 +207,7 @@ """Remove all universal build arguments from config vars""" for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) @@ -228,7 +228,7 @@ # build extensions on OSX 10.7 and later with the prebuilt # 32-bit installer on the python.org website. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -244,7 +244,7 @@ # across Xcode and compiler versions, there is no reliable way # to be sure why it failed. Assume here it was due to lack of # PPC support and remove the related '-arch' flags from each - # config variables not explicitly overriden by an environment + # config variables not explicitly overridden by an environment # variable. If the error was for some other reason, we hope the # failure will show up again when trying to compile an extension # module. 
@@ -292,7 +292,7 @@ sdk = m.group(1) if not os.path.exists(sdk): for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py --- a/lib-python/3/_pydecimal.py +++ b/lib-python/3/_pydecimal.py @@ -252,7 +252,7 @@ class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. - This occurs and signals invalid-operation if an string is being + This occurs and signals invalid-operation if a string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ @@ -1102,7 +1102,7 @@ def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. - Rounds the number (if more then precision digits) + Rounds the number (if more than precision digits) """ if self._is_special: ans = self._check_nans(context=context) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -296,8 +296,9 @@ called. The basic type used for binary data read from or written to a file is - bytes. bytearrays are accepted too, and in some cases (such as - readinto) needed. Text I/O classes work with str data. + bytes. Other bytes-like objects are accepted as method arguments too. In + some cases (such as readinto), a writable object is required. Text I/O + classes work with str data. Note that calling any method (even inquiries) on a closed stream is undefined. Implementations may raise OSError in this case. @@ -390,7 +391,7 @@ def seekable(self): """Return a bool indicating whether object supports random access. - If False, seek(), tell() and truncate() will raise UnsupportedOperation. + If False, seek(), tell() and truncate() will raise OSError. 
This method may need to do a test seek(). """ return False @@ -405,7 +406,7 @@ def readable(self): """Return a bool indicating whether object was opened for reading. - If False, read() will raise UnsupportedOperation. + If False, read() will raise OSError. """ return False @@ -419,7 +420,7 @@ def writable(self): """Return a bool indicating whether object was opened for writing. - If False, write() and truncate() will raise UnsupportedOperation. + If False, write() and truncate() will raise OSError. """ return False @@ -439,7 +440,7 @@ return self.__closed def _checkClosed(self, msg=None): - """Internal: raise an ValueError if file is closed + """Internal: raise a ValueError if file is closed """ if self.closed: raise ValueError("I/O operation on closed file." @@ -596,7 +597,7 @@ return data def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Returns an int representing the number of bytes read (0 for EOF), or None if the object is set not to block and has no data to read. @@ -606,7 +607,8 @@ def write(self, b): """Write the given buffer to the IO stream. - Returns the number of bytes written, which may be less than len(b). + Returns the number of bytes written, which may be less than the + length of b in bytes. """ self._unsupported("write") @@ -659,7 +661,7 @@ self._unsupported("read1") def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Like read(), this may issue multiple reads to the underlying raw stream, unless the latter is 'interactive'. @@ -673,7 +675,7 @@ return self._readinto(b, read1=False) def readinto1(self, b): - """Read up to len(b) bytes into *b*, using at most one system call + """Read bytes into buffer *b*, using at most one system call Returns an int representing the number of bytes read (0 for EOF). @@ -701,8 +703,8 @@ def write(self, b): """Write the given bytes buffer to the IO stream. 
- Return the number of bytes written, which is never less than - len(b). + Return the number of bytes written, which is always the length of b + in bytes. Raises BlockingIOError if the buffer is full and the underlying raw stream cannot accept more data at the moment. @@ -787,12 +789,6 @@ def seekable(self): return self.raw.seekable() - def readable(self): - return self.raw.readable() - - def writable(self): - return self.raw.writable() - @property def raw(self): return self._raw @@ -890,7 +886,8 @@ raise ValueError("write to closed file") if isinstance(b, str): raise TypeError("can't write str to binary stream") - n = len(b) + with memoryview(b) as view: + n = view.nbytes # Size of any bytes-like object if n == 0: return 0 pos = self._pos @@ -982,6 +979,9 @@ self._reset_read_buf() self._read_lock = Lock() + def readable(self): + return self.raw.readable() + def _reset_read_buf(self): self._read_buf = b"" self._read_pos = 0 @@ -1043,7 +1043,7 @@ break avail += len(chunk) chunks.append(chunk) - # n is more then avail only when an EOF occurred or when + # n is more than avail only when an EOF occurred or when # read() would have blocked. n = min(n, avail) out = b"".join(chunks) @@ -1093,14 +1093,13 @@ def _readinto(self, buf, read1): """Read data into *buf* with at most one system call.""" - if len(buf) == 0: - return 0 - # Need to create a memoryview object of type 'b', otherwise # we may not be able to assign bytes to it, and slicing it # would create a new object. 
if not isinstance(buf, memoryview): buf = memoryview(buf) + if buf.nbytes == 0: + return 0 buf = buf.cast('B') written = 0 @@ -1170,6 +1169,9 @@ self._write_buf = bytearray() self._write_lock = Lock() + def writable(self): + return self.raw.writable() + def write(self, b): if self.closed: raise ValueError("write to closed file") diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -307,13 +311,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: @@ -456,6 +462,10 @@ week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) + if julian <= 0: + year -= 1 + yday = 366 if calendar.isleap(year) else 365 + julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py --- a/lib-python/3/asyncio/base_events.py +++ b/lib-python/3/asyncio/base_events.py @@ -52,6 +52,12 @@ # before cleanup of cancelled handles is performed. 
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 +# Exceptions which must not call the exception handler in fatal error +# methods (_fatal_error()) +_FATAL_ERROR_IGNORE = (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError) + + def _format_handle(handle): cb = handle._callback if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): @@ -70,49 +76,89 @@ return repr(fd) -def _check_resolved_address(sock, address): - # Ensure that the address is already resolved to avoid the trap of hanging - # the entire event loop when the address requires doing a DNS lookup. - # - # getaddrinfo() is slow (around 10 us per call): this function should only - # be called in debug mode - family = sock.family +# Linux's sock.type is a bitmask that can include extra info about socket. +_SOCKET_TYPE_MASK = 0 +if hasattr(socket, 'SOCK_NONBLOCK'): + _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK +if hasattr(socket, 'SOCK_CLOEXEC'): + _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC - if family == socket.AF_INET: - host, port = address - elif family == socket.AF_INET6: - host, port = address[:2] - else: + +def _ipaddr_info(host, port, family, type, proto): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): return - # On Windows, socket.inet_pton() is only available since Python 3.4 - if hasattr(socket, 'inet_pton'): - # getaddrinfo() is slow and has known issue: prefer inet_pton() - # if available + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + type &= ~_SOCKET_TYPE_MASK + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes): + if port == b'': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like b"http". 
+ port = socket.getservbyname(port.decode('ascii')) + elif isinstance(port, str): + if port == '': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like "http". + port = socket.getservbyname(port) + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET, socket.AF_INET6] + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: try: - socket.inet_pton(family, host) - except OSError as exc: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, exc)) + socket.inet_pton(af, host) + # The host has already been resolved. + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0, + flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto) + if info is not None: + # "host" is already a resolved IP. + fut = loop.create_future() + fut.set_result([info]) + return fut else: - # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is - # already resolved. 
- type_mask = 0 - if hasattr(socket, 'SOCK_NONBLOCK'): - type_mask |= socket.SOCK_NONBLOCK - if hasattr(socket, 'SOCK_CLOEXEC'): - type_mask |= socket.SOCK_CLOEXEC - try: - socket.getaddrinfo(host, port, - family=family, - type=(sock.type & ~type_mask), - proto=sock.proto, - flags=socket.AI_NUMERICHOST) - except socket.gaierror as err: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, err)) + return loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) def _run_until_complete_cb(fut): @@ -167,7 +213,7 @@ def wait_closed(self): if self.sockets is None or self._waiters is None: return - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._waiters.append(waiter) yield from waiter @@ -201,6 +247,10 @@ % (self.__class__.__name__, self.is_running(), self.is_closed(), self.get_debug())) + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + def create_task(self, coro): """Schedule a coroutine object. 
@@ -494,7 +544,7 @@ assert not args assert not isinstance(func, events.TimerHandle) if func._cancelled: - f = futures.Future(loop=self) + f = self.create_future() f.set_result(None) return f func, args = func._callback, func._args @@ -584,14 +634,14 @@ raise ValueError( 'host/port and sock can not be specified at the same time') - f1 = self.getaddrinfo( - host, port, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f1 = _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs = [f1] if local_addr is not None: - f2 = self.getaddrinfo( - *local_addr, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f2 = _ensure_resolved(local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs.append(f2) else: f2 = None @@ -673,7 +723,7 @@ def _create_connection_transport(self, sock, protocol_factory, ssl, server_hostname): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( @@ -726,9 +776,9 @@ assert isinstance(addr, tuple) and len(addr) == 2, ( '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) + infos = yield from _ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') @@ -793,7 +843,7 @@ raise exceptions[0] protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_datagram_transport( sock, protocol, r_addr, waiter) if self._debug: @@ -816,9 +866,9 @@ @coroutine def _create_server_getaddrinfo(self, host, port, family, flags): - infos = yield from self.getaddrinfo(host, port, family=family, + infos = yield from _ensure_resolved((host, port), 
family=family, type=socket.SOCK_STREAM, - flags=flags) + flags=flags, loop=self) if not infos: raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) return infos @@ -839,7 +889,10 @@ to host and port. The host parameter can also be a sequence of strings and in that case - the TCP server is bound to all hosts of the sequence. + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. Return a Server object which can be used to stop the service. @@ -868,7 +921,7 @@ flags=flags) for host in hosts] infos = yield from tasks.gather(*fs, loop=self) - infos = itertools.chain.from_iterable(infos) + infos = set(itertools.chain.from_iterable(infos)) completed = False try: @@ -929,7 +982,7 @@ @coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_read_pipe_transport(pipe, protocol, waiter) try: @@ -946,7 +999,7 @@ @coroutine def connect_write_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_write_pipe_transport(pipe, protocol, waiter) try: @@ -1028,6 +1081,11 @@ logger.info('%s: %r' % (debug_log, transport)) return transport, protocol + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + def set_exception_handler(self, handler): """Set handler as the new event loop exception handler. 
diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py --- a/lib-python/3/asyncio/base_subprocess.py +++ b/lib-python/3/asyncio/base_subprocess.py @@ -210,6 +210,10 @@ logger.info('%r exited with return code %r', self, returncode) self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode self._call(self._protocol.process_exited) self._try_finish() @@ -227,7 +231,7 @@ if self._returncode is not None: return self._returncode - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._exit_waiters.append(waiter) return (yield from waiter) diff --git a/lib-python/3/asyncio/compat.py b/lib-python/3/asyncio/compat.py --- a/lib-python/3/asyncio/compat.py +++ b/lib-python/3/asyncio/compat.py @@ -4,6 +4,7 @@ PY34 = sys.version_info >= (3, 4) PY35 = sys.version_info >= (3, 5) +PY352 = sys.version_info >= (3, 5, 2) def flatten_list_bytes(list_of_data): diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py --- a/lib-python/3/asyncio/coroutines.py +++ b/lib-python/3/asyncio/coroutines.py @@ -27,8 +27,8 @@ # before you define your coroutines. A downside of using this feature # is that tracebacks show entries for the CoroWrapper.__next__ method # when _DEBUG is true. 
-_DEBUG = (not sys.flags.ignore_environment - and bool(os.environ.get('PYTHONASYNCIODEBUG'))) +_DEBUG = (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) try: @@ -86,7 +86,7 @@ def __init__(self, gen, func=None): assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen self.gen = gen - self.func = func # Used to unwrap @coroutine decorator + self.func = func # Used to unwrap @coroutine decorator self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.__name__ = getattr(gen, '__name__', None) self.__qualname__ = getattr(gen, '__qualname__', None) @@ -204,7 +204,8 @@ @functools.wraps(func) def coro(*args, **kw): res = func(*args, **kw) - if isinstance(res, futures.Future) or inspect.isgenerator(res): + if isinstance(res, futures.Future) or inspect.isgenerator(res) or \ + isinstance(res, CoroWrapper): res = yield from res elif _AwaitableABC is not None: # If 'func' returns an Awaitable (new in 3.5) we @@ -283,10 +284,13 @@ coro_frame = coro.cr_frame filename = coro_code.co_filename - if (isinstance(coro, CoroWrapper) - and not inspect.isgeneratorfunction(coro.func) - and coro.func is not None): - filename, lineno = events._get_function_source(coro.func) + lineno = 0 + if (isinstance(coro, CoroWrapper) and + not inspect.isgeneratorfunction(coro.func) and + coro.func is not None): + source = events._get_function_source(coro.func) + if source is not None: + filename, lineno = source if coro_frame is None: coro_repr = ('%s done, defined at %s:%s' % (coro_name, filename, lineno)) diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -266,6 +266,9 @@ def time(self): raise NotImplementedError + def create_future(self): + raise NotImplementedError + # Method scheduling a coroutine object: create a task. def create_task(self, coro): @@ -484,6 +487,9 @@ # Error handlers. 
+ def get_exception_handler(self): + raise NotImplementedError + def set_exception_handler(self, handler): raise NotImplementedError diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py --- a/lib-python/3/asyncio/futures.py +++ b/lib-python/3/asyncio/futures.py @@ -142,7 +142,7 @@ def __init__(self, *, loop=None): """Initialize the future. - The optional event_loop argument allows to explicitly set the event + The optional event_loop argument allows explicitly setting the event loop object used by the future. If it's not provided, the future uses the default event loop. """ @@ -341,6 +341,9 @@ raise InvalidStateError('{}: {!r}'.format(self._state, self)) if isinstance(exception, type): exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") self._exception = exception self._state = _FINISHED self._schedule_callbacks() @@ -448,6 +451,8 @@ return future assert isinstance(future, concurrent.futures.Future), \ 'concurrent.futures.Future is expected, got {!r}'.format(future) - new_future = Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() _chain_future(future, new_future) return new_future diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py --- a/lib-python/3/asyncio/locks.py +++ b/lib-python/3/asyncio/locks.py @@ -111,7 +111,7 @@ acquire() is a coroutine and should be called with 'yield from'. Locks also support the context management protocol. '(yield from lock)' - should be used as context manager expression. + should be used as the context manager expression. 
Usage: @@ -170,7 +170,7 @@ self._locked = True return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -258,7 +258,7 @@ if self._value: return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -320,7 +320,7 @@ self.release() try: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -329,7 +329,13 @@ self._waiters.remove(fut) finally: - yield from self.acquire() + # Must reacquire lock even if wait is cancelled + while True: + try: + yield from self.acquire() + break + except futures.CancelledError: + pass @coroutine def wait_for(self, predicate): @@ -433,7 +439,7 @@ True. """ while self._value <= 0: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut diff --git a/lib-python/3/asyncio/proactor_events.py b/lib-python/3/asyncio/proactor_events.py --- a/lib-python/3/asyncio/proactor_events.py +++ b/lib-python/3/asyncio/proactor_events.py @@ -90,7 +90,7 @@ self.close() def _fatal_error(self, exc, message='Fatal error on pipe transport'): - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -440,15 +440,7 @@ return self._proactor.send(sock, data) def sock_connect(self, sock, address): - try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut = futures.Future(loop=self) - fut.set_exception(err) - return fut - else: - return self._proactor.connect(sock, address) + return self._proactor.connect(sock, address) def sock_accept(self, sock): return self._proactor.accept(sock) diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py --- 
a/lib-python/3/asyncio/queues.py +++ b/lib-python/3/asyncio/queues.py @@ -128,7 +128,7 @@ This method is a coroutine. """ while self.full(): - putter = futures.Future(loop=self._loop) + putter = self._loop.create_future() self._putters.append(putter) try: yield from putter @@ -162,7 +162,7 @@ This method is a coroutine. """ while self.empty(): - getter = futures.Future(loop=self._loop) + getter = self._loop.create_future() self._getters.append(getter) try: yield from getter diff --git a/lib-python/3/asyncio/selector_events.py b/lib-python/3/asyncio/selector_events.py --- a/lib-python/3/asyncio/selector_events.py +++ b/lib-python/3/asyncio/selector_events.py @@ -196,7 +196,7 @@ transport = None try: protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if sslcontext: transport = self._make_ssl_transport( conn, protocol, sslcontext, waiter=waiter, @@ -314,7 +314,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_recv(fut, False, sock, n) return fut @@ -352,7 +352,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() if data: self._sock_sendall(fut, False, sock, data) else: @@ -385,25 +385,28 @@ def sock_connect(self, sock, address): """Connect to a remote socket at address. - The address must be already resolved to avoid the trap of hanging the - entire event loop when the address requires doing a DNS lookup. For - example, it must be an IP address, not an hostname, for AF_INET and - AF_INET6 address families. Use getaddrinfo() to resolve the hostname - asynchronously. - This method is a coroutine. 
""" if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + + fut = self.create_future() + if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX: + self._sock_connect(fut, sock, address) + else: + resolved = base_events._ensure_resolved(address, loop=self) + resolved.add_done_callback( + lambda resolved: self._on_resolved(fut, sock, resolved)) + + return fut + + def _on_resolved(self, fut, sock, resolved): try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut.set_exception(err) + _, _, _, _, address = resolved.result()[0] + except Exception as exc: + fut.set_exception(exc) else: self._sock_connect(fut, sock, address) - return fut def _sock_connect(self, fut, sock, address): fd = sock.fileno() @@ -454,7 +457,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_accept(fut, False, sock) return fut @@ -566,6 +569,7 @@ self._loop.remove_reader(self._sock_fd) if not self._buffer: self._conn_lost += 1 + self._loop.remove_writer(self._sock_fd) self._loop.call_soon(self._call_connection_lost, None) # On Python 3.3 and older, objects with a destructor part of a reference @@ -579,8 +583,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. 
- if isinstance(exc, (BrokenPipeError, - ConnectionResetError, ConnectionAbortedError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -660,6 +663,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return try: data = self._sock.recv(self.max_size) except (BlockingIOError, InterruptedError): @@ -683,8 +688,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if not data: @@ -719,6 +724,8 @@ def _write_ready(self): assert self._buffer, 'Data should not be empty' + if self._conn_lost: + return try: n = self._sock.send(self._buffer) except (BlockingIOError, InterruptedError): @@ -889,6 +896,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return if self._write_wants_read: self._write_wants_read = False self._write_ready() @@ -921,6 +930,8 @@ self.close() def _write_ready(self): + if self._conn_lost: + return if self._read_wants_write: self._read_wants_write = False self._read_ready() @@ -955,8 +966,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return @@ -998,6 +1009,8 @@ return sum(len(data) for data, _ in self._buffer) def _read_ready(self): + if self._conn_lost: + return try: data, addr = self._sock.recvfrom(self.max_size) except (BlockingIOError, InterruptedError): @@ -1011,8 +1024,8 @@ def sendto(self, data, addr=None): if not isinstance(data, (bytes, bytearray, memoryview)): - raise 
TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py --- a/lib-python/3/asyncio/sslproto.py +++ b/lib-python/3/asyncio/sslproto.py @@ -603,7 +603,7 @@ self._wakeup_waiter() self._session_established = True # In case transport.write() was already called. Don't call - # immediatly _process_write_backlog(), but schedule it: + # immediately _process_write_backlog(), but schedule it: # _on_handshake_complete() can be called indirectly from # _process_write_backlog(), and _process_write_backlog() is not # reentrant. @@ -655,7 +655,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py --- a/lib-python/3/asyncio/streams.py +++ b/lib-python/3/asyncio/streams.py @@ -3,6 +3,7 @@ __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', 'open_connection', 'start_server', 'IncompleteReadError', + 'LimitOverrunError', ] import socket @@ -13,13 +14,12 @@ from . import coroutines from . import compat from . import events -from . import futures from . import protocols from .coroutines import coroutine from .log import logger -_DEFAULT_LIMIT = 2**16 +_DEFAULT_LIMIT = 2 ** 16 class IncompleteReadError(EOFError): @@ -27,15 +27,26 @@ Incomplete read error. 
Attributes: - partial: read bytes string before the end of stream was reached - - expected: total number of expected bytes + - expected: total number of expected bytes (or None if unknown) """ def __init__(self, partial, expected): - EOFError.__init__(self, "%s bytes read on a total of %s expected bytes" - % (len(partial), expected)) + super().__init__("%d bytes read on a total of %r expected bytes" + % (len(partial), expected)) self.partial = partial self.expected = expected +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + @coroutine def open_connection(host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -118,7 +129,6 @@ writer = StreamWriter(transport, protocol, reader, loop) return reader, writer - @coroutine def start_unix_server(client_connected_cb, path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -196,7 +206,7 @@ return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._drain_waiter = waiter yield from waiter @@ -215,9 +225,11 @@ self._stream_reader = stream_reader self._stream_writer = None self._client_connected_cb = client_connected_cb + self._over_ssl = False def connection_made(self, transport): self._stream_reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None if self._client_connected_cb is not None: self._stream_writer = StreamWriter(transport, self, self._stream_reader, @@ -228,17 +240,25 @@ self._loop.create_task(res) def connection_lost(self, exc): - if exc is None: - self._stream_reader.feed_eof() - else: - self._stream_reader.set_exception(exc) + if self._stream_reader is not None: + if exc is None: + self._stream_reader.feed_eof() + else: + 
self._stream_reader.set_exception(exc) super().connection_lost(exc) + self._stream_reader = None + self._stream_writer = None def data_received(self, data): self._stream_reader.feed_data(data) def eof_received(self): self._stream_reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False return True @@ -318,6 +338,10 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None): # The line length limit is a security feature; # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + self._limit = limit if loop is None: self._loop = events.get_event_loop() @@ -361,7 +385,7 @@ waiter.set_exception(exc) def _wakeup_waiter(self): - """Wakeup read() or readline() function waiting for data or EOF.""" + """Wakeup read*() functions waiting for data or EOF.""" waiter = self._waiter if waiter is not None: self._waiter = None @@ -395,8 +419,8 @@ self._wakeup_waiter() if (self._transport is not None and - not self._paused and - len(self._buffer) > 2*self._limit): + not self._paused and + len(self._buffer) > 2 * self._limit): try: self._transport.pause_reading() except NotImplementedError: @@ -409,7 +433,10 @@ @coroutine def _wait_for_data(self, func_name): - """Wait until feed_data() or feed_eof() is called.""" + """Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + """ # StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know @@ -418,7 +445,14 @@ raise RuntimeError('%s() called while another coroutine is ' 'already waiting for incoming data' % func_name) - self._waiter = futures.Future(loop=self._loop) + assert not self._eof, '_wait_for_data after EOF' + + # Waiting for data while paused will make deadlock, so prevent it. 
+ if self._paused: + self._paused = False + self._transport.resume_reading() + + self._waiter = self._loop.create_future() try: yield from self._waiter finally: @@ -426,43 +460,154 @@ @coroutine def readline(self): + """Read chunk of data from the stream until newline (b'\n') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + """ + sep = b'\n' + seplen = len(sep) + try: + line = yield from self.readuntil(sep) + except IncompleteReadError as e: + return e.partial + except LimitOverrunError as e: + if self._buffer.startswith(sep, e.consumed): + del self._buffer[:e.consumed + seplen] + else: + self._buffer.clear() + self._maybe_resume_transport() + raise ValueError(e.args[0]) + return line + + @coroutine + def readuntil(self, separator=b'\n'): + """Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. 
+ + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + """ + seplen = len(separator) + if seplen == 0: + raise ValueError('Separator should be at least one-byte string') + if self._exception is not None: raise self._exception - line = bytearray() - not_enough = True + # Consume whole buffer except last bytes, which length is + # one less than seplen. Let's check corner cases with + # separator='SEPARATOR': + # * we have received almost complete separator (without last + # byte). i.e buffer='some textSEPARATO'. In this case we + # can safely consume len(separator) - 1 bytes. + # * last byte of buffer is first byte of separator, i.e. + # buffer='abcdefghijklmnopqrS'. We may safely consume + # everything except that last byte, but this require to + # analyze bytes of buffer that match partial separator. + # This is slow and/or require FSM. For this case our + # implementation is not optimal, since require rescanning + # of data that is known to not belong to separator. In + # real world, separator will not be so long to notice + # performance problems. Even when reading MIME-encoded + # messages :) - while not_enough: - while self._buffer and not_enough: - ichar = self._buffer.find(b'\n') - if ichar < 0: - line.extend(self._buffer) - self._buffer.clear() - else: - ichar += 1 - line.extend(self._buffer[:ichar]) - del self._buffer[:ichar] - not_enough = False + # `offset` is the number of bytes from the beginning of the buffer + # where there is no occurrence of `separator`. + offset = 0 - if len(line) > self._limit: - self._maybe_resume_transport() - raise ValueError('Line is too long') + # Loop until we find `separator` in the buffer, exceed the buffer size, + # or an EOF has happened. + while True: + buflen = len(self._buffer) + # Check if we now have enough data in the buffer for `separator` to + # fit. 
+ if buflen - offset >= seplen: + isep = self._buffer.find(separator, offset) + + if isep != -1: + # `separator` is in the buffer. `isep` will be used later + # to retrieve the data. + break + + # see upper comment for explanation. + offset = buflen + 1 - seplen + if offset > self._limit: + raise LimitOverrunError( + 'Separator is not found, and chunk exceed the limit', + offset) + + # Complete message (with full separator) may be present in buffer + # even when EOF flag is set. This may happen when the last chunk + # adds data which makes separator be found. That's why we check for + # EOF *ater* inspecting the buffer. if self._eof: - break + chunk = bytes(self._buffer) + self._buffer.clear() + raise IncompleteReadError(chunk, None) - if not_enough: - yield from self._wait_for_data('readline') + # _wait_for_data() will resume reading if stream was paused. + yield from self._wait_for_data('readuntil') + if isep > self._limit: + raise LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] self._maybe_resume_transport() - return bytes(line) + return bytes(chunk) @coroutine def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediatelly. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. 
+ """ + if self._exception is not None: raise self._exception - if not n: + if n == 0: return b'' if n < 0: @@ -477,26 +622,42 @@ break blocks.append(block) return b''.join(blocks) - else: - if not self._buffer and not self._eof: - yield from self._wait_for_data('read') - if n < 0 or len(self._buffer) <= n: - data = bytes(self._buffer) - self._buffer.clear() - else: - # n > 0 and len(self._buffer) > n - data = bytes(self._buffer[:n]) - del self._buffer[:n] + if not self._buffer and not self._eof: + yield from self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] self._maybe_resume_transport() return data @coroutine def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + if self._exception is not None: raise self._exception + if n == 0: + return b'' + # There used to be "optimized" code here. It created its own # Future and waited until self._buffer had at least the n # bytes, then called read(n). Unfortunately, this could pause @@ -513,6 +674,8 @@ blocks.append(block) n -= len(block) + assert n == 0 + return b''.join(blocks) if compat.PY35: @@ -526,3 +689,9 @@ if val == b'': raise StopAsyncIteration return val + + if compat.PY352: + # In Python 3.5.2 and greater, __aiter__ should return + # the asynchronous iterator directly. 
+ def __aiter__(self): + return self diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -166,7 +166,7 @@ @coroutine def communicate(self, input=None): - if input: + if input is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -251,7 +251,13 @@ else: if isinstance(result, futures.Future): # Yielded Future must come from Future.__iter__(). - if result._blocking: + if result._loop is not self._loop: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task {!r} got Future {!r} attached to a ' + 'different loop'.format(self, result))) + elif result._blocking: result._blocking = False result.add_done_callback(self._wakeup) self._fut_waiter = result @@ -366,7 +372,7 @@ if timeout is None: return (yield from fut) - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) @@ -394,12 +400,12 @@ @coroutine def _wait(fs, timeout, return_when, loop): - """Internal helper for wait() and _wait_for(). + """Internal helper for wait() and wait_for(). The fs argument must be a collection of Futures. """ assert fs, 'Set of Futures is empty.' - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = None if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) @@ -500,7 +506,9 @@ yield return result - future = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + future = loop.create_future() h = future._loop.call_later(delay, futures._set_result_unless_cancelled, future, result) @@ -597,7 +605,9 @@ be cancelled.) 
""" if not coros_or_futures: - outer = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + outer = loop.create_future() outer.set_result([]) return outer @@ -685,7 +695,7 @@ # Shortcut. return inner loop = inner._loop - outer = futures.Future(loop=loop) + outer = loop.create_future() def _done_callback(inner): if outer.cancelled(): diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -446,9 +446,14 @@ finally: logger.setLevel(old_level) -def mock_nonblocking_socket(): + +def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, + family=socket.AF_INET): """Create a mock of a non-blocking socket.""" - sock = mock.Mock(socket.socket) + sock = mock.MagicMock(socket.socket) + sock.proto = proto + sock.type = type + sock.family = family sock.gettimeout.return_value = 0.0 return sock diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py --- a/lib-python/3/asyncio/unix_events.py +++ b/lib-python/3/asyncio/unix_events.py @@ -177,7 +177,7 @@ stdin, stdout, stderr, bufsize, extra=None, **kwargs): with events.get_child_watcher() as watcher: - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _UnixSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -329,14 +329,17 @@ elif self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_READ) if polling: info.append('polling') else: info.append('idle') + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -453,9 +456,10 @@ elif 
self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_WRITE) if polling: info.append('polling') @@ -464,6 +468,8 @@ bufsize = self.get_write_buffer_size() info.append('bufsize=%s' % bufsize) + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -575,7 +581,7 @@ def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/windows_events.py b/lib-python/3/asyncio/windows_events.py --- a/lib-python/3/asyncio/windows_events.py +++ b/lib-python/3/asyncio/windows_events.py @@ -197,7 +197,7 @@ # # If the IocpProactor already received the event, it's safe to call # _unregister() because we kept a reference to the Overlapped object - # which is used as an unique key. + # which is used as a unique key. 
self._proactor._unregister(self._ov) self._proactor = None @@ -366,7 +366,7 @@ def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _WindowsSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -417,7 +417,7 @@ return tmp def _result(self, value): - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() fut.set_result(value) return fut diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -12,7 +12,7 @@ __all__ = [ - # Legacy interface exports traditional RFC 1521 Base64 encodings + # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', @@ -49,14 +49,11 @@ # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): - """Encode a byte string using Base64. + """Encode the bytes-like object s using Base64 and return a bytes object. - s is the byte string to encode. Optional altchars must be a byte - string of length 2 which specifies an alternative alphabet for the - '+' and '/' characters. This allows an application to - e.g. generate url or filesystem safe Base64 strings. - - The encoded byte string is returned. + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. """ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] @@ -67,18 +64,19 @@ def b64decode(s, altchars=None, validate=False): - """Decode a Base64 encoded byte string. + """Decode the Base64 encoded bytes-like object or ASCII string s. - s is the byte string to decode. 
Optional altchars must be a - string of length 2 which specifies the alternative alphabet used - instead of the '+' and '/' characters. + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. - The decoded string is returned. A binascii.Error is raised if s is - incorrectly padded. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. - If validate is False (the default), non-base64-alphabet characters are - discarded prior to the padding check. If validate is True, - non-base64-alphabet characters in the input result in a binascii.Error. + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. """ s = _bytes_from_decode_data(s) if altchars is not None: @@ -91,19 +89,19 @@ def standard_b64encode(s): - """Encode a byte string using the standard Base64 alphabet. + """Encode bytes-like object s using the standard Base64 alphabet. - s is the byte string to encode. The encoded byte string is returned. + The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes encoded with the standard Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. 
""" return b64decode(s) @@ -112,21 +110,22 @@ _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): - """Encode a byte string using a url-safe Base64 alphabet. + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to encode. The encoded byte string is - returned. The alphabet uses '-' instead of '+' and '_' instead of + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ @@ -142,9 +141,7 @@ _b32rev = None def b32encode(s): - """Encode a byte string using Base32. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base32 and return a bytes object. """ global _b32tab2 # Delay the initialization of the table to not waste memory @@ -182,11 +179,10 @@ return bytes(encoded) def b32decode(s, casefold=False, map01=None): - """Decode a Base32 encoded byte string. + """Decode the Base32 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. 
- For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to @@ -196,7 +192,7 @@ the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. - The decoded byte string is returned. binascii.Error is raised if + The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. """ @@ -257,23 +253,20 @@ # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): - """Encode a byte string using Base16. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): - """Decode a Base16 encoded byte string. + """Decode the Base16 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. - For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. - The decoded byte string is returned. binascii.Error is raised if - s were incorrectly padded or if there are non-alphabet characters - present in the string. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. 
""" s = _bytes_from_decode_data(s) if casefold: @@ -316,19 +309,17 @@ return b''.join(chunks) def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): - """Encode a byte string using Ascii85. - - b is the byte string to encode. The encoded byte string is returned. + """Encode bytes-like object b using Ascii85 and return a bytes object. foldspaces is an optional flag that uses the special short sequence 'y' instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This feature is not supported by the "standard" Adobe encoding. - wrapcol controls whether the output should have newline ('\\n') characters + wrapcol controls whether the output should have newline (b'\\n') characters added to it. If this is non-zero, each output line will be at most this many characters long. - pad controls whether the input string is padded to a multiple of 4 before + pad controls whether the input is padded to a multiple of 4 before encoding. Note that the btoa implementation always pads. adobe controls whether the encoded byte sequence is framed with <~ and ~>, @@ -359,9 +350,7 @@ return result def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): - """Decode an Ascii85 encoded byte string. - - s is the byte string to decode. + """Decode the Ascii85 encoded bytes-like object or ASCII string b. foldspaces is a flag that specifies whether the 'y' short sequence should be accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is @@ -373,13 +362,20 @@ ignorechars should be a byte string containing characters to ignore from the input. This should only contain whitespace characters, and by default contains all whitespace characters in ASCII. + + The result is returned as a bytes object. 
""" b = _bytes_from_decode_data(b) if adobe: - if not (b.startswith(_A85START) and b.endswith(_A85END)): - raise ValueError("Ascii85 encoded byte sequences must be bracketed " - "by {!r} and {!r}".format(_A85START, _A85END)) - b = b[2:-2] # Strip off start/end markers + if not b.endswith(_A85END): + raise ValueError( + "Ascii85 encoded byte sequences must end " + "with {!r}".format(_A85END) + ) + if b.startswith(_A85START): + b = b[2:-2] # Strip off start/end markers + else: + b = b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences @@ -432,10 +428,10 @@ _b85dec = None def b85encode(b, pad=False): - """Encode an ASCII-encoded byte array in base85 format. + """Encode bytes-like object b in base85 format and return a bytes object. - If pad is true, the input is padded with "\\0" so its length is a multiple of - 4 characters before encoding. + If pad is true, the input is padded with b'\\0' so its length is a multiple of + 4 bytes before encoding. """ global _b85chars, _b85chars2 # Delay the initialization of tables to not waste memory @@ -446,7 +442,10 @@ return _85encode(b, _b85chars, _b85chars2, pad) def b85decode(b): - """Decode base85-encoded byte array""" + """Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. 
+ """ global _b85dec # Delay the initialization of tables to not waste memory # if the function is never called @@ -531,7 +530,7 @@ def encodebytes(s): - """Encode a bytestring into a bytestring containing multiple lines + """Encode a bytestring into a bytes object containing multiple lines of base-64 data.""" _input_type_check(s) pieces = [] @@ -549,7 +548,7 @@ def decodebytes(s): - """Decode a bytestring of base-64 data into a bytestring.""" + """Decode a bytestring of base-64 data into a bytes object.""" _input_type_check(s) return binascii.a2b_base64(s) From pypy.commits at gmail.com Sun Dec 4 05:59:19 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 02:59:19 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: translation fix Message-ID: <5843f707.6602c20a.9d1f2.00c3@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88877:dab6fcde4713 Date: 2016-12-04 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/dab6fcde4713/ Log: translation fix diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -543,6 +543,7 @@ _WINDOWS = True def wrap_windowserror(space, e, w_filename=None): + XXX # WindowsError no longer exists in Py3.5 from rpython.rlib import rwin32 winerror = e.winerror @@ -559,13 +560,13 @@ space.wrap(msg)) return OperationError(exc, w_error) - at specialize.arg(3) + at specialize.arg(3, 6) def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError', w_exception_class=None, w_filename2=None, eintr_retry=False): """A double API here: * if eintr_retry is False, always return the OperationError to - be raised by the caller, which might be about EINTR + be raised by the caller. It can possibly be about EINTR (checksignals() is still called here). 
* if eintr_retry is True (PEP 475 compliant API for retrying @@ -579,6 +580,21 @@ if _WINDOWS and isinstance(e, WindowsError): return wrap_windowserror(space, e, w_filename) + if w_exception_class is None: + w_exc = getattr(space, exception_name) + else: + w_exc = w_exception_class + operror = _wrap_oserror2_impl(space, e, w_filename, w_filename2, w_exc, + eintr_retry) + if eintr_retry: + assert operror is None # otherwise, _wrap_oserror2_impl() has raised + else: + assert operror is not None # tell the annotator we don't return None + return operror + +def _wrap_oserror2_impl(space, e, w_filename, w_filename2, w_exc, eintr_retry): + # move the common logic in its own function, instead of having it + # duplicated 4 times in all 4 specialized versions of wrap_oserror2() errno = e.errno if errno == EINTR: @@ -590,27 +606,24 @@ msg = strerror(errno) except ValueError: msg = u'error %d' % errno - if w_exception_class is None: - exc = getattr(space, exception_name) - else: - exc = w_exception_class if w_filename is not None: if w_filename2 is not None: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg), w_filename, space.w_None, w_filename2) else: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg), w_filename) else: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg)) - operr = OperationError(exc, w_error) + operror = OperationError(w_exc, w_error) if eintr_retry: - raise operr - return operr + raise operror + return operror +_wrap_oserror2_impl._dont_inline_ = True - at specialize.arg(3) + at specialize.arg(3, 6) def wrap_oserror(space, e, filename=None, exception_name='w_OSError', w_exception_class=None, filename2=None, eintr_retry=False): w_filename = None @@ -624,6 +637,7 @@ w_exception_class=w_exception_class, 
w_filename2=w_filename2, eintr_retry=eintr_retry) +wrap_oserror._dont_inline_ = True def exception_from_saved_errno(space, w_type): from rpython.rlib.rposix import get_saved_errno From pypy.commits at gmail.com Sun Dec 4 06:41:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 03:41:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: pep475ify time.sleep() Message-ID: <584400da.90121c0a.1a3af.b79c@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88878:b98fafbb5f1c Date: 2016-12-04 12:40 +0100 http://bitbucket.org/pypy/pypy/changeset/b98fafbb5f1c/ Log: pep475ify time.sleep() diff --git a/pypy/interpreter/timeutils.py b/pypy/interpreter/timeutils.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/timeutils.py @@ -0,0 +1,11 @@ +""" +Access to the time module's high-resolution monotonic clock +""" + +def monotonic(space): + from pypy.module.time import interp_time + if interp_time.HAS_MONOTONIC: + w_res = interp_time.monotonic(space) + else: + w_res = interp_time.gettimeofday(space) + return space.float_w(w_res) # xxx back and forth diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import rffi from pypy.interpreter.error import OperationError, oefmt, strerror as _strerror, exception_from_saved_errno from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import timeutils from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rarithmetic import intmask, r_ulonglong, r_longfloat, widen from rpython.rlib.rtime import (GETTIMEOFDAY_NO_TZ, TIMEVAL, @@ -438,12 +439,31 @@ return _strerror(errno) if sys.platform != 'win32': + from errno import EINTR + from rpython.rlib.rtime import c_select + @unwrap_spec(secs=float) def sleep(space, secs): if secs < 0: raise oefmt(space.w_ValueError, "sleep length must be non-negative") - 
rtime.sleep(secs) + end_time = timeutils.monotonic(space) + secs + while True: + void = lltype.nullptr(rffi.VOIDP.TO) + with lltype.scoped_alloc(TIMEVAL) as t: + frac = math.fmod(secs, 1.0) + rffi.setintfield(t, 'c_tv_sec', int(secs)) + rffi.setintfield(t, 'c_tv_usec', int(frac*1000000.0)) + + res = rffi.cast(rffi.LONG, c_select(0, void, void, void, t)) + if res == 0: + break # normal path + if rposix.get_saved_errno() != EINTR: + raise exception_from_saved_errno(space, space.w_OSError) + secs = end_time - timeutils.monotonic(space) # retry + if secs <= 0.0: + break + else: from rpython.rlib import rwin32 from errno import EINTR @@ -463,6 +483,7 @@ OSError(EINTR, "sleep() interrupted")) @unwrap_spec(secs=float) def sleep(space, secs): + XXX # review for EINTR / PEP475 if secs < 0: raise oefmt(space.w_ValueError, "sleep length must be non-negative") diff --git a/pypy/module/time/test/test_time.py b/pypy/module/time/test/test_time.py --- a/pypy/module/time/test/test_time.py +++ b/pypy/module/time/test/test_time.py @@ -1,6 +1,6 @@ class AppTestTime: spaceconfig = { - "usemodules": ['time', 'struct', 'binascii'], + "usemodules": ['time', 'struct', 'binascii', 'signal'], } def test_attributes(self): @@ -394,3 +394,23 @@ assert info.resolution > 0.0 assert info.resolution <= 1.0 assert isinstance(info.adjustable, bool) + + def test_pep475_retry_sleep(self): + import time + import _signal as signal + signalled = [] + + def foo(*args): + signalled.append("ALARM") + + signal.signal(signal.SIGALRM, foo) + try: + t1 = time.time() + signal.alarm(1) + time.sleep(3.0) + t2 = time.time() + finally: + signal.signal(signal.SIGALRM, signal.SIG_DFL) + + assert signalled != [] + assert t2 - t1 > 2.99 From pypy.commits at gmail.com Sun Dec 4 08:45:56 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 05:45:56 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <58441e14.61adc20a.f6b81.3b72@mx.google.com> Author: Armin Rigo Branch: 
extradoc Changeset: r828:5a234313e695 Date: 2016-12-04 14:45 +0100 http://bitbucket.org/pypy/pypy.org/changeset/5a234313e695/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66367 of $105000 (63.2%) + $66386 of $105000 (63.2%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sun Dec 4 13:30:08 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 10:30:08 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: select.*() functions (the subset of the ones available on Linux) Message-ID: <584460b0.a285c20a.2931c.9354@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88879:98fd8574f000 Date: 2016-12-04 19:29 +0100 http://bitbucket.org/pypy/pypy/changeset/98fd8574f000/ Log: select.*() functions (the subset of the ones available on Linux) diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py --- a/pypy/module/select/interp_epoll.py +++ b/pypy/module/select/interp_epoll.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter import timeutils from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.rlib._rsocket_rffi import socketclose, FD_SETSIZE @@ -156,9 +157,11 @@ def descr_poll(self, space, timeout=-1.0, maxevents=-1): self.check_closed(space) if timeout < 0: - timeout = -1.0 + end_time = 0.0 + itimeout = -1 else: - timeout *= 1000.0 + end_time = timeutils.monotonic(space) + timeout + itimeout = int(timeout * 1000.0 + 0.999) if maxevents == -1: maxevents = FD_SETSIZE - 1 @@ -167,9 +170,18 @@ "maxevents must be greater than 0, not %d", maxevents) with lltype.scoped_alloc(rffi.CArray(epoll_event), maxevents) as evs: - nfds = epoll_wait(self.epfd, evs, maxevents, int(timeout)) - if nfds < 0: - raise exception_from_saved_errno(space, space.w_IOError) + while True: + nfds = epoll_wait(self.epfd, evs, maxevents, itimeout) + if nfds < 0: + if get_saved_errno() == errno.EINTR: + space.getexecutioncontext().checksignals() + if itimeout >= 0: + timeout = end_time - timeutils.monotonic(space) + timeout = max(timeout, 0.0) + 
itimeout = int(timeout * 1000.0 + 0.999) + continue + raise exception_from_saved_errno(space, space.w_IOError) + break elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -180,6 +180,7 @@ raise oefmt(space.w_ValueError, "Timeout must be None or >= 0, got %s", str(_timeout)) + XXX # fix test_select_signal.py first, for PEP475! sec = int(_timeout) nsec = int(1e9 * (_timeout - sec)) rffi.setintfield(timeout, 'c_tv_sec', sec) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -10,6 +10,7 @@ from pypy.interpreter.gateway import ( Unwrapper, WrappedDefault, interp2app, unwrap_spec) from pypy.interpreter.typedef import TypeDef +from pypy.interpreter import timeutils defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -49,8 +50,10 @@ @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): + """WARNING: the timeout parameter is in **milliseconds**!""" if space.is_w(w_timeout, space.w_None): timeout = -1 + end_time = 0 else: # we want to be compatible with cpython and also accept things # that can be casted to integer (I think) @@ -61,19 +64,29 @@ raise oefmt(space.w_TypeError, "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) + end_time = timeutils.monotonic(space) + timeout * 0.001 if self.running: raise oefmt(space.w_RuntimeError, "concurrent poll() invocation") - self.running = True - try: - retval = rpoll.poll(self.fddict, timeout) - except rpoll.PollError as e: - message = e.get_msg() - raise OperationError(space.w_OSError, - space.newtuple([space.wrap(e.errno), - space.wrap(message)])) - finally: - self.running = False + while True: + self.running = True + try: + retval = rpoll.poll(self.fddict, timeout) + except 
rpoll.PollError as e: + if e.errno == errno.EINTR: + space.getexecutioncontext().checksignals() + timeout = int((end_time - timeutils.monotonic(space)) + * 1000.0 + 0.999) # round up + if timeout < 0: + timeout = 0 + continue + message = e.get_msg() + raise OperationError(space.w_OSError, + space.newtuple([space.wrap(e.errno), + space.wrap(message)])) + finally: + self.running = False + break retval_w = [] for fd, revents in retval: @@ -112,7 +125,7 @@ def _call_select(space, iwtd_w, owtd_w, ewtd_w, - ll_inl, ll_outl, ll_errl, ll_timeval): + ll_inl, ll_outl, ll_errl, ll_timeval, timeout): fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: @@ -122,13 +135,32 @@ if ll_errl: fdlisterr, nfds = _build_fd_set(space, ewtd_w, ll_errl, nfds) - res = _c.select(nfds + 1, ll_inl, ll_outl, ll_errl, ll_timeval) + if ll_timeval: + end_time = timeutils.monotonic(space) + timeout + else: + end_time = 0.0 - if res < 0: - errno = _c.geterrno() - msg = _c.socket_strerror_str(errno) - raise OperationError(space.w_OSError, space.newtuple([ - space.wrap(errno), space.wrap(msg)])) + while True: + if ll_timeval: + i = int(timeout) + rffi.setintfield(ll_timeval, 'c_tv_sec', i) + rffi.setintfield(ll_timeval, 'c_tv_usec', int((timeout-i)*1000000)) + + res = _c.select(nfds + 1, ll_inl, ll_outl, ll_errl, ll_timeval) + + if res >= 0: + break # normal path + err = _c.geterrno() + if err != errno.EINTR: + msg = _c.socket_strerror_str(err) + raise OperationError(space.w_OSError, space.newtuple([ + space.wrap(err), space.wrap(msg)])) + # got EINTR, automatic retry + space.getexecutioncontext().checksignals() + if timeout > 0.0: + timeout = end_time - timeutils.monotonic(space) + if timeout < 0.0: + timeout = 0.0 resin_w = [] resout_w = [] @@ -193,15 +225,12 @@ ll_errl = lltype.malloc(_c.fd_set.TO, flavor='raw') if timeout >= 0.0: ll_timeval = rffi.make(_c.timeval) - i = int(timeout) - rffi.setintfield(ll_timeval, 'c_tv_sec', i) - rffi.setintfield(ll_timeval, 'c_tv_usec', 
int((timeout-i)*1000000)) # Call this as a separate helper to avoid a large piece of code # in try:finally:. Needed for calling further _always_inline_ # helpers like _build_fd_set(). return _call_select(space, iwtd_w, owtd_w, ewtd_w, - ll_inl, ll_outl, ll_errl, ll_timeval) + ll_inl, ll_outl, ll_errl, ll_timeval, timeout) finally: if ll_timeval: lltype.free(ll_timeval, flavor='raw') diff --git a/pypy/module/select/test/test_select_signal.py b/pypy/module/select/test/test_select_signal.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_select_signal.py @@ -0,0 +1,48 @@ + +class AppTestSelectSignal: + spaceconfig = { + "usemodules": ['select', 'time', 'signal'], + } + + def test_pep475_retry(self): + import select, time + import _signal as signal + + def foo(*args): + signalled.append("ALARM") + + # a list of functions that will do nothing more than sleep for 3 + # seconds + cases = [(select.select, [], [], [], 3.0)] + + if hasattr(select, 'poll'): + import posix + poll = select.poll() + cases.append((poll.poll, 3000)) # milliseconds + + if hasattr(select, 'epoll'): + epoll = select.epoll() + cases.append((epoll.poll, 3.0)) + + if hasattr(select, 'kqueue'): + kqueue = select.kqueue() + cases.append((kqueue.control, [], 1, 3.0)) + + if hasattr(select, 'devpoll'): + raise NotImplementedError("write this test if we have devpoll") + + for wait_for_three_seconds in cases: + print(wait_for_three_seconds[0]) + signalled = [] + signal.signal(signal.SIGALRM, foo) + try: + t1 = time.time() + signal.alarm(1) + wait_for_three_seconds[0](*wait_for_three_seconds[1:]) + t2 = time.time() + finally: + signal.signal(signal.SIGALRM, signal.SIG_DFL) + + print("result: signalled = %r in %s seconds" % (signalled, t2 - t1)) + assert signalled != [] + assert t2 - t1 > 2.99 From pypy.commits at gmail.com Sun Dec 4 14:15:11 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 11:15:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: pep475ify 
the socket objects Message-ID: <58446b3f.52301c0a.42568.ba92@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88880:dbc5c6b1041f Date: 2016-12-04 20:14 +0100 http://bitbucket.org/pypy/pypy/changeset/dbc5c6b1041f/ Log: pep475ify the socket objects diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,5 +1,6 @@ -import sys +import sys, errno from rpython.rlib import rsocket, rweaklist +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.rsocket import ( RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, @@ -227,12 +228,13 @@ representing the connection, and the address of the client. For IP sockets, the address info is a pair (hostaddr, port). """ - try: - fd, addr = self.sock.accept(inheritable=False) - return space.newtuple([space.wrap(fd), - addr_as_object(addr, fd, space)]) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + fd, addr = self.sock.accept(inheritable=False) + return space.newtuple([space.wrap(fd), + addr_as_object(addr, fd, space)]) + except SocketError as e: + converted_error(space, e, eintr_retry=True) # convert an Address into an app-level object def addr_as_object(self, space, address): @@ -274,10 +276,12 @@ Connect the socket to a remote address. For IP sockets, the address is a pair (host, port). 
""" - try: - self.sock.connect(self.addr_from_object(space, w_addr)) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + self.sock.connect(self.addr_from_object(space, w_addr)) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) def connect_ex_w(self, space, w_addr): """connect_ex(address) -> errno @@ -289,7 +293,11 @@ addr = self.addr_from_object(space, w_addr) except SocketError as e: raise converted_error(space, e) - error = self.sock.connect_ex(addr) + while True: + error = self.sock.connect_ex(addr) + if error != errno.EINTR: + break + space.getexecutioncontext().checksignals() return space.wrap(error) def fileno_w(self, space): @@ -384,10 +392,12 @@ at least one byte is available or until the remote end is closed. When the remote end is closed and all data is read, return the empty string. """ - try: - data = self.sock.recv(buffersize, flags) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + data = self.sock.recv(buffersize, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.newbytes(data) @unwrap_spec(buffersize='nonnegint', flags=int) @@ -396,15 +406,17 @@ Like recv(buffersize, flags) but also return the sender's address info. 
""" - try: - data, addr = self.sock.recvfrom(buffersize, flags) - if addr: - w_addr = addr_as_object(addr, self.sock.fd, space) - else: - w_addr = space.w_None - return space.newtuple([space.newbytes(data), w_addr]) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + data, addr = self.sock.recvfrom(buffersize, flags) + if addr: + w_addr = addr_as_object(addr, self.sock.fd, space) + else: + w_addr = space.w_None + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) + return space.newtuple([space.newbytes(data), w_addr]) @unwrap_spec(data='bufferstr', flags=int) def send_w(self, space, data, flags=0): @@ -414,10 +426,12 @@ argument, see the Unix manual. Return the number of bytes sent; this may be less than len(data) if the network is busy. """ - try: - count = self.sock.send(data, flags) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + count = self.sock.send(data, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.wrap(count) @unwrap_spec(data='bufferstr', flags=int) @@ -450,11 +464,13 @@ # 3 args version flags = space.int_w(w_param2) w_addr = w_param3 - try: - addr = self.addr_from_object(space, w_addr) - count = self.sock.sendto(data, flags, addr) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + addr = self.addr_from_object(space, w_addr) + count = self.sock.sendto(data, flags, addr) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.wrap(count) @unwrap_spec(flag=int) @@ -520,10 +536,13 @@ lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt - try: - return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags)) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + nbytes_read = self.sock.recvinto(rwbuffer, nbytes, flags) + break + except SocketError as e: + converted_error(space, e, 
eintr_retry=True) + return space.wrap(nbytes_read) @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): @@ -538,15 +557,20 @@ elif nbytes > lgt: raise oefmt(space.w_ValueError, "nbytes is greater than the length of the buffer") - try: - readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) - if addr: + while True: + try: + readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) + if addr: + try: w_addr = addr_as_object(addr, self.sock.fd, space) - else: - w_addr = space.w_None - return space.newtuple([space.wrap(readlgt), w_addr]) - except SocketError as e: - raise converted_error(space, e) + except SocketError as e: + raise converted_error(space, e) + else: + w_addr = space.w_None + return space.newtuple([space.wrap(readlgt), w_addr]) @unwrap_spec(cmd=int) def ioctl_w(self, space, cmd, w_option): @@ -690,15 +714,20 @@ def get_error(space, name): return space.fromcache(SocketAPI).get_exception(name) -def converted_error(space, e): + at specialize.arg(2) +def converted_error(space, e, eintr_retry=False): message = e.get_msg() w_exception_class = get_error(space, e.applevelerrcls) if isinstance(e, SocketErrorWithErrno): + if e.errno == errno.EINTR: + space.getexecutioncontext().checksignals() + if eintr_retry: + return # only return None if eintr_retry==True w_exception = space.call_function(w_exception_class, space.wrap(e.errno), space.wrap(message)) else: w_exception = space.call_function(w_exception_class, space.wrap(message)) - return OperationError(w_exception_class, w_exception) + raise OperationError(w_exception_class, w_exception) # ____________________________________________________________ From pypy.commits at gmail.com Sun Dec 4 15:26:04 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Dec 2016 12:26:04 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: try to identify and convert a 
GetSetProperty to a W_GetSetPropertyEx Message-ID: <58447bdc.12ad1c0a.ddd21.cddc@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r88883:e012751a80df Date: 2016-12-04 22:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e012751a80df/ Log: try to identify and convert a GetSetProperty to a W_GetSetPropertyEx diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -1,4 +1,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.objectmodel import specialize +from pypy.interpreter.error import OperationError +from pypy.objspace.std.classdict import ClassDictStrategy +from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING, PyObjectFields, cpython_struct, @@ -7,8 +11,7 @@ make_typedescr, track_reference, create_ref, from_ref, Py_DecRef, Py_IncRef) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import specialize +from pypy.module.cpyext.typeobject import W_GetSetPropertyEx PyDictObjectStruct = lltype.ForwardReference() PyDictObject = lltype.Ptr(PyDictObjectStruct) @@ -258,6 +261,10 @@ return 0 w_key = space.listview(w_keys)[pos] w_value = space.getitem(w_dict, w_key) + if isinstance(w_value, GetSetProperty): + # XXX doesn't quite work, need to convert GetSetProperty + # to PyGetSetDef, with c_name, c_get, c_set, c_doc, c_closure + w_value = W_GetSetPropertyEx(w_value, w_dict.dstorage._x) if pkey: pkey[0] = as_pyobj(space, w_key) if pvalue: diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.lltypesystem import rffi, lltype 
from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP +from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP, PyTypeObjectPtr from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -182,8 +182,8 @@ assert api.PyDictProxy_Check(w_proxy) def test_typedict(self, space, api): - py_type = make_ref(space, space.w_type) - py_dict = py_type.c_ob_type.c_tp_dict + py_type = make_ref(space, space.w_int) + py_dict = rffi.cast(PyTypeObjectPtr, py_type).c_tp_dict ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') ppos[0] = 0 From pypy.commits at gmail.com Sun Dec 4 15:26:00 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Dec 2016 12:26:00 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: move tests and add one for tp_hash Message-ID: <58447bd8.8675c20a.108e7.b8c9@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r88881:7fe5dcdd537f Date: 2016-11-26 21:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7fe5dcdd537f/ Log: move tests and add one for tp_hash diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -452,48 +452,6 @@ ref = make_ref(space, w_obj) api.Py_DecRef(ref) - def test_nb_add_from_python(self, space, api): - w_date = space.appexec([], """(): - class DateType(object): - def __add__(self, other): - return 'sum!' - return DateType() - """) - w_datetype = space.type(w_date) - py_date = make_ref(space, w_date) - py_datetype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_datetype)) - assert py_datetype.c_tp_as_number - assert py_datetype.c_tp_as_number.c_nb_add - w_obj = generic_cpy_call(space, py_datetype.c_tp_as_number.c_nb_add, - py_date, py_date) - assert space.str_w(w_obj) == 'sum!' 
- - def test_tp_new_from_python(self, space, api): - w_date = space.appexec([], """(): - class Date(object): - def __new__(cls, year, month, day): - self = object.__new__(cls) - self.year = year - self.month = month - self.day = day - return self - return Date - """) - py_datetype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_date)) - one = space.newint(1) - arg = space.newtuple([one, one, one]) - # call w_date.__new__ - w_obj = space.call_function(w_date, one, one, one) - w_year = space.getattr(w_obj, space.newbytes('year')) - assert space.int_w(w_year) == 1 - - # currently fails with "object() takse no parameters, - # from the tp_new of space.w_object - w_obj = generic_cpy_call(space, py_datetype.c_tp_new, py_datetype, - arg, space.newdict({})) - w_year = space.getattr(w_obj, space.newbytes('year')) - assert space.int_w(w_year) == 1 - class AppTestSlots(AppTestCpythonExtensionBase): def setup_class(cls): AppTestCpythonExtensionBase.setup_class.im_func(cls) diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_userslots.py @@ -0,0 +1,63 @@ +from pypy.module.cpyext.test.test_api import BaseApiTest +from rpython.rtyper.lltypesystem import rffi +from pypy.module.cpyext.pyobject import make_ref, from_ref +from pypy.module.cpyext.api import generic_cpy_call +from pypy.module.cpyext.typeobject import PyTypeObjectPtr + + +class TestAppLevelObject(BaseApiTest): + def test_nb_add_from_python(self, space, api): + w_date = space.appexec([], """(): + class DateType(object): + def __add__(self, other): + return 'sum!' 
+ return DateType() + """) + w_datetype = space.type(w_date) + py_date = make_ref(space, w_date) + py_datetype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_datetype)) + assert py_datetype.c_tp_as_number + assert py_datetype.c_tp_as_number.c_nb_add + w_obj = generic_cpy_call(space, py_datetype.c_tp_as_number.c_nb_add, + py_date, py_date) + assert space.str_w(w_obj) == 'sum!' + + def test_tp_hash_from_python(self, space, api): + w_c = space.appexec([], """(): + class C: + def __hash__(self): + return -23 + return C() + """) + w_ctype = space.type(w_c) + py_c = make_ref(space, w_c) + py_ctype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_ctype)) + assert py_ctype.c_tp_hash + val = generic_cpy_call(space, py_ctype.c_tp_hash, py_c) + assert val == -23 + + def test_tp_new_from_python(self, space, api): + w_date = space.appexec([], """(): + class Date(object): + def __new__(cls, year, month, day): + self = object.__new__(cls) + self.year = year + self.month = month + self.day = day + return self + return Date + """) + py_datetype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_date)) + one = space.newint(1) + arg = space.newtuple([one, one, one]) + # call w_date.__new__ + w_obj = space.call_function(w_date, one, one, one) + w_year = space.getattr(w_obj, space.newbytes('year')) + assert space.int_w(w_year) == 1 + + w_obj = generic_cpy_call(space, py_datetype.c_tp_new, py_datetype, + arg, space.newdict({})) + w_year = space.getattr(w_obj, space.newbytes('year')) + assert space.int_w(w_year) == 1 + + From pypy.commits at gmail.com Sun Dec 4 15:26:02 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 04 Dec 2016 12:26:02 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: change algo to fish the function to call, still ininitely recurses on pandas' Timestamp class Message-ID: <58447bda.4c9d1c0a.d4316.cdb1@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r88882:cea545b4e5bc Date: 2016-11-28 22:32 +0200 
http://bitbucket.org/pypy/pypy/changeset/cea545b4e5bc/ Log: change algo to fish the function to call, still ininitely recurses on pandas' Timestamp class diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -504,18 +504,24 @@ this_func_ptr = llhelper(subtype_dealloc.api_func.functype, subtype_dealloc.api_func.get_wrapper(space)) w_obj = from_ref(space, rffi.cast(PyObject, base)) - obj_not_cpytype = not w_obj.is_cpytype() - # see comment in userslot.slot_tp_new, this call can infinitely recurse - # We can only get into this function if tp_dealloc is being called on - # a non-cpytype, which could or could not inherit from a cpytype - # So if the original obj is non-cpytype, climb the mro to the first non-cpytype, - # otherwise just make sure we are not calling ourselves again - # - # This logic might fail for complicated inheritance schemes. - while base.c_tp_dealloc == this_func_ptr or (obj_not_cpytype and w_obj.is_cpytype()): + # This wrapper is created on a specific type, call it w_A. + # We wish to call the dealloc function from one of the base classes of w_A, + # the first of which is not this function itself. + # w_obj is an instance of w_A or one of its subclasses. So climb up the + # inheritance chain until base.c_tp_dealloc is exactly this_func, and then + # continue on up until they differ. 
+ print 'subtype_dealloc, start from', rffi.charp2str(base) + while base.c_tp_dealloc != this_func_ptr: base = base.c_tp_base assert base + print ' ne move to', rffi.charp2str(base) w_obj = from_ref(space, rffi.cast(PyObject, base)) + while base.c_tp_dealloc == this_func_ptr: + base = base.c_tp_base + assert base + print ' eq move to', rffi.charp2str(base) + w_obj = from_ref(space, rffi.cast(PyObject, base)) + print ' end with', rffi.charp2str(base) dealloc = base.c_tp_dealloc # XXX call tp_del if necessary generic_cpy_call(space, dealloc, obj) From pypy.commits at gmail.com Sun Dec 4 15:45:59 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 12:45:59 -0800 (PST) Subject: [pypy-commit] pypy py3.5-eintr-pep475: close branch, ready to merge Message-ID: <58448087.8c1f1c0a.f42a1.d4c0@mx.google.com> Author: Armin Rigo Branch: py3.5-eintr-pep475 Changeset: r88884:7c7067888d91 Date: 2016-12-04 21:43 +0100 http://bitbucket.org/pypy/pypy/changeset/7c7067888d91/ Log: close branch, ready to merge From pypy.commits at gmail.com Sun Dec 4 15:46:01 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 12:46:01 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge py3.5-eintr-pep475 Message-ID: <58448089.ca06c20a.ecbba.c380@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88885:b5eaea946a6b Date: 2016-12-04 21:44 +0100 http://bitbucket.org/pypy/pypy/changeset/b5eaea946a6b/ Log: hg merge py3.5-eintr-pep475 Implement PEP475, which makes a number of os-, file-, select- and socket-related functions no longer raise OSError/IOError on getting EINTR, but instead automatically retry. There are a few functions mentioned in the PEP too which are not present in PyPy so far. 
diff too long, truncating to 2000 out of 2118 lines diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,5 @@ +import os +from errno import EINTR from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -6,7 +8,13 @@ def wait3(options): status = ffi.new("int *") ru = ffi.new("struct rusage *") - pid = lib.wait3(status, options, ru) + while True: + pid = lib.wait3(status, options, ru) + if pid != -1: + break + errno = ffi.errno + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -15,7 +23,13 @@ def wait4(pid, options): status = ffi.new("int *") ru = ffi.new("struct rusage *") - pid = lib.wait4(pid, status, options, ru) + while True: + pid = lib.wait4(pid, status, options, ru) + if pid != -1: + break + errno = ffi.errno + if errno != EINTR: + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -543,6 +543,7 @@ _WINDOWS = True def wrap_windowserror(space, e, w_filename=None): + XXX # WindowsError no longer exists in Py3.5 from rpython.rlib import rwin32 winerror = e.winerror @@ -559,43 +560,72 @@ space.wrap(msg)) return OperationError(exc, w_error) - at specialize.arg(3) + at specialize.arg(3, 6) def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError', - w_exception_class=None, w_filename2=None): + w_exception_class=None, w_filename2=None, eintr_retry=False): + """A double API here: + + * if eintr_retry is False, always return the OperationError to + be raised by the caller. It can possibly be about EINTR + (checksignals() is still called here). 
+ + * if eintr_retry is True (PEP 475 compliant API for retrying + system calls failing with EINTR), then this function raises + the OperationError directly, or for EINTR it calls + checksignals() and returns None in case the original + operation should be retried. + """ assert isinstance(e, OSError) if _WINDOWS and isinstance(e, WindowsError): return wrap_windowserror(space, e, w_filename) + if w_exception_class is None: + w_exc = getattr(space, exception_name) + else: + w_exc = w_exception_class + operror = _wrap_oserror2_impl(space, e, w_filename, w_filename2, w_exc, + eintr_retry) + if eintr_retry: + assert operror is None # otherwise, _wrap_oserror2_impl() has raised + else: + assert operror is not None # tell the annotator we don't return None + return operror + +def _wrap_oserror2_impl(space, e, w_filename, w_filename2, w_exc, eintr_retry): + # move the common logic in its own function, instead of having it + # duplicated 4 times in all 4 specialized versions of wrap_oserror2() errno = e.errno if errno == EINTR: space.getexecutioncontext().checksignals() + if eintr_retry: + return None try: msg = strerror(errno) except ValueError: msg = u'error %d' % errno - if w_exception_class is None: - exc = getattr(space, exception_name) - else: - exc = w_exception_class if w_filename is not None: if w_filename2 is not None: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg), w_filename, space.w_None, w_filename2) else: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg), w_filename) else: - w_error = space.call_function(exc, space.wrap(errno), + w_error = space.call_function(w_exc, space.wrap(errno), space.wrap(msg)) - return OperationError(exc, w_error) + operror = OperationError(w_exc, w_error) + if eintr_retry: + raise operror + return operror +_wrap_oserror2_impl._dont_inline_ = True - at specialize.arg(3) + at 
specialize.arg(3, 6) def wrap_oserror(space, e, filename=None, exception_name='w_OSError', - w_exception_class=None, filename2=None): + w_exception_class=None, filename2=None, eintr_retry=False): w_filename = None w_filename2 = None if filename is not None: @@ -605,7 +635,9 @@ return wrap_oserror2(space, e, w_filename, exception_name=exception_name, w_exception_class=w_exception_class, - w_filename2=w_filename2) + w_filename2=w_filename2, + eintr_retry=eintr_retry) +wrap_oserror._dont_inline_ = True def exception_from_saved_errno(space, w_type): from rpython.rlib.rposix import get_saved_errno diff --git a/pypy/interpreter/timeutils.py b/pypy/interpreter/timeutils.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/timeutils.py @@ -0,0 +1,11 @@ +""" +Access to the time module's high-resolution monotonic clock +""" + +def monotonic(space): + from pypy.module.time import interp_time + if interp_time.HAS_MONOTONIC: + w_res = interp_time.monotonic(space) + else: + w_res = interp_time.gettimeofday(space) + return space.float_w(w_res) # xxx back and forth diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -175,46 +175,48 @@ "Cannot use closefd=False with file name") from pypy.module.posix.interp_posix import dispatch_filename - try: - self.fd = dispatch_filename(rposix.open)( - space, w_name, flags, 0666) - except OSError as e: - raise wrap_oserror2(space, e, w_name, - exception_name='w_IOError') - finally: - fd_is_own = True + while True: + try: + self.fd = dispatch_filename(rposix.open)( + space, w_name, flags, 0666) + fd_is_own = True + break + except OSError as e: + wrap_oserror2(space, e, w_name, + exception_name='w_IOError', + eintr_retry=True) if not rposix._WIN32: try: _open_inhcache.set_non_inheritable(self.fd) except OSError as e: - raise wrap_oserror2(space, e, w_name) + raise wrap_oserror2(space, e, w_name, eintr_retry=False) else: w_fd = 
space.call_function(w_opener, w_name, space.wrap(flags)) try: self.fd = space.int_w(w_fd) + fd_is_own = True except OperationError as e: if not e.match(space, space.w_TypeError): raise raise oefmt(space.w_TypeError, "expected integer from opener") - finally: - fd_is_own = True if not rposix._WIN32: try: rposix.set_inheritable(self.fd, False) except OSError as e: - raise wrap_oserror2(space, e, w_name) + raise wrap_oserror2(space, e, w_name, eintr_retry=False) try: st = os.fstat(self.fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) # On Unix, fopen will succeed for directories. # In Python, there should be no file objects referring to # directories, so we need a check. if stat.S_ISDIR(st.st_mode): raise wrap_oserror2(space, OSError(errno.EISDIR, "fstat"), - w_name, exception_name='w_IOError') + w_name, exception_name='w_IOError', + eintr_retry=False) self.blksize = DEFAULT_BUFFER_SIZE if HAS_BLKSIZE and st.st_blksize > 1: self.blksize = st.st_blksize @@ -227,7 +229,8 @@ try: os.lseek(self.fd, 0, os.SEEK_END) except OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) except: if not fd_is_own: self.fd = -1 @@ -285,7 +288,8 @@ os.close(fd) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) def close_w(self, space): try: @@ -319,7 +323,8 @@ pos = os.lseek(self.fd, pos, whence) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) return space.wrap(pos) def tell_w(self, space): @@ -328,7 +333,8 @@ pos = os.lseek(self.fd, 0, 1) except OSError as e: raise wrap_oserror(space, e, - exception_name='w_IOError') + exception_name='w_IOError', + eintr_retry=False) return space.wrap(pos) def readable_w(self, space): @@ -361,7 +367,8 @@ try: res = os.isatty(self.fd) except 
OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) return space.wrap(res) def repr_w(self, space): @@ -387,13 +394,16 @@ self._check_writable(space) data = space.getarg_w('y*', w_data).as_str() - try: - n = os.write(self.fd, data) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + n = os.write(self.fd, data) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) return space.wrap(n) @@ -405,13 +415,16 @@ if size < 0: return self.readall_w(space) - try: - s = os.read(self.fd, size) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + s = os.read(self.fd, size) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) return space.newbytes(s) @@ -420,13 +433,16 @@ self._check_readable(space) rwbuffer = space.getarg_w('w*', w_buffer) length = rwbuffer.getlength() - try: - buf = os.read(self.fd, length) - except OSError as e: - if e.errno == errno.EAGAIN: - return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') + while True: + try: + buf = os.read(self.fd, length) + break + except OSError as e: + if e.errno == errno.EAGAIN: + return space.w_None + wrap_oserror(space, e, + exception_name='w_IOError', + eintr_retry=True) rwbuffer.setslice(0, buf) return space.wrap(len(buf)) @@ -442,17 +458,13 @@ try: chunk = os.read(self.fd, newsize - total) except OSError as e: - if e.errno == errno.EINTR: - space.getexecutioncontext().checksignals() - continue - if total > 0: - # return what we've got so far - break if e.errno == errno.EAGAIN: + if 
total > 0: + break # return what we've got so far return space.w_None - raise wrap_oserror(space, e, - exception_name='w_IOError') - + wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=True) + continue if not chunk: break builder.append(chunk) @@ -476,7 +488,8 @@ try: self._truncate(space.r_longlong_w(w_size)) except OSError as e: - raise wrap_oserror(space, e, exception_name='w_IOError') + raise wrap_oserror(space, e, exception_name='w_IOError', + eintr_retry=False) return w_size diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,5 +1,6 @@ -import sys +import sys, errno from rpython.rlib import rsocket, rweaklist +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.rsocket import ( RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, @@ -227,12 +228,13 @@ representing the connection, and the address of the client. For IP sockets, the address info is a pair (hostaddr, port). """ - try: - fd, addr = self.sock.accept(inheritable=False) - return space.newtuple([space.wrap(fd), - addr_as_object(addr, fd, space)]) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + fd, addr = self.sock.accept(inheritable=False) + return space.newtuple([space.wrap(fd), + addr_as_object(addr, fd, space)]) + except SocketError as e: + converted_error(space, e, eintr_retry=True) # convert an Address into an app-level object def addr_as_object(self, space, address): @@ -274,10 +276,12 @@ Connect the socket to a remote address. For IP sockets, the address is a pair (host, port). 
""" - try: - self.sock.connect(self.addr_from_object(space, w_addr)) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + self.sock.connect(self.addr_from_object(space, w_addr)) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) def connect_ex_w(self, space, w_addr): """connect_ex(address) -> errno @@ -289,7 +293,11 @@ addr = self.addr_from_object(space, w_addr) except SocketError as e: raise converted_error(space, e) - error = self.sock.connect_ex(addr) + while True: + error = self.sock.connect_ex(addr) + if error != errno.EINTR: + break + space.getexecutioncontext().checksignals() return space.wrap(error) def fileno_w(self, space): @@ -384,10 +392,12 @@ at least one byte is available or until the remote end is closed. When the remote end is closed and all data is read, return the empty string. """ - try: - data = self.sock.recv(buffersize, flags) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + data = self.sock.recv(buffersize, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.newbytes(data) @unwrap_spec(buffersize='nonnegint', flags=int) @@ -396,15 +406,17 @@ Like recv(buffersize, flags) but also return the sender's address info. 
""" - try: - data, addr = self.sock.recvfrom(buffersize, flags) - if addr: - w_addr = addr_as_object(addr, self.sock.fd, space) - else: - w_addr = space.w_None - return space.newtuple([space.newbytes(data), w_addr]) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + data, addr = self.sock.recvfrom(buffersize, flags) + if addr: + w_addr = addr_as_object(addr, self.sock.fd, space) + else: + w_addr = space.w_None + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) + return space.newtuple([space.newbytes(data), w_addr]) @unwrap_spec(data='bufferstr', flags=int) def send_w(self, space, data, flags=0): @@ -414,10 +426,12 @@ argument, see the Unix manual. Return the number of bytes sent; this may be less than len(data) if the network is busy. """ - try: - count = self.sock.send(data, flags) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + count = self.sock.send(data, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.wrap(count) @unwrap_spec(data='bufferstr', flags=int) @@ -450,11 +464,13 @@ # 3 args version flags = space.int_w(w_param2) w_addr = w_param3 - try: - addr = self.addr_from_object(space, w_addr) - count = self.sock.sendto(data, flags, addr) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + addr = self.addr_from_object(space, w_addr) + count = self.sock.sendto(data, flags, addr) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) return space.wrap(count) @unwrap_spec(flag=int) @@ -520,10 +536,13 @@ lgt = rwbuffer.getlength() if nbytes == 0 or nbytes > lgt: nbytes = lgt - try: - return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags)) - except SocketError as e: - raise converted_error(space, e) + while True: + try: + nbytes_read = self.sock.recvinto(rwbuffer, nbytes, flags) + break + except SocketError as e: + converted_error(space, e, 
eintr_retry=True) + return space.wrap(nbytes_read) @unwrap_spec(nbytes=int, flags=int) def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): @@ -538,15 +557,20 @@ elif nbytes > lgt: raise oefmt(space.w_ValueError, "nbytes is greater than the length of the buffer") - try: - readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) - if addr: + while True: + try: + readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) + break + except SocketError as e: + converted_error(space, e, eintr_retry=True) + if addr: + try: w_addr = addr_as_object(addr, self.sock.fd, space) - else: - w_addr = space.w_None - return space.newtuple([space.wrap(readlgt), w_addr]) - except SocketError as e: - raise converted_error(space, e) + except SocketError as e: + raise converted_error(space, e) + else: + w_addr = space.w_None + return space.newtuple([space.wrap(readlgt), w_addr]) @unwrap_spec(cmd=int) def ioctl_w(self, space, cmd, w_option): @@ -690,15 +714,20 @@ def get_error(space, name): return space.fromcache(SocketAPI).get_exception(name) -def converted_error(space, e): + at specialize.arg(2) +def converted_error(space, e, eintr_retry=False): message = e.get_msg() w_exception_class = get_error(space, e.applevelerrcls) if isinstance(e, SocketErrorWithErrno): + if e.errno == errno.EINTR: + space.getexecutioncontext().checksignals() + if eintr_retry: + return # only return None if eintr_retry==True w_exception = space.call_function(w_exception_class, space.wrap(e.errno), space.wrap(message)) else: w_exception = space.call_function(w_exception_class, space.wrap(message)) - return OperationError(w_exception_class, w_exception) + raise OperationError(w_exception_class, w_exception) # ____________________________________________________________ diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -226,15 +226,21 @@ If it is unavailable, using it will raise 
a NotImplementedError.""" if rposix.O_CLOEXEC is not None: flags |= rposix.O_CLOEXEC + while True: + try: + if rposix.HAVE_OPENAT and dir_fd != DEFAULT_DIR_FD: + path = space.fsencode_w(w_path) + fd = rposix.openat(path, flags, mode, dir_fd) + else: + fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) try: - if rposix.HAVE_OPENAT and dir_fd != DEFAULT_DIR_FD: - path = space.fsencode_w(w_path) - fd = rposix.openat(path, flags, mode, dir_fd) - else: - fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) _open_inhcache.set_non_inheritable(fd) except OSError as e: - raise wrap_oserror2(space, e, w_path) + rposix.c_close(fd) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.wrap(fd) @unwrap_spec(fd=c_int, position=r_longlong, how=c_int) @@ -245,7 +251,7 @@ try: pos = os.lseek(fd, position, how) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(pos) @@ -256,39 +262,45 @@ try: res = os.isatty(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(res) @unwrap_spec(fd=c_int, length=int) def read(space, fd, length): """Read data from a file descriptor.""" - try: - s = os.read(fd, length) - except OSError as e: - raise wrap_oserror(space, e) - else: - return space.newbytes(s) + while True: + try: + s = os.read(fd, length) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return space.newbytes(s) @unwrap_spec(fd=c_int) def write(space, fd, w_data): """Write a string to a file descriptor. 
Return the number of bytes actually written, which may be smaller than len(data).""" data = space.getarg_w('y*', w_data) - try: - res = os.write(fd, data.as_str()) - except OSError as e: - raise wrap_oserror(space, e) - else: - return space.wrap(res) + while True: + try: + res = os.write(fd, data.as_str()) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return space.wrap(res) @unwrap_spec(fd=c_int) def close(space, fd): """Close a file descriptor (for low level IO).""" + # PEP 475 note: os.close() must not retry upon EINTR. Like in + # previous versions of Python it raises OSError in this case. + # The text of PEP 475 seems to suggest that EINTR is eaten and + # hidden from app-level, but it is not the case in CPython 3.5.2. try: os.close(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(fd_low=c_int, fd_high=c_int) def closerange(fd_low, fd_high): @@ -298,10 +310,12 @@ @unwrap_spec(fd=c_int, length=r_longlong) def ftruncate(space, fd, length): """Truncate a file (by file descriptor) to a specified length.""" - try: - os.ftruncate(fd, length) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.ftruncate(fd, length) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def truncate(space, w_path, w_length): """Truncate a file to a specified length.""" @@ -325,19 +339,23 @@ def fsync(space, w_fd): """Force write of file with filedescriptor to disk.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fsync(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fsync(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def fdatasync(space, w_fd): """Force write of file with filedescriptor to disk. 
Does not force update of metadata.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fdatasync(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fdatasync(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def sync(space): """Force write of everything to disk.""" @@ -347,10 +365,12 @@ """Change to the directory of the given file descriptor. fildes must be opened on a directory, not a file.""" fd = space.c_filedescriptor_w(w_fd) - try: - os.fchdir(fd) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fchdir(fd) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) # ____________________________________________________________ @@ -415,12 +435,13 @@ def fstat(space, fd): """Perform a stat system call on the file referenced to by an open file descriptor.""" - try: - st = rposix_stat.fstat(fd) - except OSError as e: - raise wrap_oserror(space, e) - else: - return build_stat_result(space, st) + while True: + try: + st = rposix_stat.fstat(fd) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return build_stat_result(space, st) @unwrap_spec( path=path_or_fd(allow_fd=True), @@ -466,7 +487,7 @@ raise oefmt(space.w_NotImplementedError, "%s: unsupported argument combination", funcname) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) else: return build_stat_result(space, st) @@ -503,12 +524,13 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): - try: - st = rposix_stat.fstatvfs(fd) - except OSError as e: - raise wrap_oserror(space, e) - else: - return build_statvfs_result(space, st) + while True: + try: + st = rposix_stat.fstatvfs(fd) + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + else: + return build_statvfs_result(space, st) def statvfs(space, w_path): @@ -524,7 +546,7 @@ rposix_stat.statvfs, allow_fd_fn=rposix_stat.fstatvfs)(space, 
w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return build_statvfs_result(space, st) @@ -536,17 +558,19 @@ try: newfd = rposix.dup(fd, inheritable=False) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(newfd) @unwrap_spec(fd=c_int, fd2=c_int, inheritable=bool) def dup2(space, fd, fd2, inheritable=1): """Duplicate a file descriptor.""" + # like os.close(), this can still raise EINTR to app-level in + # CPython 3.5.2 try: rposix.dup2(fd, fd2, inheritable) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=bool, @@ -591,7 +615,7 @@ else: ok = dispatch_filename(rposix.access)(space, w_path, mode) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return space.wrap(ok) @@ -605,7 +629,7 @@ try: times = os.times() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.newtuple([space.wrap(times[0]), space.wrap(times[1]), @@ -619,7 +643,7 @@ try: rc = os.system(command) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(rc) @@ -640,7 +664,7 @@ else: dispatch_filename(rposix.unlink)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def remove(space, w_path, __kwonly__, dir_fd=DEFAULT_DIR_FD): @@ -659,7 +683,7 @@ else: dispatch_filename(rposix.unlink)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) def _getfullpathname(space, w_path): """helper for 
ntpath.abspath """ @@ -673,7 +697,7 @@ fullpath = rposix.getfullpathname(path) w_fullpath = space.newbytes(fullpath) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) else: return w_fullpath @@ -682,7 +706,7 @@ try: cur = os.getcwd() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.newbytes(cur) @@ -692,7 +716,7 @@ try: cur = os.getcwdu() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: return space.wrap(cur) else: @@ -709,7 +733,7 @@ else: dispatch_filename(rposix.chdir)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) def mkdir(space, w_path, mode=0o777, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): @@ -730,7 +754,7 @@ else: dispatch_filename(rposix.mkdir)(space, w_path, mode) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def rmdir(space, w_path, __kwonly__, dir_fd=DEFAULT_DIR_FD): @@ -749,7 +773,7 @@ else: dispatch_filename(rposix.rmdir)(space, w_path) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(code=c_int) def strerror(space, code): @@ -764,7 +788,7 @@ try: cur = os.getlogin() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap_fsdecoded(cur) # ____________________________________________________________ @@ -817,7 +841,7 @@ try: rwin32._wputenv(name, value) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: def _convertenviron(space, w_env): for key, value in os.environ.items(): @@ -828,7 +852,7 
@@ try: dispatch_filename_2(rposix.putenv)(space, w_name, w_value) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def unsetenv(space, w_name): """Delete an environment variable.""" @@ -837,7 +861,7 @@ except KeyError: pass except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def listdir(space, w_path=None): @@ -860,7 +884,7 @@ try: result = rposix.listdir(dirname) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.newlist_bytes(result) try: path = space.fsencode_w(w_path) @@ -874,13 +898,13 @@ try: result = rposix.fdlistdir(os.dup(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: dirname = FileEncoder(space, w_path) try: result = rposix.listdir(dirname) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) len_result = len(result) result_w = [None] * len_result for i in range(len_result): @@ -895,14 +919,14 @@ try: return space.wrap(rposix.get_inheritable(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(fd=c_int, inheritable=int) def set_inheritable(space, fd, inheritable): try: rposix.set_inheritable(fd, inheritable) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) _pipe_inhcache = rposix.SetNonInheritableCache() @@ -910,10 +934,15 @@ "Create a pipe. Returns (read_end, write_end)." 
try: fd1, fd2 = rposix.pipe(rposix.O_CLOEXEC or 0) + except OSError as e: + raise wrap_oserror(space, e, eintr_retry=False) + try: _pipe_inhcache.set_non_inheritable(fd1) _pipe_inhcache.set_non_inheritable(fd2) except OSError as e: - raise wrap_oserror(space, e) + rposix.c_close(fd2) + rposix.c_close(fd1) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) @unwrap_spec(flags=c_int) @@ -921,7 +950,7 @@ try: fd1, fd2 = rposix.pipe2(flags) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FCHMODAT), @@ -951,7 +980,7 @@ dispatch_filename(rposix.chmod)(space, w_path, mode) return except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) try: path = space.fsencode_w(w_path) @@ -960,7 +989,12 @@ raise oefmt(space.w_TypeError, "argument should be string, bytes or integer, not %T", w_path) fd = unwrap_fd(space, w_path) - _chmod_fd(space, fd, mode) + # NB. 
CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR + # to app-level, but os.fchmod(fd) retries automatically + try: + os.fchmod(fd, mode) + except OSError as e: + raise wrap_oserror(space, e, eintr_retry=False) else: try: _chmod_path(path, mode, dir_fd, follow_symlinks) @@ -969,7 +1003,7 @@ # fchmodat() doesn't actually implement follow_symlinks=False # so raise NotImplementedError in this case raise argument_unavailable(space, "chmod", "follow_symlinks") - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) def _chmod_path(path, mode, dir_fd, follow_symlinks): if dir_fd != DEFAULT_DIR_FD or not follow_symlinks: @@ -977,19 +1011,19 @@ else: rposix.chmod(path, mode) -def _chmod_fd(space, fd, mode): - try: - os.fchmod(fd, mode) - except OSError as e: - raise wrap_oserror(space, e) - - @unwrap_spec(fd=c_int, mode=c_int) def fchmod(space, fd, mode): """\ Change the access permissions of the file given by file descriptor fd. """ - _chmod_fd(space, fd, mode) + # NB. 
CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR + # to app-level, but os.fchmod(fd) retries automatically + while True: + try: + os.fchmod(fd, mode) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) @unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) @@ -1013,7 +1047,8 @@ else: dispatch_filename_2(rposix.rename)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT), dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT)) @@ -1037,7 +1072,8 @@ else: dispatch_filename_2(rposix.replace)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT)) def mkfifo(space, w_path, mode=0666, __kwonly__=None, dir_fd=DEFAULT_DIR_FD): @@ -1049,14 +1085,18 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" - try: - if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: - path = space.fsencode_w(w_path) - rposix.mkfifoat(path, mode, dir_fd) - else: - dispatch_filename(rposix.mkfifo)(space, w_path, mode) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + # CPython 3.5.2: why does os.mkfifo() retry automatically if it + # gets EINTR, but not os.mkdir()? 
+ while True: + try: + if rposix.HAVE_MKFIFOAT and dir_fd != DEFAULT_DIR_FD: + path = space.fsencode_w(w_path) + rposix.mkfifoat(path, mode, dir_fd) + else: + dispatch_filename(rposix.mkfifo)(space, w_path, mode) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) @unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD(rposix.HAVE_MKNODAT)) def mknod(space, w_path, mode=0600, device=0, @@ -1074,14 +1114,16 @@ and path should be relative; path will then be relative to that directory. dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" - try: - if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: - fname = space.fsencode_w(w_path) - rposix.mknodat(fname, mode, device, dir_fd) - else: - dispatch_filename(rposix.mknod)(space, w_path, mode, device) - except OSError as e: - raise wrap_oserror2(space, e, w_path) + while True: + try: + if rposix.HAVE_MKNODAT and dir_fd != DEFAULT_DIR_FD: + fname = space.fsencode_w(w_path) + rposix.mknodat(fname, mode, device, dir_fd) + else: + dispatch_filename(rposix.mknod)(space, w_path, mode, device) + break + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) @unwrap_spec(mask=c_int) def umask(space, mask): @@ -1094,7 +1136,7 @@ try: pid = os.getpid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pid) @unwrap_spec(pid=c_int, signal=c_int) @@ -1103,7 +1145,7 @@ try: rposix.kill(pid, signal) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(pgid=c_int, signal=c_int) def killpg(space, pgid, signal): @@ -1111,7 +1153,7 @@ try: os.killpg(pgid, signal) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def abort(space): """Abort the interpreter immediately. 
This 'dumps core' or otherwise fails @@ -1149,7 +1191,8 @@ else: rposix.link(src, dst) except OSError as e: - raise wrap_oserror(space, e, filename=src, filename2=dst) + raise wrap_oserror(space, e, filename=src, filename2=dst, + eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_SYMLINKAT)) @@ -1176,7 +1219,8 @@ else: dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) except OSError as e: - raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst) + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, + eintr_retry=False) @unwrap_spec( @@ -1197,7 +1241,7 @@ else: result = call_rposix(rposix.readlink, path) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) w_result = space.newbytes(result) if space.isinstance_w(path.w_path, space.w_unicode): return space.fsdecode(w_result) @@ -1245,7 +1289,7 @@ except: # Don't clobber the OSError if the fork failed pass - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) if pid == 0: run_fork_hooks('child', space) else: @@ -1258,12 +1302,17 @@ def openpty(space): "Open a pseudo-terminal, returning open fd's for both master and slave end." + master_fd = slave_fd = -1 try: master_fd, slave_fd = os.openpty() rposix.set_inheritable(master_fd, False) rposix.set_inheritable(slave_fd, False) except OSError as e: - raise wrap_oserror(space, e) + if master_fd >= 0: + rposix.c_close(master_fd) + if slave_fd >= 0: + rposix.c_close(slave_fd) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(master_fd), space.wrap(slave_fd)]) def forkpty(space): @@ -1277,12 +1326,16 @@ Wait for completion of a given child process. 
""" - try: - pid, status = os.waitpid(pid, options) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + pid, status = os.waitpid(pid, options) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) return space.newtuple([space.wrap(pid), space.wrap(status)]) +# missing: waitid() + @unwrap_spec(status=c_int) def _exit(space, status): os._exit(status) @@ -1310,7 +1363,7 @@ try: os.execv(command, args) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def _env2interp(space, w_env): @@ -1355,12 +1408,12 @@ try: rposix.fexecve(fd, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: try: os.execve(path, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(mode=int, path='fsencode') def spawnv(space, mode, path, w_argv): @@ -1368,7 +1421,7 @@ try: ret = os.spawnv(mode, path, args) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(ret) @unwrap_spec(mode=int, path='fsencode') @@ -1378,7 +1431,7 @@ try: ret = os.spawnve(mode, path, args, env) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(ret) @@ -1486,7 +1539,7 @@ # something is wrong with the file, when it also # could be the time stamp that gives a problem. 
*/ # so we use wrap_oserror() instead of wrap_oserror2() here - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @specialize.arg(1) def do_utimes(space, func, arg, utime): @@ -1503,7 +1556,7 @@ func(arg, (atime, mtime)) except OSError as e: # see comment above: don't use wrap_oserror2() - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @specialize.argtype(1) def _dispatch_utime(path, times): @@ -1546,7 +1599,7 @@ try: r = os.uname() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) l_w = [space.wrap_fsdecoded(i) for i in [r[0], r[1], r[2], r[3], r[4]]] w_tuple = space.newtuple(l_w) @@ -1570,7 +1623,7 @@ try: os.setuid(uid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(euid=c_uid_t) def seteuid(space, euid): @@ -1581,7 +1634,7 @@ try: os.seteuid(euid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(gid=c_gid_t) def setgid(space, gid): @@ -1592,7 +1645,7 @@ try: os.setgid(gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(egid=c_gid_t) def setegid(space, egid): @@ -1603,7 +1656,7 @@ try: os.setegid(egid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(path='fsencode') def chroot(space, path): @@ -1614,7 +1667,7 @@ try: os.chroot(path) except OSError as e: - raise wrap_oserror(space, e, path) + raise wrap_oserror(space, e, path, eintr_retry=False) return space.w_None def getgid(space): @@ -1646,7 +1699,7 @@ try: list = os.getgroups() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newlist([wrap_gid(space, e) for e in list]) def setgroups(space, w_groups): @@ -1660,7 +1713,7 @@ try: os.setgroups(list[:]) except OSError as e: 
- raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(username=str, gid=c_gid_t) def initgroups(space, username, gid): @@ -1673,7 +1726,7 @@ try: os.initgroups(username, gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def getpgrp(space): """ getpgrp() -> pgrp @@ -1690,7 +1743,7 @@ try: os.setpgrp() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None def getppid(space): @@ -1709,7 +1762,7 @@ try: pgid = os.getpgid(pid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pgid) @unwrap_spec(pid=c_int, pgrp=c_int) @@ -1721,7 +1774,7 @@ try: os.setpgid(pid, pgrp) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None @unwrap_spec(ruid=c_uid_t, euid=c_uid_t) @@ -1733,7 +1786,7 @@ try: os.setreuid(ruid, euid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(rgid=c_gid_t, egid=c_gid_t) def setregid(space, rgid, egid): @@ -1744,7 +1797,7 @@ try: os.setregid(rgid, egid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(pid=c_int) def getsid(space, pid): @@ -1755,7 +1808,7 @@ try: sid = os.getsid(pid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(sid) def setsid(space): @@ -1766,7 +1819,7 @@ try: os.setsid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.w_None @unwrap_spec(fd=c_int) @@ -1778,7 +1831,7 @@ try: pgid = os.tcgetpgrp(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(pgid) @unwrap_spec(fd=c_int, pgid=c_gid_t) @@ -1790,7 
+1843,7 @@ try: os.tcsetpgrp(fd, pgid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def getresuid(space): """ getresuid() -> (ruid, euid, suid) @@ -1800,7 +1853,7 @@ try: (ruid, euid, suid) = os.getresuid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([wrap_uid(space, ruid), wrap_uid(space, euid), wrap_uid(space, suid)]) @@ -1813,7 +1866,7 @@ try: (rgid, egid, sgid) = os.getresgid() except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([wrap_gid(space, rgid), wrap_gid(space, egid), wrap_gid(space, sgid)]) @@ -1827,7 +1880,7 @@ try: os.setresuid(ruid, euid, suid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) @unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) def setresgid(space, rgid, egid, sgid): @@ -1838,7 +1891,7 @@ try: os.setresgid(rgid, egid, sgid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def declare_new_w_star(name): if name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'): @@ -1864,7 +1917,7 @@ try: return space.wrap_fsdecoded(os.ttyname(fd)) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) def confname_w(space, w_name, namespace): @@ -1883,7 +1936,7 @@ try: res = os.sysconf(num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(fd=c_int) @@ -1892,7 +1945,7 @@ try: res = os.fpathconf(fd, num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(path=path_or_fd(allow_fd=hasattr(os, 'fpathconf'))) @@ -1902,12 +1955,12 @@ try: res = os.fpathconf(path.as_fd, num) except OSError as e: - raise wrap_oserror(space, e) + raise 
wrap_oserror(space, e, eintr_retry=False) else: try: res = os.pathconf(path.as_bytes, num) except OSError as e: - raise wrap_oserror2(space, e, path.w_path) + raise wrap_oserror2(space, e, path.w_path, eintr_retry=False) return space.wrap(res) def confstr(space, w_name): @@ -1915,7 +1968,7 @@ try: res = os.confstr(num) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec( @@ -1959,7 +2012,7 @@ try: os.fchown(fd, uid, gid) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) else: # String case try: @@ -1974,7 +2027,7 @@ assert dir_fd == DEFAULT_DIR_FD os.chown(path, uid, gid) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) @@ -1987,7 +2040,7 @@ try: os.lchown(path, uid, gid) except OSError as e: - raise wrap_oserror(space, e, path) + raise wrap_oserror(space, e, path, eintr_retry=False) @unwrap_spec(uid=c_uid_t, gid=c_gid_t) def fchown(space, w_fd, uid, gid): @@ -1995,11 +2048,14 @@ Change the owner and group id of the file given by file descriptor fd to the numeric uid and gid. Equivalent to os.chown(fd, uid, gid).""" + # same comment than about os.chmod(fd) vs. 
os.fchmod(fd) fd = space.c_filedescriptor_w(w_fd) - try: - os.fchown(fd, uid, gid) - except OSError as e: - raise wrap_oserror(space, e) + while True: + try: + os.fchown(fd, uid, gid) + break + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) def getloadavg(space): try: @@ -2032,7 +2088,7 @@ try: res = os.nice(increment) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.wrap(res) @unwrap_spec(size=int) @@ -2045,7 +2101,9 @@ try: return space.newbytes(rurandom.urandom(context, size)) except OSError as e: - raise wrap_oserror(space, e) + # 'rurandom' should catch and retry internally if it gets EINTR + # (at least in os.read(), which is probably enough in practice) + raise wrap_oserror(space, e, eintr_retry=False) def ctermid(space): """ctermid() -> string @@ -2083,7 +2141,7 @@ try: info = nt._getfileinformation(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newtuple([space.wrap(info[0]), space.wrap(info[1]), space.wrap(info[2])]) @@ -2096,7 +2154,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap(e.msg)) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.wrap(result) @@ -2228,7 +2286,7 @@ try: flags = rposix.get_status_flags(fd) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) return space.newbool(flags & rposix.O_NONBLOCK == 0) @unwrap_spec(fd=c_int, blocking=int) @@ -2241,4 +2299,4 @@ flags |= rposix.O_NONBLOCK rposix.set_status_flags(fd, flags) except OSError as e: - raise wrap_oserror(space, e) + raise wrap_oserror(space, e, eintr_retry=False) diff --git a/pypy/module/posix/interp_scandir.py b/pypy/module/posix/interp_scandir.py --- a/pypy/module/posix/interp_scandir.py +++ b/pypy/module/posix/interp_scandir.py @@ -28,7 +28,7 @@ try: dirp = 
rposix_scandir.opendir(path_bytes) except OSError as e: - raise wrap_oserror2(space, e, w_path) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) path_prefix = path_bytes if len(path_prefix) > 0 and path_prefix[-1] != '/': path_prefix += '/' @@ -85,7 +85,8 @@ try: entry = rposix_scandir.nextentry(self.dirp) except OSError as e: - raise self.fail(wrap_oserror2(space, e, self.w_path_prefix)) + raise self.fail(wrap_oserror2(space, e, self.w_path_prefix, + eintr_retry=False)) if not entry: raise self.fail() assert rposix_scandir.has_name_bytes(entry) @@ -235,7 +236,8 @@ except OSError as e: if e.errno == ENOENT: # not found return -1 - raise wrap_oserror2(self.space, e, self.fget_path(self.space)) + raise wrap_oserror2(self.space, e, self.fget_path(self.space), + eintr_retry=False) return stat.S_IFMT(st.st_mode) def is_dir(self, follow_symlinks): @@ -287,7 +289,8 @@ try: st = self.get_stat_or_lstat(follow_symlinks) except OSError as e: - raise wrap_oserror2(space, e, self.fget_path(space)) + raise wrap_oserror2(space, e, self.fget_path(space), + eintr_retry=False) return build_stat_result(space, st) def descr_inode(self, space): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -7,6 +7,7 @@ from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace +from pypy.interpreter.gateway import interp2app from rpython.translator.c.test.test_extfunc import need_sparse_files from rpython.rlib import rposix @@ -1365,3 +1366,40 @@ if os.name == 'posix': assert os.open in os.supports_dir_fd # openat() + +class AppTestPep475Retry: + spaceconfig = {'usemodules': USEMODULES} + + def setup_class(cls): + if os.name != 'posix': + skip("xxx tests are posix-only") + if cls.runappdirect: + skip("xxx does not work with -A") + + def fd_data_after_delay(space): + g = os.popen("sleep 5 && echo hello", "r") + cls._keepalive_g = 
g + return space.wrap(g.fileno()) + + cls.w_posix = space.appexec([], GET_POSIX) + cls.w_fd_data_after_delay = cls.space.wrap( + interp2app(fd_data_after_delay)) + + def test_pep475_retry_read(self): + import _signal as signal + signalled = [] + + def foo(*args): + signalled.append("ALARM") + + signal.signal(signal.SIGALRM, foo) + try: + fd = self.fd_data_after_delay() + signal.alarm(1) + got = self.posix.read(fd, 100) + self.posix.close(fd) + finally: + signal.signal(signal.SIGALRM, signal.SIG_DFL) + + assert signalled != [] + assert got.startswith(b'h') diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py --- a/pypy/module/select/interp_epoll.py +++ b/pypy/module/select/interp_epoll.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.error import exception_from_saved_errno from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter import timeutils from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.rlib._rsocket_rffi import socketclose, FD_SETSIZE @@ -156,9 +157,11 @@ def descr_poll(self, space, timeout=-1.0, maxevents=-1): self.check_closed(space) if timeout < 0: - timeout = -1.0 + end_time = 0.0 + itimeout = -1 else: - timeout *= 1000.0 + end_time = timeutils.monotonic(space) + timeout + itimeout = int(timeout * 1000.0 + 0.999) if maxevents == -1: maxevents = FD_SETSIZE - 1 @@ -167,9 +170,18 @@ "maxevents must be greater than 0, not %d", maxevents) with lltype.scoped_alloc(rffi.CArray(epoll_event), maxevents) as evs: - nfds = epoll_wait(self.epfd, evs, maxevents, int(timeout)) - if nfds < 0: - raise exception_from_saved_errno(space, space.w_IOError) + while True: + nfds = epoll_wait(self.epfd, evs, maxevents, itimeout) + if nfds < 0: + if get_saved_errno() == errno.EINTR: + space.getexecutioncontext().checksignals() + if itimeout >= 0: + timeout = end_time - timeutils.monotonic(space) + timeout = 
max(timeout, 0.0) + itimeout = int(timeout * 1000.0 + 0.999) + continue + raise exception_from_saved_errno(space, space.w_IOError) + break elist_w = [None] * nfds for i in xrange(nfds): diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -180,6 +180,7 @@ raise oefmt(space.w_ValueError, "Timeout must be None or >= 0, got %s", str(_timeout)) + XXX # fix test_select_signal.py first, for PEP475! sec = int(_timeout) nsec = int(1e9 * (_timeout - sec)) rffi.setintfield(timeout, 'c_tv_sec', sec) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -10,6 +10,7 @@ from pypy.interpreter.gateway import ( Unwrapper, WrappedDefault, interp2app, unwrap_spec) from pypy.interpreter.typedef import TypeDef +from pypy.interpreter import timeutils defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -49,8 +50,10 @@ @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): + """WARNING: the timeout parameter is in **milliseconds**!""" if space.is_w(w_timeout, space.w_None): timeout = -1 + end_time = 0 else: # we want to be compatible with cpython and also accept things # that can be casted to integer (I think) @@ -61,19 +64,29 @@ raise oefmt(space.w_TypeError, "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) + end_time = timeutils.monotonic(space) + timeout * 0.001 if self.running: raise oefmt(space.w_RuntimeError, "concurrent poll() invocation") - self.running = True - try: - retval = rpoll.poll(self.fddict, timeout) - except rpoll.PollError as e: - message = e.get_msg() - raise OperationError(space.w_OSError, - space.newtuple([space.wrap(e.errno), - space.wrap(message)])) - finally: - self.running = False + while True: + self.running = True + try: + retval = rpoll.poll(self.fddict, timeout) 
+ except rpoll.PollError as e: + if e.errno == errno.EINTR: + space.getexecutioncontext().checksignals() + timeout = int((end_time - timeutils.monotonic(space)) + * 1000.0 + 0.999) # round up + if timeout < 0: + timeout = 0 + continue + message = e.get_msg() + raise OperationError(space.w_OSError, + space.newtuple([space.wrap(e.errno), + space.wrap(message)])) + finally: + self.running = False + break retval_w = [] for fd, revents in retval: @@ -112,7 +125,7 @@ def _call_select(space, iwtd_w, owtd_w, ewtd_w, - ll_inl, ll_outl, ll_errl, ll_timeval): + ll_inl, ll_outl, ll_errl, ll_timeval, timeout): fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: @@ -122,13 +135,32 @@ if ll_errl: fdlisterr, nfds = _build_fd_set(space, ewtd_w, ll_errl, nfds) - res = _c.select(nfds + 1, ll_inl, ll_outl, ll_errl, ll_timeval) + if ll_timeval: + end_time = timeutils.monotonic(space) + timeout + else: + end_time = 0.0 - if res < 0: - errno = _c.geterrno() - msg = _c.socket_strerror_str(errno) - raise OperationError(space.w_OSError, space.newtuple([ - space.wrap(errno), space.wrap(msg)])) + while True: + if ll_timeval: + i = int(timeout) + rffi.setintfield(ll_timeval, 'c_tv_sec', i) + rffi.setintfield(ll_timeval, 'c_tv_usec', int((timeout-i)*1000000)) + + res = _c.select(nfds + 1, ll_inl, ll_outl, ll_errl, ll_timeval) + + if res >= 0: + break # normal path + err = _c.geterrno() + if err != errno.EINTR: + msg = _c.socket_strerror_str(err) + raise OperationError(space.w_OSError, space.newtuple([ + space.wrap(err), space.wrap(msg)])) + # got EINTR, automatic retry + space.getexecutioncontext().checksignals() + if timeout > 0.0: + timeout = end_time - timeutils.monotonic(space) + if timeout < 0.0: + timeout = 0.0 resin_w = [] resout_w = [] @@ -193,15 +225,12 @@ ll_errl = lltype.malloc(_c.fd_set.TO, flavor='raw') if timeout >= 0.0: ll_timeval = rffi.make(_c.timeval) - i = int(timeout) - rffi.setintfield(ll_timeval, 'c_tv_sec', i) - rffi.setintfield(ll_timeval, 'c_tv_usec', 
int((timeout-i)*1000000)) # Call this as a separate helper to avoid a large piece of code # in try:finally:. Needed for calling further _always_inline_ # helpers like _build_fd_set(). return _call_select(space, iwtd_w, owtd_w, ewtd_w, - ll_inl, ll_outl, ll_errl, ll_timeval) + ll_inl, ll_outl, ll_errl, ll_timeval, timeout) finally: if ll_timeval: lltype.free(ll_timeval, flavor='raw') diff --git a/pypy/module/select/test/test_select_signal.py b/pypy/module/select/test/test_select_signal.py new file mode 100644 --- /dev/null +++ b/pypy/module/select/test/test_select_signal.py @@ -0,0 +1,48 @@ + +class AppTestSelectSignal: + spaceconfig = { + "usemodules": ['select', 'time', 'signal'], + } + + def test_pep475_retry(self): + import select, time + import _signal as signal + + def foo(*args): + signalled.append("ALARM") + + # a list of functions that will do nothing more than sleep for 3 + # seconds + cases = [(select.select, [], [], [], 3.0)] + + if hasattr(select, 'poll'): + import posix + poll = select.poll() + cases.append((poll.poll, 3000)) # milliseconds + + if hasattr(select, 'epoll'): + epoll = select.epoll() + cases.append((epoll.poll, 3.0)) + + if hasattr(select, 'kqueue'): + kqueue = select.kqueue() From pypy.commits at gmail.com Sun Dec 4 16:01:02 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 13:01:02 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Test and fix Message-ID: <5844840e.876ec20a.35ac6.c05c@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88886:2b00c209938d Date: 2016-12-04 22:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2b00c209938d/ Log: Test and fix diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -90,11 +90,15 @@ as_int = bigint.toint() except OverflowError: from pypy.objspace.std.longobject import newbigint + if space.is_w(w_inttype, space.w_bool): + return space.w_True # extremely obscure case return 
newbigint(space, w_inttype, bigint) else: if space.is_w(w_inttype, space.w_int): # common case return wrapint(space, as_int) + if space.is_w(w_inttype, space.w_bool): + return space.newbool(as_int) # extremely obscure case w_obj = space.allocate_instance(W_IntObject, w_inttype) W_IntObject.__init__(w_obj, as_int) return w_obj diff --git a/pypy/objspace/std/test/test_boolobject.py b/pypy/objspace/std/test/test_boolobject.py --- a/pypy/objspace/std/test/test_boolobject.py +++ b/pypy/objspace/std/test/test_boolobject.py @@ -84,3 +84,7 @@ def __bool__(self): return 1 raises(TypeError, bool, Spam()) + + def test_from_bytes(self): + assert bool.from_bytes(b"", 'little') is False + assert bool.from_bytes(b"dasijldjs" * 157, 'little') is True diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -607,6 +607,16 @@ assert int(bytearray(b'100'), 2) == 4 raises(TypeError, int, memoryview(b'100'), 2) + def test_from_bytes(self): + class X(int): + pass + x = X.from_bytes(b"", 'little') + assert type(x) is X and x == 0 + x = X.from_bytes(b"*" * 100, 'little') + assert type(x) is X + expected = sum(256 ** i for i in range(100)) + assert x == expected * ord('*') + class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} From pypy.commits at gmail.com Sun Dec 4 16:04:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 13:04:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix? Message-ID: <584484d9.ce181c0a.d2d00.d801@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88887:ea86f82696e3 Date: 2016-12-04 22:03 +0100 http://bitbucket.org/pypy/pypy/changeset/ea86f82696e3/ Log: fix? 
diff --git a/lib-python/3/test/test_ordered_dict.py b/lib-python/3/test/test_ordered_dict.py --- a/lib-python/3/test/test_ordered_dict.py +++ b/lib-python/3/test/test_ordered_dict.py @@ -12,7 +12,7 @@ py_coll = support.import_fresh_module('collections', blocked=['_collections']) -c_coll = import_fresh_module('_collections', fresh=['_collections']) +c_coll = support.import_fresh_module('_collections', fresh=['_collections']) @contextlib.contextmanager From pypy.commits at gmail.com Sun Dec 4 17:30:58 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 14:30:58 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Py3.5 always adds these three names to all new modules (thanks Tiberium) Message-ID: <58449922.8c1f1c0a.f42a1.f283@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88888:5e500ed90cd8 Date: 2016-12-04 23:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5e500ed90cd8/ Log: Py3.5 always adds these three names to all new modules (thanks Tiberium) diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -22,11 +22,9 @@ self.w_name = w_name if w_name is not None: space.setitem(w_dict, space.new_interned_str('__name__'), w_name) - if add_package: - # add the __package__ attribute only when created from internal - # code, but not when created from Python code (as in CPython) - space.setitem(w_dict, space.new_interned_str('__package__'), - space.w_None) + # add these three attributes always ('add_package' is no longer used) + for extra in ['__package__', '__loader__', '__spec__']: + space.setitem(w_dict, space.new_interned_str(extra), space.w_None) self.startup_called = False def _cleanup_(self): diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -181,7 +181,7 @@ assert sys.__package__ == '' assert os.__package__ == '' - assert not 
hasattr(type(sys)('foo'), '__package__') + assert type(sys)('foo').__package__ is None def test_name_nonascii(self): import sys @@ -206,3 +206,12 @@ def test_weakrefable(self): import weakref weakref.ref(weakref) + + def test_all_dict_content(self): + import sys + m = type(sys)('foo') + assert m.__dict__ == {'__name__': 'foo', + '__doc__': None, + '__package__': None, + '__loader__': None, + '__spec__': None} From pypy.commits at gmail.com Mon Dec 5 02:52:45 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 04 Dec 2016 23:52:45 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: PEP 475 done Message-ID: <58451ccd.55911c0a.5bdb9.6969@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5758:8bbdf2ea8e81 Date: 2016-12-05 08:52 +0100 http://bitbucket.org/pypy/extradoc/changeset/8bbdf2ea8e81/ Log: PEP 475 done diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -234,3 +234,8 @@ * if you write ``from .a import b`` inside the Python prompt, or in a module not in any package, then you get a SystemError(!) with an error message that is unlikely to help newcomers. + +* pep 475: unclear why 'os.fchmod(fd)' retries automatically when + it gets EINTR but the otherwise-equivalent 'os.chmod(fd)' does not. + (The documentation says they are fully equivalent, so someone is + wrong.) 
diff --git a/planning/py3.5/milestone-1-progress.rst b/planning/py3.5/milestone-1-progress.rst --- a/planning/py3.5/milestone-1-progress.rst +++ b/planning/py3.5/milestone-1-progress.rst @@ -37,7 +37,7 @@ * "except pyopcode.Return:" in pyframe can't be there, because that's outside the JIT and it gives terrible performance -* PEP 475: Retry system calls failing with EINTR +* PEP 475: Retry system calls failing with EINTR (DONE) * ast compiler: clean up POP_EXCEPT: either remove them, or use it to clean up the "finally: name = None; del name" nonsense at the end of any except block From pypy.commits at gmail.com Mon Dec 5 03:05:51 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Dec 2016 00:05:51 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: add Message-ID: <58451fdf.ca57c20a.eb3d8.660c@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5759:011142ec6be4 Date: 2016-12-05 09:05 +0100 http://bitbucket.org/pypy/extradoc/changeset/011142ec6be4/ Log: add diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -64,6 +64,21 @@ f() sys.settrace(None) +* I didn't try, but it seems that typeobject.c:mro_internal() is prone + to a refcount crash. It does this:: + + old_mro = type->tp_mro; + ...mro_invoke()... /* might cause reentrance */ + type->tp_mro = new_mro; + ... + Py_XDECREF(old_mro); + + This last XDECREF drops the reference held by the previous value of + ``type->tp_mro`` after we changed it. But ``type->tp_mro`` might have + changed because of mro_invoke(), which calls pure Python code. If it + did change, then old_mro is no longer the old value of + ``type->tp_mro``. The wrong object gets decrefed. 
+ Non-segfaulting bugs -------------------- From pypy.commits at gmail.com Mon Dec 5 04:41:47 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Dec 2016 01:41:47 -0800 (PST) Subject: [pypy-commit] cffi default: Issue #295: use calloc() directly instead of PyObject_Malloc()+memset() Message-ID: <5845365b.624fc20a.7231f.8b5e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2824:b6adad5f4ea3 Date: 2016-12-05 10:41 +0100 http://bitbucket.org/cffi/cffi/changeset/b6adad5f4ea3/ Log: Issue #295: use calloc() directly instead of PyObject_Malloc()+memset() to handle ffi.new() with a default allocator. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1605,6 +1605,13 @@ #endif } +static PyObject * +cdataowning_no_generic_alloc(PyTypeObject *type, Py_ssize_t nitems) +{ + PyErr_SetString(PyExc_SystemError, "cdataowning: no generic alloc"); + return NULL; +} + static void cdataowning_dealloc(CDataObject *cd) { assert(!(cd->c_type->ct_flags & (CT_IS_VOID_PTR | CT_FUNCTIONPTR))); @@ -2867,6 +2874,17 @@ (getiterfunc)cdata_iter, /* tp_iter */ 0, /* tp_iternext */ cdata_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + PyType_GenericAlloc, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ + PyObject_Del, /* tp_free */ }; static PyTypeObject CDataOwning_Type = { @@ -2901,6 +2919,14 @@ 0, /* tp_members */ 0, /* tp_getset */ &CData_Type, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + cdataowning_no_generic_alloc, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ + free, /* tp_free */ }; static PyTypeObject CDataOwningGC_Type = { @@ -2936,6 +2962,14 @@ 0, /* tp_members */ 0, /* tp_getset */ &CDataOwning_Type, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* 
tp_dictoffset */ + 0, /* tp_init */ + PyType_GenericAlloc, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ + PyObject_GC_Del, /* tp_free */ }; static PyTypeObject CDataGCP_Type = { @@ -3079,10 +3113,14 @@ /************************************************************/ static CDataObject *allocate_owning_object(Py_ssize_t size, - CTypeDescrObject *ct) + CTypeDescrObject *ct, + int dont_clear) { CDataObject *cd; - cd = (CDataObject *)PyObject_Malloc(size); + if (dont_clear) + cd = malloc(size); + else + cd = calloc(size, 1); if (PyObject_Init((PyObject *)cd, &CDataOwning_Type) == NULL) return NULL; @@ -3109,7 +3147,7 @@ PyErr_SetString(PyExc_TypeError, "return type is a struct/union with a varsize array member"); } - cd = allocate_owning_object(dataoffset + datasize, ct); + cd = allocate_owning_object(dataoffset + datasize, ct, /*dont_clear=*/1); if (cd == NULL) return NULL; cd->c_data = ((char *)cd) + dataoffset; @@ -3147,7 +3185,8 @@ CDataObject *cd; if (allocator->ca_alloc == NULL) { - cd = allocate_owning_object(basesize + datasize, ct); + cd = allocate_owning_object(basesize + datasize, ct, + allocator->ca_dont_clear); if (cd == NULL) return NULL; cd->c_data = ((char *)cd) + basesize; @@ -3180,9 +3219,9 @@ cd = allocate_gcp_object(cd, ct, allocator->ca_free); Py_DECREF(res); - } - if (!allocator->ca_dont_clear) - memset(cd->c_data, 0, datasize); + if (!allocator->ca_dont_clear) + memset(cd->c_data, 0, datasize); + } return cd; } @@ -3262,7 +3301,8 @@ if (cds == NULL) return NULL; - cd = allocate_owning_object(sizeof(CDataObject_own_structptr), ct); + cd = allocate_owning_object(sizeof(CDataObject_own_structptr), ct, + /*dont_clear=*/1); if (cd == NULL) { Py_DECREF(cds); return NULL; From pypy.commits at gmail.com Mon Dec 5 05:21:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Dec 2016 02:21:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5: translation fix Message-ID: <58453f9a.c9b3c20a.3c7fe.98ee@mx.google.com> Author: Armin Rigo Branch: py3.5 
Changeset: r88889:2ad002c09537 Date: 2016-12-05 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/2ad002c09537/ Log: translation fix diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -98,7 +98,7 @@ # common case return wrapint(space, as_int) if space.is_w(w_inttype, space.w_bool): - return space.newbool(as_int) # extremely obscure case + return space.newbool(as_int != 0) # extremely obscure case w_obj = space.allocate_instance(W_IntObject, w_inttype) W_IntObject.__init__(w_obj, as_int) return w_obj From pypy.commits at gmail.com Mon Dec 5 06:13:29 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 05 Dec 2016 03:13:29 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Make the test more precise, fix (thanks ronan) Message-ID: <58454bd9.46bb1c0a.a68b7.c352@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88890:2c5fdace2d2e Date: 2016-12-05 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/2c5fdace2d2e/ Log: Make the test more precise, fix (thanks ronan) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -89,19 +89,13 @@ try: as_int = bigint.toint() except OverflowError: - from pypy.objspace.std.longobject import newbigint - if space.is_w(w_inttype, space.w_bool): - return space.w_True # extremely obscure case - return newbigint(space, w_inttype, bigint) + w_obj = space.newlong_from_rbigint(bigint) else: - if space.is_w(w_inttype, space.w_int): - # common case - return wrapint(space, as_int) - if space.is_w(w_inttype, space.w_bool): - return space.newbool(as_int != 0) # extremely obscure case - w_obj = space.allocate_instance(W_IntObject, w_inttype) - W_IntObject.__init__(w_obj, as_int) - return w_obj + w_obj = space.newint(as_int) + if not space.is_w(w_inttype, space.w_int): + # That's what from_bytes() does in CPython 3.5.2 too + w_obj = 
space.call_function(w_inttype, w_obj) + return w_obj @unwrap_spec(nbytes=int, byteorder=str, signed=bool) def descr_to_bytes(self, space, nbytes, byteorder, signed=False): diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -608,14 +608,18 @@ raises(TypeError, int, memoryview(b'100'), 2) def test_from_bytes(self): + called = [] class X(int): - pass + def __init__(self, val): + called.append(val) x = X.from_bytes(b"", 'little') assert type(x) is X and x == 0 + assert called == [0] x = X.from_bytes(b"*" * 100, 'little') assert type(x) is X - expected = sum(256 ** i for i in range(100)) - assert x == expected * ord('*') + expected = sum(256 ** i for i in range(100)) * ord('*') + assert x == expected + assert called == [0, expected] class AppTestIntShortcut(AppTestInt): From pypy.commits at gmail.com Mon Dec 5 07:05:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 05 Dec 2016 04:05:12 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Update test to match CPython 3.6, since we don't implement CPython 3.5's faulty behaviour (cf. http://bugs.python.org/issue26492 ) Message-ID: <584557f8.655fc20a.7b268.c154@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88891:2430e096c6cb Date: 2016-12-05 12:03 +0000 http://bitbucket.org/pypy/pypy/changeset/2430e096c6cb/ Log: Update test to match CPython 3.6, since we don't implement CPython 3.5's faulty behaviour (cf. 
http://bugs.python.org/issue26492 ) diff --git a/lib-python/3/test/test_array.py b/lib-python/3/test/test_array.py --- a/lib-python/3/test/test_array.py +++ b/lib-python/3/test/test_array.py @@ -318,8 +318,21 @@ d = pickle.dumps((itorig, orig), proto) it, a = pickle.loads(d) a.fromlist(data2) - self.assertEqual(type(it), type(itorig)) - self.assertEqual(list(it), data2) + # PyPy change: we implement 3.6 behaviour + self.assertEqual(list(it), []) + + def test_exhausted_iterator(self): + # PyPy change: test copied from 3.6 stdlib + a = array.array(self.typecode, self.example) + self.assertEqual(list(a), list(self.example)) + exhit = iter(a) + empit = iter(a) + for x in exhit: # exhaust the iterator + next(empit) # not exhausted + a.append(self.outside) + self.assertEqual(list(exhit), []) + self.assertEqual(list(empit), [self.outside]) + self.assertEqual(list(a), list(self.example) + [self.outside]) def test_insert(self): a = array.array(self.typecode, self.example) From pypy.commits at gmail.com Tue Dec 6 04:29:09 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 01:29:09 -0800 (PST) Subject: [pypy-commit] pypy default: Add a test for 2aa7dea5ad0f. It passes normally, but not on -A Message-ID: <584684e5.aaa3c20a.711ce.6b8c@mx.google.com> Author: Armin Rigo Branch: Changeset: r88892:3cffc7191d14 Date: 2016-12-06 10:26 +0100 http://bitbucket.org/pypy/pypy/changeset/3cffc7191d14/ Log: Add a test for 2aa7dea5ad0f. 
It passes normally, but not on -A diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -70,6 +70,19 @@ gc.enable() assert gc.isenabled() + def test_gc_collect_overrides_gc_disable(self): + import gc + deleted = [] + class X(object): + def __del__(self): + deleted.append(1) + assert gc.isenabled() + gc.disable() + X() + gc.collect() + assert deleted == [1] + gc.enable() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) From pypy.commits at gmail.com Tue Dec 6 04:54:43 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 01:54:43 -0800 (PST) Subject: [pypy-commit] pypy default: Normalize these macros (John Zhang on pypy-dev): instead of protecting Message-ID: <58468ae3.07941c0a.18ac8.fce5@mx.google.com> Author: Armin Rigo Branch: Changeset: r88893:08ec669ddab9 Date: 2016-12-06 10:53 +0100 http://bitbucket.org/pypy/pypy/changeset/08ec669ddab9/ Log: Normalize these macros (John Zhang on pypy-dev): instead of protecting the calls with @jit.dont_look_inside, use the modern way, which is the macro=True keyword arg. 
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1779,22 +1779,19 @@ lltype.free(l_utsbuf, flavor='raw') # These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT) -c_major = external('major', [rffi.INT], rffi.INT) -c_minor = external('minor', [rffi.INT], rffi.INT) +c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) +c_major = external('major', [rffi.INT], rffi.INT, macro=True) +c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) @replace_os_function('makedev') - at jit.dont_look_inside def makedev(maj, min): return c_makedev(maj, min) @replace_os_function('major') - at jit.dont_look_inside def major(dev): return c_major(dev) @replace_os_function('minor') - at jit.dont_look_inside def minor(dev): return c_minor(dev) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,11 @@ def test_isatty(self): assert rposix.isatty(-1) is False + def test_makedev(self): + dev = rposix.makedev(24, 7) + assert rposix.major(dev) == 24 + assert rposix.minor(dev) == 7 + @py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): From pypy.commits at gmail.com Tue Dec 6 05:01:14 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 02:01:14 -0800 (PST) Subject: [pypy-commit] pypy default: Fix 3cffc7191d14 for -A Message-ID: <58468c6a.c6bdc20a.bad00.75a2@mx.google.com> Author: Armin Rigo Branch: Changeset: r88894:407896dd979f Date: 2016-12-06 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/407896dd979f/ Log: Fix 3cffc7191d14 for -A diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -547,6 +547,8 @@ @jit.dont_look_inside def 
_run_finalizers(self): + # called by perform() when we have to "perform" this action, + # and also directly at the end of gc.collect). while True: w_obj = self.space.finalizer_queue.next_dead() if w_obj is None: diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -23,9 +23,18 @@ # specifically rely on that. This is similar to how, in CPython, an # explicit gc.collect() will invoke finalizers from cycles and fully # ignore the gc.disable() mode. - if not space.user_del_action.enabled_at_app_level: + temp_reenable = not space.user_del_action.enabled_at_app_level + if temp_reenable: enable_finalizers(space) - disable_finalizers(space) + try: + # fetch the pending finalizers from the queue, where they are + # likely to have been added by rgc.collect() above, and actually + # run them now. This forces them to run before this function + # returns, and also always in the enable_finalizers() mode. + space.user_del_action._run_finalizers() + finally: + if temp_reenable: + disable_finalizers(space) return space.wrap(0) From pypy.commits at gmail.com Tue Dec 6 05:01:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 02:01:16 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <58468c6c.c6bdc20a.bad00.75a6@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88895:ed147e0d8844 Date: 2016-12-06 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/ed147e0d8844/ Log: hg merge default diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -534,6 +534,8 @@ @jit.dont_look_inside def _run_finalizers(self): + # called by perform() when we have to "perform" this action, + # and also directly at the end of gc.collect). 
while True: w_obj = self.space.finalizer_queue.next_dead() if w_obj is None: diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -23,9 +23,18 @@ # specifically rely on that. This is similar to how, in CPython, an # explicit gc.collect() will invoke finalizers from cycles and fully # ignore the gc.disable() mode. - if not space.user_del_action.enabled_at_app_level: + temp_reenable = not space.user_del_action.enabled_at_app_level + if temp_reenable: enable_finalizers(space) - disable_finalizers(space) + try: + # fetch the pending finalizers from the queue, where they are + # likely to have been added by rgc.collect() above, and actually + # run them now. This forces them to run before this function + # returns, and also always in the enable_finalizers() mode. + space.user_del_action._run_finalizers() + finally: + if temp_reenable: + disable_finalizers(space) return space.wrap(0) diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -70,6 +70,19 @@ gc.enable() assert gc.isenabled() + def test_gc_collect_overrides_gc_disable(self): + import gc + deleted = [] + class X(object): + def __del__(self): + deleted.append(1) + assert gc.isenabled() + gc.disable() + X() + gc.collect() + assert deleted == [1] + gc.enable() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1779,22 +1779,19 @@ lltype.free(l_utsbuf, flavor='raw') # These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT) -c_major = external('major', [rffi.INT], rffi.INT) -c_minor = external('minor', [rffi.INT], rffi.INT) +c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) +c_major = external('major', 
[rffi.INT], rffi.INT, macro=True) +c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) @replace_os_function('makedev') - at jit.dont_look_inside def makedev(maj, min): return c_makedev(maj, min) @replace_os_function('major') - at jit.dont_look_inside def major(dev): return c_major(dev) @replace_os_function('minor') - at jit.dont_look_inside def minor(dev): return c_minor(dev) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,11 @@ def test_isatty(self): assert rposix.isatty(-1) is False + def test_makedev(self): + dev = rposix.makedev(24, 7) + assert rposix.major(dev) == 24 + assert rposix.minor(dev) == 7 + @py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): From pypy.commits at gmail.com Tue Dec 6 05:30:53 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 02:30:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5: CPython issue #25718 Message-ID: <5846935d.e337c20a.4abe1.8acd@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88896:5e2bfa42f8f6 Date: 2016-12-06 11:28 +0100 http://bitbucket.org/pypy/pypy/changeset/5e2bfa42f8f6/ Log: CPython issue #25718 diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1711,8 +1711,17 @@ def reduce_w(self): space = self.space + w_func = space.w_None if self.w_func is None else self.w_func + if self.w_total is space.w_None: # :-( + w_it = W_Chain(space, space.iter(space.newlist([ + space.newtuple([self.w_total]), + self.w_iterable]))) + w_it = space.call_function(space.type(self), + w_it, w_func) + return space.newtuple([space.gettypefor(W_ISlice), + space.newtuple([w_it, space.wrap(1), + space.w_None])]) w_total = space.w_None if self.w_total is None else self.w_total - w_func = space.w_None if 
self.w_func is None else self.w_func return space.newtuple([space.gettypefor(W_Accumulate), space.newtuple([self.w_iterable, w_func]), w_total]) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -90,7 +90,7 @@ def test_repeat_len(self): import itertools - import operator + import _operator as operator r = itertools.repeat('a', 15) next(r) @@ -329,7 +329,8 @@ assert next(it) == x def test_starmap(self): - import itertools, operator + import itertools + import _operator as operator it = itertools.starmap(operator.add, []) raises(StopIteration, next, it) @@ -1070,7 +1071,7 @@ from itertools import accumulate from decimal import Decimal from fractions import Fraction - import operator + import _operator as operator expected = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45] # one positional arg assert list(accumulate(range(10))) == expected @@ -1105,3 +1106,14 @@ a = accumulate(it) a.__setstate__(20) assert a.__reduce__() == (accumulate, (it, None), 20) + + def test_accumulate_reduce_corner_case(self): + from itertools import accumulate + import _operator as operator + it = iter([None, None, None]) + a = accumulate(it, operator.is_) + next(a) + x1, x2 = a.__reduce__() + b = x1(*x2) + res = list(b) + assert res == [True, False] From pypy.commits at gmail.com Tue Dec 6 06:21:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 03:21:16 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: another missed encoding case in source files Message-ID: <58469f2c.8a29c20a.db42a.a001@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5760:b4c37569deaa Date: 2016-12-06 12:21 +0100 http://bitbucket.org/pypy/extradoc/changeset/b4c37569deaa/ Log: another missed encoding case in source files diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- 
a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -231,6 +231,12 @@ print(locals()['__class__']) # 42 print(__class__) # but this is a NameError +* Follow-up on issue #25388: running ``python x.py`` if x.py contains + the following bytes... + + * ``b"#\xfd\n"`` => we get a SyntaxError: Non-UTF-8 code + * ``b"# coding: utf-8\n#\xfd\n"`` => we get no error! + Other issues of "dubious IMHO" status ------------------------------------- From pypy.commits at gmail.com Tue Dec 6 06:37:39 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 03:37:39 -0800 (PST) Subject: [pypy-commit] pypy py3.5: python-dev confirmed the strange repr for recursive deques Message-ID: <5846a303.05bd1c0a.6f881.2968@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88897:d96e6fa3d3ef Date: 2016-12-06 12:37 +0100 http://bitbucket.org/pypy/pypy/changeset/d96e6fa3d3ef/ Log: python-dev confirmed the strange repr for recursive deques diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -525,7 +525,9 @@ 'The app-level part of repr().' deque_id = id(d) if deque_id in currently_in_repr: - listrepr = '[...]' + return '[...]' # strange because it's a deque and this + # strongly suggests it's a list instead, + # but confirmed behavior from python-dev else: currently_in_repr[deque_id] = 1 try: diff --git a/pypy/module/_collections/test/test_deque.py b/pypy/module/_collections/test/test_deque.py --- a/pypy/module/_collections/test/test_deque.py +++ b/pypy/module/_collections/test/test_deque.py @@ -243,8 +243,9 @@ d = deque(range(20)) e = eval(repr(d)) assert d == e + d = deque() d.append(d) - assert '...' 
in repr(d) + assert repr(d) == "deque([[...]])" def test_hash(self): from _collections import deque From pypy.commits at gmail.com Tue Dec 6 06:42:49 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Dec 2016 03:42:49 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: try to fix two win issues Message-ID: <5846a439.69efc20a.8a7e1.9d1e@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88898:47cdbd271ed9 Date: 2016-12-06 12:06 +0100 http://bitbucket.org/pypy/pypy/changeset/47cdbd271ed9/ Log: try to fix two win issues diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -701,7 +701,7 @@ def ExpandEnvironmentStrings(space, source): "string = ExpandEnvironmentStrings(string) - Expand environment vars." try: - return space.newtext(rwinreg.ExpandEnvironmentStrings(source)) + return space.newunicode(rwinreg.ExpandEnvironmentStrings(source)) except WindowsError as e: raise wrap_windowserror(space, e) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -580,15 +580,18 @@ len_result = len(result) result_w = [None] * len_result for i in range(len_result): - w_bytes = space.newtext(result[i]) - try: - result_w[i] = space.call_method(w_bytes, - "decode", w_fs_encoding) - except OperationError as e: - # fall back to the original byte string - if e.async(space): - raise - result_w[i] = w_bytes + if type(result[i]) is unicode: + result_w[i] = space.newunicode(result[i]) + else: + w_bytes = space.newtext(result[i]) + try: + result_w[i] = space.call_method(w_bytes, + "decode", w_fs_encoding) + except OperationError as e: + # fall back to the original byte string + if e.async(space): + raise + result_w[i] = w_bytes return space.newlist(result_w) else: dirname = space.str0_w(w_dirname) From pypy.commits at 
gmail.com Tue Dec 6 06:42:50 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Dec 2016 03:42:50 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: add a way to express "bytes" unwrapping via unwrap_spec Message-ID: <5846a43a.c64bc20a.dbbb2.a67c@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88899:4a061011e8e3 Date: 2016-11-10 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4a061011e8e3/ Log: add a way to express "bytes" unwrapping via unwrap_spec (graft of 8d2718505c72) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -148,6 +148,9 @@ def visit_str0(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_bytes(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_nonnegint(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -277,6 +280,9 @@ def visit_str0(self, typ): self.run_args.append("space.str0_w(%s)" % (self.scopenext(),)) + def visit_bytes(self, typ): + self.run_args.append("space.bytes_w(%s)" % (self.scopenext(),)) + def visit_nonnegint(self, typ): self.run_args.append("space.gateway_nonnegint_w(%s)" % ( self.scopenext(),)) @@ -427,6 +433,9 @@ def visit_str0(self, typ): self.unwrap.append("space.str0_w(%s)" % (self.nextarg(),)) + def visit_bytes(self, typ): + self.unwrap.append("space.bytes_w(%s)" % (self.nextarg(),)) + def visit_nonnegint(self, typ): self.unwrap.append("space.gateway_nonnegint_w(%s)" % (self.nextarg(),)) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -249,6 +249,20 @@ assert self.space.eq_w(space.call_function(w_app_g, space.wrap(True)), space.wrap(True)) + def test_interp2app_unwrap_spec_bytes(self): + # we can't use the "bytes" object for the unwrap_spec, because that's + # an alias for "str" on the underlying Python2 + 
space = self.space + w = space.wrap + def g(space, b): + return space.newbytes(b) + app_g = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'bytes']) + app_g2 = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'bytes']) + assert app_g is app_g2 + w_app_g = space.wrap(app_g) + assert self.space.eq_w(space.call_function(w_app_g, space.newbytes("abc")), + space.newbytes("abc")) + def test_caching_methods(self): class Base(gateway.W_Root): def f(self): From pypy.commits at gmail.com Tue Dec 6 06:42:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Dec 2016 03:42:52 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: add a 'text' unwrap_spec Message-ID: <5846a43c.0a74c20a.f20e7.9e3d@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88900:8f14d8ae5141 Date: 2016-12-06 12:22 +0100 http://bitbucket.org/pypy/pypy/changeset/8f14d8ae5141/ Log: add a 'text' unwrap_spec diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -151,6 +151,9 @@ def visit_bytes(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_text(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_nonnegint(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -283,6 +286,9 @@ def visit_bytes(self, typ): self.run_args.append("space.bytes_w(%s)" % (self.scopenext(),)) + def visit_text(self, typ): + self.run_args.append("space.text_w(%s)" % (self.scopenext(),)) + def visit_nonnegint(self, typ): self.run_args.append("space.gateway_nonnegint_w(%s)" % ( self.scopenext(),)) @@ -436,6 +442,9 @@ def visit_bytes(self, typ): self.unwrap.append("space.bytes_w(%s)" % (self.nextarg(),)) + def visit_text(self, typ): + self.unwrap.append("space.text_w(%s)" % (self.nextarg(),)) + def visit_nonnegint(self, typ): self.unwrap.append("space.gateway_nonnegint_w(%s)" % (self.nextarg(),)) diff --git a/pypy/interpreter/test/test_gateway.py 
b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -253,7 +253,6 @@ # we can't use the "bytes" object for the unwrap_spec, because that's # an alias for "str" on the underlying Python2 space = self.space - w = space.wrap def g(space, b): return space.newbytes(b) app_g = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'bytes']) @@ -263,6 +262,17 @@ assert self.space.eq_w(space.call_function(w_app_g, space.newbytes("abc")), space.newbytes("abc")) + def test_interp2app_unwrap_spec_text(self): + space = self.space + def g(space, b): + return space.newbytes(b) + app_g = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'text']) + app_g2 = gateway.interp2app(g, unwrap_spec=[gateway.ObjSpace, 'text']) + assert app_g is app_g2 + w_app_g = space.wrap(app_g) + assert self.space.eq_w(space.call_function(w_app_g, space.newtext("abc")), + space.newtext("abc")) + def test_caching_methods(self): class Base(gateway.W_Root): def f(self): From pypy.commits at gmail.com Tue Dec 6 06:45:14 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 03:45:14 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: python-dev confirmed the strange repr for recursive deques Message-ID: <5846a4ca.52301c0a.e7978.2f89@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5761:ea8c2e49fcfb Date: 2016-12-06 12:44 +0100 http://bitbucket.org/pypy/extradoc/changeset/ea8c2e49fcfb/ Log: python-dev confirmed the strange repr for recursive deques diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -184,9 +184,6 @@ # 'frame'; and we've seen that it is non-empty # as long as we don't read frame.f_locals. -* _collectionsmodule.c: deque_repr uses "[...]" as repr if recursion is - detected. I'd suggest that "deque(...)" is clearer---it's not a list. 
- * weak dicts (both kinds) and weak sets have an implementation of __len__ which doesn't give the "expected" result on PyPy, and in some cases on CPython too. I'm not sure what is expected and what is not. From pypy.commits at gmail.com Tue Dec 6 07:18:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 04:18:37 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Moved to bugs.python.org Message-ID: <5846ac9d.6602c20a.9d1f2.b397@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5762:69d59aa417d0 Date: 2016-12-06 13:18 +0100 http://bitbucket.org/pypy/extradoc/changeset/69d59aa417d0/ Log: Moved to bugs.python.org diff --git a/planning/py3.5/cpython-crashers.rst b/planning/py3.5/cpython-crashers.rst --- a/planning/py3.5/cpython-crashers.rst +++ b/planning/py3.5/cpython-crashers.rst @@ -1,259 +1,11 @@ CPython crashers ================ -This document ways to crash CPython 3.5, or get completely unexpected -and undocumented results, or leak memory, etc. +This used to document ways to crash CPython 3.5, or get completely +unexpected and undocumented results, or leak memory, etc. +It has since been moved to these three main issues: -* _PyGen_Finalize() should not fail with an exception - http://bugs.python.org/issue27811 - -* PyFrameObject.f_gen can be left pointing to a dangling generator - http://bugs.python.org/issue27812 - -* os.scandir() returns an iterable object that should not be used - from multiple threads. Doing so can e.g. cause one thread to - close the dirp while another thread is still using it. This is - likely to crash. Similarly, the test for (!iterator->dirp) at - the start of ScandirIterator_iternext() is only done once even - if the following loop runs two or three times because of "." or - ".." entries. - -* os.scandir() direntry objects should not have stat() called from two - threads concurrently. It will make two stat objects and leak one of - them. 
- -* _PyGen_yf() checks the opcode at [f_lasti + 1], which is the next - opcode that will run when we resume the generator: either it is the - opcode following the YIELD, or it is exactly YIELD_FROM. It is not - possible at the moment to write Python code that compiles to a YIELD - immediately followed by YIELD_FROM, so by chance the two cases are - correctly distinguished. *However,* the discussion so far assumes - that the generator is not currently running. If it is (which probably - doesn't occur in reasonable Python code but can be constructed - manually), then this checks for example the byte/word that describes - the argument of the currently running opcode. If we're very unlucky - this byte has the value 72, which is YIELD_FROM. Total nonsense and - crashes follow. - -* faulthandler: register(): the signal handler, faulthandler_user(), - changes errno in faulthandler_dump_traceback() but fails to restore it - if chain=False. This can rarely cause random nonsense in the main - program. - -* setting f_lineno didn't evolve when the rest of the bytecodes evolved, - which means it is not safe any more:: - - import sys - - def f(): - try: - raise ValueError # line 5 - except ValueError: - print(42) # line 7 - - def my_trace(*args): - print(args) - if args[1] == 'line': - f = args[0] - if f.f_lineno == 5: - f.f_lineno = 7 - return my_trace - - sys.settrace(my_trace) - f() - sys.settrace(None) - -* I didn't try, but it seems that typeobject.c:mro_internal() is prone - to a refcount crash. It does this:: - - old_mro = type->tp_mro; - ...mro_invoke()... /* might cause reentrance */ - type->tp_mro = new_mro; - ... - Py_XDECREF(old_mro); - - This last XDECREF drops the reference held by the previous value of - ``type->tp_mro`` after we changed it. But ``type->tp_mro`` might have - changed because of mro_invoke(), which calls pure Python code. If it - did change, then old_mro is no longer the old value of - ``type->tp_mro``. The wrong object gets decrefed. 
- - -Non-segfaulting bugs --------------------- - -* on modern Linux: if the first call in the process to - socketpair() ends in a EINVAL, then cpython will (possibly wrongly) - assume it was caused by SOCK_CLOEXEC and not use SOCK_CLOEXEC at all - in the future - -* fcntl.ioctl(x, y, buf, mutate_flag): mutate_flag is there for the case - of buf being a read-write buffer, which is then mutated in-place. - But if we call with a read-only buffer, mutate_flag is ignored (instead - of rejecting a True value)---ioctl(x, y, "foo", True) will not actually - mutate the string "foo", but the True is completely ignored. (I think - this is a bug introduced during the Argument Clinic refactoring.) - -* re.sub(b'y', bytearray(b'a'), bytearray(b'xyz')) -> b'xaz' - re.sub(b'y', bytearray(b'\\n'), bytearray(b'xyz')) -> internal TypeError - -* if you have a stack of generators where each is in 'yield from' from - the next one, and you call '.next()' on the outermost, then it enters - and leaves all intermediate frames. This is costly but may be - required to get the sys.settrace()/setprofile() hooks called. - However, if you call '.throw()' or '.close()' instead, then it uses a - much more efficient way to go from the outermost to the innermost - frame---as a result, the enter/leave of the intermediate frames is not - invoked. This can confuse coverage tools and profilers. For example, - in a stack ``f1()->f2()->f3()``, vmprof would show f3() as usually - called via f2() from f1() but occasionally called directly from f1(). - -* ceval.c: GET_AITER: calls _PyCoro_GetAwaitableIter(), which might - get an exception from calling the user-defined __await__() or checking - what it returns; such an exception is completely eaten. - -* this is an old issue that was forgotten twice on the - issue tracker: ``class C: __new__=int.__new__`` and ``class C(int): - __new__=object.__new__`` can each be instantiated, even though they - shouldn't. 
This is because ``__new__`` is completely ignored if it is - set to any built-in function that uses ``tp_new_wrapper`` as its C code - (many of the built-in types' ``__new__`` are like that). - http://bugs.python.org/issue1694663#msg75957, - http://bugs.python.org/issue5322#msg84112. In (at least) CPython 3.5, - a few classes work only thanks to abuse of this bug: for example, - ``io.UnsupportedOperation.__new__(io.UnsupportedOperation)`` doesn't - work, but that was not noticed because ``io.UnsupportedOperation()`` - mistakenly works. - -* this program fails the check for no sys.exc_info(), even though at - the point this assert runs (called from the <== line) we are not in - any except/finally block. This is a generalization of - test_exceptions:test_generator_doesnt_retain_old_exc:: - - import sys - - def g(): - try: - raise ValueError - except ValueError: - yield 1 - assert sys.exc_info() == (None, None, None) - yield 2 - - gen = g() - - try: - raise IndexError - except IndexError: - assert next(gen) is 1 - assert next(gen) is 2 # <== - -* frame.clear() does not clear f_locals, unlike what a test says - (Lib/test/test_frame.py):: - - def test_locals_clear_locals(self): - # Test f_locals before and after clear() (to exercise caching) - f, outer, inner = self.make_frames() - outer.f_locals - inner.f_locals - outer.clear() - inner.clear() - self.assertEqual(outer.f_locals, {}) - self.assertEqual(inner.f_locals, {}) - - This test passes, but the C-level PyFrameObject has got a strong - reference to f_locals, which is only updated (to be empty) if the - Python code tries to read this attribute. In the normal case, - code that calls clear() but doesn't read f_locals afterwards will - still leak everything contained in the C-level f_locals field. This - can be shown by this failing test:: - - import sys - - def g(): - x = 42 - return sys._getframe() - - frame = g() - d = frame.f_locals - frame.clear() - print(d) - assert d == {} # fails! 
but 'assert d is frame.f_locals' passes, - # which shows that this dict is kept alive by - # 'frame'; and we've seen that it is non-empty - # as long as we don't read frame.f_locals. - -* weak dicts (both kinds) and weak sets have an implementation of - __len__ which doesn't give the "expected" result on PyPy, and in some - cases on CPython too. I'm not sure what is expected and what is not. - Here is an example on CPython 3.5.2+ (using a thread to run the weakref - callbacks only, not to explicitly inspect or modify 'd'):: - - import weakref, _thread - from queue import Queue - - queue = Queue() - def subthread(queue): - while True: - queue.get() - _thread.start_new_thread(subthread, (queue,)) - - class X: - pass - d = weakref.WeakValueDictionary() - while True: - x = X() - d[52] = x - queue.put(x) - del x - while list(d) != []: - pass - assert len(d) == 0 # we've checked that list(d)==[], but this may fail - - On CPython I've seen the assert fail only after editing the function - WeakValueDictionary.__init__.remove() to add ``time.sleep(0.01)`` as - the first line. Otherwise I guess the timings happen to make that test - pass. - -* CPython 3.5.2: this ``nonlocal`` seems not to have a reasonable - effect (note that if we use a different name instead of ``__class__``, - this example correctly complain that there is no binding in the outer - scope of ``Y``):: - - class Y: - class X: - nonlocal __class__ - __class__ = 42 - print(locals()['__class__']) # 42 - print(__class__) # but this is a NameError - -* Follow-up on issue #25388: running ``python x.py`` if x.py contains - the following bytes... - - * ``b"#\xfd\n"`` => we get a SyntaxError: Non-UTF-8 code - * ``b"# coding: utf-8\n#\xfd\n"`` => we get no error! - - -Other issues of "dubious IMHO" status -------------------------------------- - -* argument clinic turns the "bool" specifier into - PyObject_IsTrue(), accepting any argument whatsoever. This can easily - get very confusing for the user, e.g. 
after messing up the number of - arguments. For example: os.symlink("/path1", "/path2", "/path3") - doesn't fail, it just considers the 3rd argument as some true value. - -* hash({}.values()) works (but hash({}.keys()) correctly gives - TypeError). That's a bit confusing and, as far as I can tell, always - pointless. Also, related: d.keys()==d.keys() but - d.values()!=d.values(). - -* if you write ``from .a import b`` inside the Python prompt, or in - a module not in any package, then you get a SystemError(!) with an - error message that is unlikely to help newcomers. - -* pep 475: unclear why 'os.fchmod(fd)' retries automatically when - it gets EINTR but the otherwise-equivalent 'os.chmod(fd)' does not. - (The documentation says they are fully equivalent, so someone is - wrong.) +http://bugs.python.org/issue28883 +http://bugs.python.org/issue28884 +http://bugs.python.org/issue28885 From pypy.commits at gmail.com Tue Dec 6 07:35:26 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 04:35:26 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Some more checks that comments contain valid utf-8 Message-ID: <5846b08e.0a22c20a.a3d0.b570@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88901:77d31587155e Date: 2016-12-06 13:34 +0100 http://bitbucket.org/pypy/pypy/changeset/77d31587155e/ Log: Some more checks that comments contain valid utf-8 diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py --- a/pypy/interpreter/pyparser/pytokenizer.py +++ b/pypy/interpreter/pyparser/pytokenizer.py @@ -44,9 +44,21 @@ return None +def verify_utf8(token): + for c in token: + if ord(c) >= 0x80: + break + else: + return True + try: + u = token.decode('utf-8') + except UnicodeDecodeError: + return False + return True + def verify_identifier(token): for c in token: - if ord(c) > 0x80: + if ord(c) >= 0x80: break else: return True @@ -159,8 +171,14 @@ pos = pos + 1 if pos == max: break - if line[pos] in '#\r\n': - # skip 
comments or blank lines + if line[pos] in '\r\n': + # skip blank lines + continue + if line[pos] == '#': + # skip full-line comment, but still check that it is valid utf-8 + if not verify_utf8(line): + raise TokenError("Non-UTF-8 code in comment", + line, lnum, pos, token_list) continue if column == indents[-1]: @@ -227,7 +245,10 @@ token_list.append(tok) last_comment = '' elif initial == '#': - # skip comment + # skip comment, but still check that it is valid utf-8 + if not verify_utf8(token): + raise TokenError("Non-UTF-8 code in comment", + line, lnum, start, token_list) last_comment = token elif token in triple_quoted: endDFA = endDFAs[token] diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -954,6 +954,27 @@ else: assert False, "Expected SyntaxError" + def test_invalid_utf8_in_comments_or_strings(self): + import sys + compile(b"# coding: latin1\n#\xfd\n", "dummy", "exec") + raises(SyntaxError, compile, b"# coding: utf-8\n'\xfd'\n", + "dummy", "exec") #1 + raises(SyntaxError, compile, b'# coding: utf-8\nx=5\nb"\xfd"\n', + "dummy", "exec") #2 + # the following example still fails on CPython 3.5.2, skip if -A + if '__pypy__' in sys.builtin_module_names: + raises(SyntaxError, compile, b"# coding: utf-8\n#\xfd\n", + "dummy", "exec") #3 + + def test_cpython_issues_24022_25388(self): + from _ast import PyCF_ACCEPT_NULL_BYTES + raises(SyntaxError, compile, b'0000\x00\n00000000000\n\x00\n\x9e\n', + "dummy", "exec", PyCF_ACCEPT_NULL_BYTES) + raises(SyntaxError, compile, b"#\x00\n#\xfd\n", "dummy", "exec", + PyCF_ACCEPT_NULL_BYTES) + raises(SyntaxError, compile, b"#\x00\nx=5#\xfd\n", "dummy", "exec", + PyCF_ACCEPT_NULL_BYTES) + def test_dict_and_set_literal_order(self): x = 1 l1 = list({1:'a', 3:'b', 2:'c', 4:'d'}) From pypy.commits at gmail.com Tue Dec 6 09:27:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Dec 2016 
06:27:57 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: start replacing str in unwrap_spec with 'text' or 'bytes' Message-ID: <5846caed.e7b1c20a.15edc.df9f@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88902:52a246e29cf5 Date: 2016-12-06 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/52a246e29cf5/ Log: start replacing str in unwrap_spec with 'text' or 'bytes' diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -364,9 +364,9 @@ return w_result @unwrap_spec(argcount=int, nlocals=int, stacksize=int, flags=int, - codestring=str, - filename=str, name=str, firstlineno=int, - lnotab=str, magic=int) + codestring='bytes', + filename='text', name='text', firstlineno=int, + lnotab='bytes', magic=int) def descr_code__new__(space, w_subtype, argcount, nlocals, stacksize, flags, codestring, w_constants, w_names, diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -8,7 +8,7 @@ from pypy.interpreter.gateway import unwrap_spec - at unwrap_spec(filename=str, mode=str, flags=int, dont_inherit=int) + at unwrap_spec(filename='text', mode='text', flags=int, dont_inherit=int) def compile(space, w_source, filename, mode, flags=0, dont_inherit=0): """Compile the source string (a Python module, statement or expression) into a code object that can be executed by the exec statement or eval(). 
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -103,7 +103,7 @@ return w_result return None - @unwrap_spec(name=str) + @unwrap_spec(name='text') def descr_getattribute(self, space, name): if name and name[0] == "_": if name == "__dict__": @@ -351,7 +351,7 @@ else: return None - @unwrap_spec(name=str) + @unwrap_spec(name='text') def descr_getattribute(self, space, name): if len(name) >= 8 and name[0] == '_': if name == "__dict__": diff --git a/pypy/module/__pypy__/interp_debug.py b/pypy/module/__pypy__/interp_debug.py --- a/pypy/module/__pypy__/interp_debug.py +++ b/pypy/module/__pypy__/interp_debug.py @@ -3,7 +3,7 @@ @jit.dont_look_inside - at unwrap_spec(category=str) + at unwrap_spec(category='text') def debug_start(space, category): debug.debug_start(category) @@ -13,12 +13,12 @@ debug.debug_print(' '.join(parts)) @jit.dont_look_inside - at unwrap_spec(category=str) + at unwrap_spec(category='text') def debug_stop(space, category): debug.debug_stop(category) - at unwrap_spec(category=str) + at unwrap_spec(category='text') def debug_print_once(space, category, args_w): debug_start(space, category) debug_print(space, args_w) diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec - at unwrap_spec(type=str) + at unwrap_spec(type='text') def newdict(space, type): """ newdict(type) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -22,7 +22,7 @@ attach_gdb() - at unwrap_spec(name=str) + at unwrap_spec(name='text') def method_cache_counter(space, name): 
"""Return a tuple (method_cache_hits, method_cache_misses) for calls to methods with the name.""" @@ -41,7 +41,7 @@ cache.misses = {} cache.hits = {} - at unwrap_spec(name=str) + at unwrap_spec(name='text') def mapdict_cache_counter(space, name): """Return a tuple (index_cache_hits, index_cache_misses) for lookups in the mapdict cache with the given attribute name.""" @@ -75,7 +75,7 @@ operr = space.getexecutioncontext().sys_exc_info(for_hidden=True) return space.w_None if operr is None else operr.get_w_traceback(space) - at unwrap_spec(meth=str) + at unwrap_spec(meth='text') def lookup_special(space, w_obj, meth): """Lookup up a special method on an object.""" if space.is_oldstyle_instance(w_obj): @@ -163,7 +163,7 @@ else: cache._code_hook = w_callable - at unwrap_spec(string=str, byteorder=str, signed=int) + at unwrap_spec(string='bytes', byteorder='text', signed=int) def decode_long(space, string, byteorder='little', signed=1): from rpython.rlib.rbigint import rbigint, InvalidEndiannessError try: diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -121,7 +121,7 @@ raise oefmt(space.w_TypeError, "argument must be callable") - at unwrap_spec(encoding=str) + at unwrap_spec(encoding='text') def lookup_codec(space, encoding): """lookup(encoding) -> (encoder, decoder, stream_reader, stream_writer) Looks up a codec tuple in the Python codec registry and returns @@ -278,7 +278,7 @@ globals()[name]).spacebind(space) - at unwrap_spec(errors=str) + at unwrap_spec(errors='text') def lookup_error(space, errors): """lookup_error(errors) -> handler @@ -295,7 +295,7 @@ return w_err_handler - at unwrap_spec(errors=str) + at unwrap_spec(errors='text') def encode(space, w_obj, w_encoding=None, errors='strict'): """encode(obj, [encoding[,errors]]) -> object @@ -324,7 +324,7 @@ s = space.getarg_w('t#', w_data) return space.newtuple([space.newbytes(s), 
space.newint(len(s))]) - at unwrap_spec(errors=str) + at unwrap_spec(errors='text') def decode(space, w_obj, w_encoding=None, errors='strict'): """decode(obj, [encoding[,errors]]) -> object @@ -349,7 +349,7 @@ else: assert 0, "XXX, what to do here?" - at unwrap_spec(errors=str) + at unwrap_spec(errors='text') def register_error(space, errors, w_handler): """register_error(errors, handler) @@ -685,7 +685,7 @@ # support for the "string escape" codec # This is a bytes-to bytes transformation - at unwrap_spec(data=str, errors='str_or_None') + at unwrap_spec(data='bytes', errors='str_or_None') def escape_encode(space, data, errors='strict'): from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, quote="'") @@ -695,7 +695,7 @@ w_result = space.newbytes(result[start:end]) return space.newtuple([w_result, space.newint(len(data))]) - at unwrap_spec(data=str, errors='str_or_None') + at unwrap_spec(data='bytes', errors='str_or_None') def escape_decode(space, data, errors='strict'): from pypy.interpreter.pyparser.parsestring import PyString_DecodeEscape result = PyString_DecodeEscape(space, data, errors, None) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -132,7 +132,7 @@ # The 'direct_' methods assume that the caller already acquired the # file lock. They don't convert StreamErrors to OperationErrors, too. 
- @unwrap_spec(mode=str, buffering=int) + @unwrap_spec(mode='text', buffering=int) def direct___init__(self, w_name, mode='r', buffering=-1): self.direct_close() self.w_name = w_name @@ -550,7 +550,7 @@ W_File.__init__(file, space) return file - at unwrap_spec(fd=int, mode=str, buffering=int) + at unwrap_spec(fd=int, mode='text', buffering=int) def descr_file_fdopen(space, w_subtype, fd, mode='r', buffering=-1): file = space.allocate_instance(W_File, w_subtype) W_File.__init__(file, space) diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -105,6 +105,7 @@ for name, argtypes in streamio.STREAM_METHODS.iteritems(): numargs = len(argtypes) + argtypes = [typ if typ is not str else 'bytes' for typ in argtypes] args = ", ".join(["v%s" % i for i in range(numargs)]) exec py.code.Source(""" def %(name)s(self, space, %(args)s): diff --git a/pypy/module/_io/interp_fileio.py b/pypy/module/_io/interp_fileio.py --- a/pypy/module/_io/interp_fileio.py +++ b/pypy/module/_io/interp_fileio.py @@ -128,7 +128,7 @@ W_FileIO.__init__(self, space) return self - @unwrap_spec(mode=str, closefd=int) + @unwrap_spec(mode='text', closefd=int) def descr_init(self, space, w_name, mode='r', closefd=True): if space.isinstance_w(w_name, space.w_float): raise oefmt(space.w_TypeError, diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -40,7 +40,7 @@ DEFAULT_BUFFER_SIZE = 8 * 1024 - at unwrap_spec(mode=str, buffering=int, + at unwrap_spec(mode='text', buffering=int, encoding="str_or_None", errors="str_or_None", newline="str_or_None", closefd=bool) def open(space, w_file, mode="r", buffering=-1, encoding=None, errors=None, diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ 
b/pypy/module/_pickle_support/maker.py @@ -73,7 +73,7 @@ new_iter = W_XRangeIterator(space, current, remaining, step) return new_iter - at unwrap_spec(identifier=str) + at unwrap_spec(identifier='text') def builtin_code(space, identifier): from pypy.interpreter import gateway try: @@ -82,7 +82,7 @@ raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) - at unwrap_spec(identifier=str) + at unwrap_spec(identifier='text') def builtin_function(space, identifier): from pypy.interpreter import function try: diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -20,7 +20,7 @@ raise converted_error(space, e) return space.newtext(res) - at unwrap_spec(host=str) + at unwrap_spec(host='text') def gethostbyname(space, host): """gethostbyname(host) -> address @@ -53,7 +53,7 @@ raise converted_error(space, e) return common_wrapgethost(space, res) - at unwrap_spec(host=str) + at unwrap_spec(host='text') def gethostbyaddr(space, host): """gethostbyaddr(host) -> (name, aliaslist, addresslist) @@ -66,7 +66,7 @@ raise converted_error(space, e) return common_wrapgethost(space, res) - at unwrap_spec(name=str, w_proto = WrappedDefault(None)) + at unwrap_spec(name='text', w_proto = WrappedDefault(None)) def getservbyname(space, name, w_proto): """getservbyname(servicename[, protocolname]) -> integer @@ -106,7 +106,7 @@ raise converted_error(space, e) return space.newtext(service) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def getprotobyname(space, name): """getprotobyname(name) -> integer @@ -198,7 +198,7 @@ """ return space.newint(rsocket.htonl(x)) - at unwrap_spec(ip=str) + at unwrap_spec(ip='text') def inet_aton(space, ip): """inet_aton(string) -> packed 32-bit IP representation @@ -211,7 +211,7 @@ raise converted_error(space, e) return space.newbytes(buf) - at unwrap_spec(packed=str) + at unwrap_spec(packed='text') def 
inet_ntoa(space, packed): """inet_ntoa(packed_ip) -> ip_address_string @@ -223,7 +223,7 @@ raise converted_error(space, e) return space.newtext(ip) - at unwrap_spec(family=int, ip=str) + at unwrap_spec(family=int, ip='text') def inet_pton(space, family, ip): """inet_pton(family, ip) -> packed IP address string @@ -236,7 +236,7 @@ raise converted_error(space, e) return space.newbytes(buf) - at unwrap_spec(family=int, packed=str) + at unwrap_spec(family=int, packed='bufferstr') def inet_ntop(space, family, packed): """inet_ntop(family, packed_ip) -> string formatted IP address diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -226,7 +226,7 @@ if HAVE_OPENSSL_RAND: # helper routines for seeding the SSL PRNG - @unwrap_spec(string=str, entropy=float) + @unwrap_spec(string='bytes', entropy=float) def RAND_add(space, string, entropy): """RAND_add(string, entropy) @@ -247,7 +247,7 @@ return space.newint(res) if HAVE_OPENSSL_RAND_EGD: - @unwrap_spec(path=str) + @unwrap_spec(path='text') def RAND_egd(space, path): """RAND_egd(path) -> bytes @@ -263,7 +263,7 @@ return space.newint(bytes) else: # Dummy func for platforms missing RAND_egd(). Most likely LibreSSL. 
- @unwrap_spec(path=str) + @unwrap_spec(path='text') def RAND_egd(space, path): raise ssl_error(space, "RAND_egd unavailable") @@ -1150,7 +1150,7 @@ return getattr(space.fromcache(Cache), name) - at unwrap_spec(filename=str) + at unwrap_spec(filename='text') def _test_decode_cert(space, filename): cert = libssl_BIO_new(libssl_BIO_s_file()) if not cert: @@ -1334,7 +1334,7 @@ def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None): return _SSLSocket.descr_new(space, self, w_sock, server_side, w_server_hostname, w_ssl_sock) - @unwrap_spec(cipherlist=str) + @unwrap_spec(cipherlist='text') def descr_set_ciphers(self, space, cipherlist): ret = libssl_SSL_CTX_set_cipher_list(self.ctx, cipherlist) if ret == 0: @@ -1499,7 +1499,7 @@ libssl_SSL_CTX_set_default_passwd_cb_userdata( self.ctx, None) - @unwrap_spec(filepath=str) + @unwrap_spec(filepath='text') def load_dh_params_w(self, space, filepath): bio = libssl_BIO_new_file(filepath, "r") if not bio: @@ -1688,7 +1688,7 @@ rlist.append(_decode_certificate(space, cert)) return space.newlist(rlist) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def set_ecdh_curve_w(self, space, name): nid = libssl_OBJ_sn2nid(name) if nid == 0: @@ -1767,7 +1767,7 @@ return space.newtuple([space.newint(nid), w_sn, w_ln, w_buf]) - at unwrap_spec(txt=str, name=bool) + at unwrap_spec(txt='bytes', name=bool) def txt2obj(space, txt, name=False): obj = libssl_OBJ_txt2obj(txt, not name) if not obj: diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -132,7 +132,7 @@ return _run_compiled_module(space, w_modulename, filename, w_file, w_mod, check_afterwards=True) - at unwrap_spec(filename=str) + at unwrap_spec(filename='text') def load_dynamic(space, w_modulename, filename, w_file=None): if not importing.has_so_extension(space): raise oefmt(space.w_ImportError, "Not implemented") diff --git 
a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1017,7 +1017,7 @@ except OSError as e: raise wrap_oserror(space, e) - at unwrap_spec(username=str, gid=c_gid_t) + at unwrap_spec(username='text', gid=c_gid_t) def initgroups(space, username, gid): """ initgroups(username, gid) -> None diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -32,7 +32,7 @@ return fmtiter.totalsize - at unwrap_spec(format=str) + at unwrap_spec(format='text') def calcsize(space, format): return space.newint(_calcsize(space, format)) @@ -52,13 +52,13 @@ return fmtiter.result.build() - at unwrap_spec(format=str) + at unwrap_spec(format='text') def pack(space, format, args_w): return space.newbytes(_pack(space, format, args_w)) # XXX inefficient - at unwrap_spec(format=str, offset=int) + at unwrap_spec(format='text', offset=int) def pack_into(space, format, w_buffer, offset, args_w): res = _pack(space, format, args_w) buf = space.getarg_w('w*', w_buffer) @@ -83,13 +83,13 @@ return space.newtuple(fmtiter.result_w[:]) - at unwrap_spec(format=str) + at unwrap_spec(format='text') def unpack(space, format, w_str): buf = space.getarg_w('s*', w_str) return _unpack(space, format, buf) - at unwrap_spec(format=str, offset=int) + at unwrap_spec(format='text', offset=int) def unpack_from(space, format, w_buffer, offset=0): size = _calcsize(space, format) buf = space.getarg_w('z*', w_buffer) @@ -112,7 +112,7 @@ self.format = format self.size = _calcsize(space, format) - @unwrap_spec(format=str) + @unwrap_spec(format='text') def descr__new__(space, w_subtype, format): self = space.allocate_instance(W_Struct, w_subtype) W_Struct.__init__(self, space, format) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ 
b/pypy/objspace/std/floatobject.py @@ -224,7 +224,7 @@ return w_obj @staticmethod - @unwrap_spec(kind=str) + @unwrap_spec(kind='text') def descr___getformat__(space, w_cls, kind): if kind == "float": return space.newtext(_float_format) @@ -233,7 +233,7 @@ raise oefmt(space.w_ValueError, "only float and double are valid") @staticmethod - @unwrap_spec(s=str) + @unwrap_spec(s='text') def descr_fromhex(space, w_cls, s): length = len(s) i = 0 From pypy.commits at gmail.com Tue Dec 6 09:29:25 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 06:29:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Give more precise error messages---still different from CPython, but well, too bad I suppose Message-ID: <5846cb45.c5311c0a.948fd.6808@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88903:ec42187d7005 Date: 2016-12-06 15:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ec42187d7005/ Log: Give more precise error messages---still different from CPython, but well, too bad I suppose diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -32,3 +32,4 @@ PyCF_ONLY_AST = 0x0400 PyCF_IGNORE_COOKIE = 0x0800 PyCF_ACCEPT_NULL_BYTES = 0x10000000 # PyPy only, for compile() +PyCF_FOUND_ENCODING = 0x20000000 # PyPy only, for pytokenizer diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ b/pypy/interpreter/pyparser/pyparse.py @@ -108,6 +108,7 @@ tree is handled here. """ # Detect source encoding. + explicit_encoding = False enc = None if compile_info.flags & consts.PyCF_SOURCE_IS_UTF8: enc = 'utf-8' @@ -119,12 +120,14 @@ enc = 'utf-8' # If an encoding is explicitly given check that it is utf-8. 
decl_enc = _check_for_encoding(bytessrc) + explicit_encoding = (decl_enc is not None) if decl_enc and decl_enc != "utf-8": raise error.SyntaxError("UTF-8 BOM with %s coding cookie" % decl_enc, filename=compile_info.filename) textsrc = bytessrc else: enc = _normalize_encoding(_check_for_encoding(bytessrc)) + explicit_encoding = (enc is not None) if enc is None: enc = 'utf-8' try: @@ -145,6 +148,8 @@ raise flags = compile_info.flags + if explicit_encoding: + flags |= consts.PyCF_FOUND_ENCODING # The tokenizer is very picky about how it wants its input. source_lines = textsrc.splitlines(True) diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py --- a/pypy/interpreter/pyparser/pytokenizer.py +++ b/pypy/interpreter/pyparser/pytokenizer.py @@ -56,16 +56,27 @@ return False return True +def bad_utf8(location_msg, line, lnum, pos, token_list, flags): + msg = 'Non-UTF-8 code in %s' % location_msg + if not (flags & consts.PyCF_FOUND_ENCODING): + # this extra part of the message is added only if we found no + # explicit encoding + msg += (' but no encoding declared; see ' + 'http://python.org/dev/peps/pep-0263/ for details') + return TokenError(msg, line, lnum, pos, token_list) + + def verify_identifier(token): + # 1=ok; 0=not an identifier; -1=bad utf-8 for c in token: if ord(c) >= 0x80: break else: - return True + return 1 try: u = token.decode('utf-8') except UnicodeDecodeError: - return False + return -1 from pypy.objspace.std.unicodeobject import _isidentifier return _isidentifier(u) @@ -177,8 +188,8 @@ if line[pos] == '#': # skip full-line comment, but still check that it is valid utf-8 if not verify_utf8(line): - raise TokenError("Non-UTF-8 code in comment", - line, lnum, pos, token_list) + raise bad_utf8("comment", + line, lnum, pos, token_list, flags) continue if column == indents[-1]: @@ -247,8 +258,8 @@ elif initial == '#': # skip comment, but still check that it is valid utf-8 if not verify_utf8(token): - raise 
TokenError("Non-UTF-8 code in comment", - line, lnum, start, token_list) + raise bad_utf8("comment", + line, lnum, start, token_list, flags) last_comment = token elif token in triple_quoted: endDFA = endDFAs[token] @@ -280,7 +291,13 @@ last_comment = '' elif (initial in namechars or # ordinary name ord(initial) >= 0x80): # unicode identifier - if not verify_identifier(token): + valid = verify_identifier(token) + if valid <= 0: + if valid == -1: + raise bad_utf8("identifier", line, lnum, start + 1, + token_list, flags) + # valid utf-8, but it gives a unicode char that cannot + # be used in identifiers raise TokenError("invalid character in identifier", line, lnum, start + 1, token_list) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -954,6 +954,15 @@ else: assert False, "Expected SyntaxError" + def test_invalid_utf8(self): + e = raises(SyntaxError, compile, b'\x80', "dummy", "exec") + assert str(e.value).startswith('Non-UTF-8 code') + assert 'but no encoding declared' in str(e.value) + e = raises(SyntaxError, compile, b'# coding: utf-8\n\x80', + "dummy", "exec") + assert str(e.value).startswith('Non-UTF-8 code') + assert 'but no encoding declared' not in str(e.value) + def test_invalid_utf8_in_comments_or_strings(self): import sys compile(b"# coding: latin1\n#\xfd\n", "dummy", "exec") From pypy.commits at gmail.com Tue Dec 6 09:37:39 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 06:37:39 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Fix for a few tests in lib-python/3/test/test_coroutine Message-ID: <5846cd33.849c1c0a.38347.686b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88904:92ee15da58e2 Date: 2016-12-06 15:37 +0100 http://bitbucket.org/pypy/pypy/changeset/92ee15da58e2/ Log: Fix for a few tests in lib-python/3/test/test_coroutine diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py 
--- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -4,16 +4,24 @@ try: import cpyext except ImportError: - raise ImportError("No module named '_testcapi'") + pass # no 'cpyext', but we still have to define e.g. awaitType +else: + import _pypy_testcapi + cfile = '_testcapimodule.c' + thisdir = os.path.dirname(__file__) + output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) -import _pypy_testcapi -cfile = '_testcapimodule.c' -thisdir = os.path.dirname(__file__) -output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) + try: + fp, filename, description = imp.find_module('_testcapi', + path=[output_dir]) + with fp: + imp.load_module('_testcapi', fp, filename, description) + except ImportError: + _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) -try: - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - with fp: - imp.load_module('_testcapi', fp, filename, description) -except ImportError: - _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) + +class awaitType: + def __init__(self, iterator): + self._iterator = iterator + def __await__(self): + return self._iterator From pypy.commits at gmail.com Tue Dec 6 09:40:23 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 06:40:23 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Skip reference counting checks (but still run the tests around them, to Message-ID: <5846cdd7.624fc20a.7231f.df07@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88905:50e74819bfe3 Date: 2016-12-06 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/50e74819bfe3/ Log: Skip reference counting checks (but still run the tests around them, to check that they don't crash) diff --git a/lib-python/3/test/test_coroutines.py b/lib-python/3/test/test_coroutines.py --- a/lib-python/3/test/test_coroutines.py +++ b/lib-python/3/test/test_coroutines.py @@ -8,6 +8,11 @@ import warnings from test import support +def _getrefcount(obj): + if 
hasattr(sys, 'getrefcount'): + return sys.getrefcount(obj) + return '' + class AsyncYieldFrom: def __init__(self, obj): @@ -1306,7 +1311,7 @@ def test_for_2(self): tup = (1, 2, 3) - refs_before = sys.getrefcount(tup) + refs_before = _getrefcount(tup) async def foo(): async for i in tup: @@ -1317,7 +1322,7 @@ run_async(foo()) - self.assertEqual(sys.getrefcount(tup), refs_before) + self.assertEqual(_getrefcount(tup), refs_before) def test_for_3(self): class I: @@ -1325,7 +1330,7 @@ return self aiter = I() - refs_before = sys.getrefcount(aiter) + refs_before = _getrefcount(aiter) async def foo(): async for i in aiter: @@ -1337,7 +1342,7 @@ run_async(foo()) - self.assertEqual(sys.getrefcount(aiter), refs_before) + self.assertEqual(_getrefcount(aiter), refs_before) def test_for_4(self): class I: @@ -1348,7 +1353,7 @@ return () aiter = I() - refs_before = sys.getrefcount(aiter) + refs_before = _getrefcount(aiter) async def foo(): async for i in aiter: @@ -1360,7 +1365,7 @@ run_async(foo()) - self.assertEqual(sys.getrefcount(aiter), refs_before) + self.assertEqual(_getrefcount(aiter), refs_before) def test_for_5(self): class I: @@ -1410,8 +1415,8 @@ manager = Manager() iterable = Iterable() - mrefs_before = sys.getrefcount(manager) - irefs_before = sys.getrefcount(iterable) + mrefs_before = _getrefcount(manager) + irefs_before = _getrefcount(iterable) async def main(): nonlocal I @@ -1428,8 +1433,8 @@ run_async(main()) self.assertEqual(I, 111011) - self.assertEqual(sys.getrefcount(manager), mrefs_before) - self.assertEqual(sys.getrefcount(iterable), irefs_before) + self.assertEqual(_getrefcount(manager), mrefs_before) + self.assertEqual(_getrefcount(iterable), irefs_before) ############## From pypy.commits at gmail.com Tue Dec 6 09:49:26 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 06:49:26 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fixes for test_coroutines Message-ID: <5846cff6.07941c0a.18ac8.6cff@mx.google.com> Author: Armin Rigo Branch: 
py3.5 Changeset: r88906:f140ab6e4d2e Date: 2016-12-06 15:48 +0100 http://bitbucket.org/pypy/pypy/changeset/f140ab6e4d2e/ Log: fixes for test_coroutines diff --git a/lib-python/3/test/test_coroutines.py b/lib-python/3/test/test_coroutines.py --- a/lib-python/3/test/test_coroutines.py +++ b/lib-python/3/test/test_coroutines.py @@ -747,9 +747,12 @@ def test_corotype_1(self): ct = types.CoroutineType - self.assertIn('into coroutine', ct.send.__doc__) - self.assertIn('inside coroutine', ct.close.__doc__) - self.assertIn('in coroutine', ct.throw.__doc__) + self.assert_('into coroutine' in ct.send.__doc__ or + 'into generator/coroutine' in ct.send.__doc__) + self.assert_('inside coroutine' in ct.close.__doc__ or + 'inside generator/coroutine' in ct.close.__doc__) + self.assert_('in coroutine' in ct.throw.__doc__ or + 'in generator/coroutine' in ct.throw.__doc__) self.assertIn('of the coroutine', ct.__dict__['__name__'].__doc__) self.assertIn('of the coroutine', ct.__dict__['__qualname__'].__doc__) self.assertEqual(ct.__name__, 'coroutine') diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -298,7 +298,7 @@ raise oefmt(space.w_RuntimeError, "coroutine wrapper %R attempted " "to recursively wrap %R", - w_wrapper, w_gen) + w_wrapper, self) ec.in_coroutine_wrapper = True try: w_gen = space.call_function(w_wrapper, w_gen) diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -825,9 +825,11 @@ cr_code = interp_attrproperty_w('pycode', cls=Coroutine), cr_await = interp_attrproperty_w('w_yielded_from', cls=Coroutine), __name__ = GetSetProperty(Coroutine.descr__name__, - Coroutine.descr_set__name__), + Coroutine.descr_set__name__, + doc="name of the coroutine"), __qualname__ = GetSetProperty(Coroutine.descr__qualname__, - Coroutine.descr_set__qualname__), + Coroutine.descr_set__qualname__, + 
doc="qualified name of the coroutine"), __weakref__ = make_weakref_descr(Coroutine), ) assert not Coroutine.typedef.acceptable_as_base_class # no __new__ From pypy.commits at gmail.com Tue Dec 6 10:06:45 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 06 Dec 2016 07:06:45 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: more str unwrap_spec removed Message-ID: <5846d405.06891c0a.e1974.7658@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r88907:5016301ea5c3 Date: 2016-12-06 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/5016301ea5c3/ Log: more str unwrap_spec removed diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -7,6 +7,10 @@ def create_builder(name, strtype, builder_cls, newmethod): + if strtype is str: + unwrap = 'bytes' + else: + unwrap = unicode class W_Builder(W_Root): def __init__(self, space, size): if size < 0: @@ -23,12 +27,12 @@ def descr__new__(space, w_subtype, size=-1): return W_Builder(space, size) - @unwrap_spec(s=strtype) + @unwrap_spec(s=unwrap) def descr_append(self, space, s): self._check_done(space) self.builder.append(s) - @unwrap_spec(s=strtype, start=int, end=int) + @unwrap_spec(s=unwrap, start=int, end=int) def descr_append_slice(self, space, s, start, end): self._check_done(space) if not 0 <= start <= end <= len(s): diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -174,7 +174,7 @@ m1, s12, m2, s23, m3, w_x) - @unwrap_spec(module_name=str, _version=int, _types=str) + @unwrap_spec(module_name='text', _version=int, _types='text') def descr_init(self, module_name='?', _version=-1, _types='', w__globals=None, w__struct_unions=None, w__enums=None, w__typenames=None, w__includes=None): @@ -377,7 +377,7 @@ return 
w_cdata.with_gc(w_destructor) - @unwrap_spec(replace_with=str) + @unwrap_spec(replace_with='text') def descr_getctype(self, w_cdecl, replace_with=''): """\ Return a string giving the C type 'cdecl', which may be itself a @@ -614,7 +614,7 @@ lib.cdlopen_close() - @unwrap_spec(name=str) + @unwrap_spec(name='text') def descr_integer_const(self, name): """\ Get the value of an integer constant. diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -64,7 +64,7 @@ # ____________________________________________________________ - at unwrap_spec(w_ctype=ctypeobj.W_CType, replace_with=str) + at unwrap_spec(w_ctype=ctypeobj.W_CType, replace_with='text') def getcname(space, w_ctype, replace_with): p = w_ctype.name_position s = '%s%s%s' % (w_ctype.name[:p], replace_with, w_ctype.name[p:]) diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -38,7 +38,7 @@ space = self.space return space.newtext("" % self.name) - @unwrap_spec(w_ctype=W_CType, name=str) + @unwrap_spec(w_ctype=W_CType, name='text') def load_function(self, w_ctype, name): from pypy.module._cffi_backend import ctypefunc, ctypeptr, ctypevoid space = self.space @@ -61,7 +61,7 @@ name, self.name) return W_CData(space, rffi.cast(rffi.CCHARP, cdata), w_ctype) - @unwrap_spec(w_ctype=W_CType, name=str) + @unwrap_spec(w_ctype=W_CType, name='text') def read_variable(self, w_ctype, name): space = self.space try: @@ -72,7 +72,7 @@ name, self.name) return w_ctype.convert_to_object(rffi.cast(rffi.CCHARP, cdata)) - @unwrap_spec(w_ctype=W_CType, name=str) + @unwrap_spec(w_ctype=W_CType, name='text') def write_variable(self, w_ctype, name, w_value): space = self.space try: diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- 
a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -132,7 +132,7 @@ eptypesize("int_fast64_t", 8, _WCTSigned) eptypesize("uint_fast64_t", 8, _WCTUnsign) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def new_primitive_type(space, name): return _new_primitive_type(space, name) @@ -259,11 +259,11 @@ # ____________________________________________________________ - at unwrap_spec(name=str) + at unwrap_spec(name='text') def new_struct_type(space, name): return ctypestruct.W_CTypeStruct(space, name) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def new_union_type(space, name): return ctypestruct.W_CTypeUnion(space, name) @@ -562,7 +562,7 @@ # ____________________________________________________________ - at unwrap_spec(name=str, w_basectype=ctypeobj.W_CType) + at unwrap_spec(name='text', w_basectype=ctypeobj.W_CType) def new_enum_type(space, name, w_enumerators, w_enumvalues, w_basectype): enumerators_w = space.fixedview(w_enumerators) enumvalues_w = space.fixedview(w_enumvalues) diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -12,7 +12,7 @@ """) # the following function is used e.g. 
in test_resource_warning - at unwrap_spec(regex=str, s=str) + at unwrap_spec(regex='text', s='text') def regex_search(space, regex, s): import re import textwrap diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -160,7 +160,7 @@ ) W_Hash.typedef.acceptable_as_base_class = False - at unwrap_spec(name=str, string='bufferstr') + at unwrap_spec(name='text', string='bufferstr') def new(space, name, string=''): w_hash = W_Hash(space, name) w_hash.update(space, string) @@ -181,7 +181,7 @@ HAS_FAST_PKCS5_PBKDF2_HMAC = ropenssl.PKCS5_PBKDF2_HMAC is not None if HAS_FAST_PKCS5_PBKDF2_HMAC: - @unwrap_spec(name=str, password=str, salt=str, rounds=int, + @unwrap_spec(name='text', password='bytes', salt='bytes', rounds=int, w_dklen=WrappedDefault(None)) def pbkdf2_hmac(space, name, password, salt, rounds, w_dklen): digest = ropenssl.EVP_get_digestbyname(name) diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py --- a/pypy/module/_locale/interp_locale.py +++ b/pypy/module/_locale/interp_locale.py @@ -148,7 +148,7 @@ _strxfrm = rlocale.external('strxfrm', [rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SIZE_T) - at unwrap_spec(s=str) + at unwrap_spec(s='text') def strxfrm(space, s): "string -> string. Returns a string that behaves for cmp locale-aware." 
n1 = len(s) + 1 @@ -193,7 +193,7 @@ if rlocale.HAVE_LIBINTL: _gettext = rlocale.external('gettext', [rffi.CCHARP], rffi.CCHARP) - @unwrap_spec(msg=str) + @unwrap_spec(msg='text') def gettext(space, msg): """gettext(msg) -> string Return translation of msg.""" @@ -205,7 +205,7 @@ _dgettext = rlocale.external('dgettext', [rffi.CCHARP, rffi.CCHARP], rffi.CCHARP) - @unwrap_spec(msg=str) + @unwrap_spec(msg='text') def dgettext(space, w_domain, msg): """dgettext(domain, msg) -> string Return translation of msg in domain.""" @@ -239,7 +239,7 @@ _dcgettext = rlocale.external('dcgettext', [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.CCHARP) - @unwrap_spec(msg=str, category=int) + @unwrap_spec(msg='text', category=int) def dcgettext(space, w_domain, msg, category): """dcgettext(domain, msg, category) -> string Return translation of msg in domain and category.""" @@ -301,7 +301,7 @@ rffi.CCHARP, save_err=rffi.RFFI_SAVE_ERRNO) - @unwrap_spec(domain=str) + @unwrap_spec(domain='text') def bindtextdomain(space, domain, w_dir): """bindtextdomain(domain, dir) -> string Bind the C library's domain to dir.""" @@ -332,7 +332,7 @@ [rffi.CCHARP, rffi.CCHARP], rffi.CCHARP) if rlocale.HAVE_BIND_TEXTDOMAIN_CODESET: - @unwrap_spec(domain=str) + @unwrap_spec(domain='text') def bind_textdomain_codeset(space, domain, w_codeset): """bind_textdomain_codeset(domain, codeset) -> string Bind the C library's domain to codeset.""" diff --git a/pypy/module/_minimal_curses/interp_curses.py b/pypy/module/_minimal_curses/interp_curses.py --- a/pypy/module/_minimal_curses/interp_curses.py +++ b/pypy/module/_minimal_curses/interp_curses.py @@ -75,7 +75,7 @@ except _curses.error as e: raise curses_error(e.args[0]) - at unwrap_spec(capname=str) + at unwrap_spec(capname='text') def tigetstr(space, capname): try: result = _curses_tigetstr(capname) @@ -85,7 +85,7 @@ raise convert_error(space, e) return space.newbytes(result) - at unwrap_spec(s=str) + at unwrap_spec(s='text') def tparm(space, s, args_w): args = 
[space.int_w(a) for a in args_w] try: diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -48,7 +48,7 @@ c_codecs.pypy_cjk_dec_free(self.decodebuf) self.decodebuf = lltype.nullptr(c_codecs.DECODEBUF_P.TO) - @unwrap_spec(object=str, final=bool) + @unwrap_spec(object='bytes', final=bool) def decode_w(self, object, final=False): space = self.space state = space.fromcache(CodecState) diff --git a/pypy/module/_multibytecodec/interp_multibytecodec.py b/pypy/module/_multibytecodec/interp_multibytecodec.py --- a/pypy/module/_multibytecodec/interp_multibytecodec.py +++ b/pypy/module/_multibytecodec/interp_multibytecodec.py @@ -11,7 +11,7 @@ self.name = name self.codec = codec - @unwrap_spec(input=str, errors="str_or_None") + @unwrap_spec(input='bytes', errors="str_or_None") def decode(self, space, input, errors=None): if errors is None: errors = 'strict' @@ -52,7 +52,7 @@ MultibyteCodec.typedef.acceptable_as_base_class = False - at unwrap_spec(name=str) + at unwrap_spec(name='text') def getcodec(space, name): try: codec = c_codecs.getcodec(name) diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -52,7 +52,7 @@ raise oefmt(space.w_TypeError, "function name must be a string or integer") else: - @unwrap_spec(name=str) + @unwrap_spec(name='text') def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): name = space.text_w(w_name) argtypes_w, argtypes, w_restype, restype = unpack_argtypes( @@ -287,7 +287,7 @@ restype = unwrap_ffitype(space, w_restype, allow_void=True) return argtypes_w, argtypes, w_restype, restype - at unwrap_spec(addr=r_uint, name=str, flags=int) + at unwrap_spec(addr=r_uint, name='text', flags=int) def descr_fromaddr(space, w_cls, addr, 
name, w_argtypes, w_restype, flags=libffi.FUNCFLAG_CDECL): argtypes_w, argtypes, w_restype, restype = unpack_argtypes(space, @@ -331,7 +331,7 @@ def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def getaddressindll(self, space, name): try: address_as_uint = rffi.cast(lltype.Unsigned, diff --git a/pypy/module/_rawffi/alt/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_rawffi/alt/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -21,7 +21,7 @@ def __repr__(self): return '' % (self.name, self.w_ffitype.name) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def descr_new_field(space, w_type, name, w_ffitype): w_ffitype = space.interp_w(W_FFIType, w_ffitype) return W_Field(name, w_ffitype) @@ -111,7 +111,7 @@ - at unwrap_spec(name=str) + at unwrap_spec(name='text') def descr_new_structdescr(space, w_type, name, w_fields=None): descr = W__StructDescr(name) if not space.is_none(w_fields): @@ -180,14 +180,14 @@ addr = rffi.cast(rffi.ULONG, self.rawmem) return space.newint(addr) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def getfield(self, space, name): w_ffitype, offset = self.structdescr.get_type_and_offset_for_field( space, name) field_getter = GetFieldConverter(space, self.rawmem, offset) return field_getter.do_and_wrap(w_ffitype) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def setfield(self, space, name, w_value): w_ffitype, offset = self.structdescr.get_type_and_offset_for_field( space, name) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -218,7 +218,7 @@ space.setitem(self.w_cache, w_key, w_funcptr) return w_funcptr - @unwrap_spec(name=str) + @unwrap_spec(name='text') def getaddressindll(self, space, name): try: address_as_uint = 
rffi.cast(lltype.Unsigned, @@ -562,7 +562,7 @@ W_FuncPtr.typedef.acceptable_as_base_class = False def _create_new_accessor(func_name, name): - @unwrap_spec(tp_letter=str) + @unwrap_spec(tp_letter='text') def accessor(space, tp_letter): if len(tp_letter) != 1: raise oefmt(space.w_ValueError, "Expecting string of length one") diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -204,12 +204,12 @@ def fromaddress(self, space, address): return W_StructureInstance(space, self, address) - @unwrap_spec(attr=str) + @unwrap_spec(attr='text') def descr_fieldoffset(self, space, attr): index = self.getindex(space, attr) return space.newint(self.ll_positions[index]) - @unwrap_spec(attr=str) + @unwrap_spec(attr='text') def descr_fieldsize(self, space, attr): index = self.getindex(space, attr) if self.ll_bitsizes and index < len(self.ll_bitsizes): @@ -351,7 +351,7 @@ addr = rffi.cast(lltype.Unsigned, self.ll_buffer) return space.newtext("<_rawffi struct %x>" % (addr,)) - @unwrap_spec(attr=str) + @unwrap_spec(attr='text') def getattr(self, space, attr): if not self.ll_buffer: raise segfault_exception(space, "accessing NULL pointer") @@ -359,7 +359,7 @@ _, tp, _ = self.shape.fields[i] return wrap_value(space, cast_pos, self, i, tp.itemcode) - @unwrap_spec(attr=str) + @unwrap_spec(attr='text') def setattr(self, space, attr, w_value): if not self.ll_buffer: raise segfault_exception(space, "accessing NULL pointer") @@ -367,7 +367,7 @@ _, tp, _ = self.shape.fields[i] unwrap_value(space, push_field, self, i, tp.itemcode, w_value) - @unwrap_spec(attr=str) + @unwrap_spec(attr='text') def descr_fieldaddress(self, space, attr): i = self.shape.getindex(space, attr) ptr = rffi.ptradd(self.ll_buffer, self.shape.ll_positions[i]) diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ 
b/pypy/module/_socket/interp_func.py @@ -40,7 +40,7 @@ space.newlist(aliases), space.newlist(address_list)]) - at unwrap_spec(host=str) + at unwrap_spec(host='text') def gethostbyname_ex(space, host): """gethostbyname_ex(host) -> (name, aliaslist, addresslist) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -16,7 +16,7 @@ from pypy.module._file.interp_file import W_File - at unwrap_spec(typecode=str) + at unwrap_spec(typecode='text') def w_array(space, w_cls, typecode, __args__): if len(__args__.arguments_w) > 1: raise oefmt(space.w_TypeError, "array() takes at most 2 arguments") diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -249,7 +249,7 @@ space = self.space raise oefmt(space.w_ValueError, "invalid mode: '%s'", mode) - @unwrap_spec(mode=str, buffering=int, compresslevel=int) + @unwrap_spec(mode='text', buffering=int, compresslevel=int) def direct_bz2__init__(self, w_name, mode='r', buffering=-1, compresslevel=9): self.direct_close() diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -28,7 +28,7 @@ def lt(self, a, b): return a.priority() < b.priority() - at unwrap_spec(name=str) + at unwrap_spec(name='text') def load_dictionary(space, name): try: cdll = capi.c_load_dictionary(name) @@ -63,11 +63,11 @@ state.w_nullptr = nullarr return state.w_nullptr - at unwrap_spec(name=str) + at unwrap_spec(name='text') def resolve_name(space, name): return space.newtext(capi.c_resolve_name(space, name)) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def scope_byname(space, name): true_name = capi.c_resolve_name(space, name) @@ -95,7 +95,7 @@ return None - at unwrap_spec(name=str) + at unwrap_spec(name='text') def template_byname(space, 
name): state = space.fromcache(State) try: @@ -873,9 +873,9 @@ W_CPPNamespace.typedef = TypeDef( 'CPPNamespace', get_method_names = interp2app(W_CPPNamespace.get_method_names), - get_overload = interp2app(W_CPPNamespace.get_overload, unwrap_spec=['self', str]), + get_overload = interp2app(W_CPPNamespace.get_overload, unwrap_spec=['self', 'text']), get_datamember_names = interp2app(W_CPPNamespace.get_datamember_names), - get_datamember = interp2app(W_CPPNamespace.get_datamember, unwrap_spec=['self', str]), + get_datamember = interp2app(W_CPPNamespace.get_datamember, unwrap_spec=['self', 'text']), is_namespace = interp2app(W_CPPNamespace.is_namespace), __dir__ = interp2app(W_CPPNamespace.ns__dir__), ) @@ -960,11 +960,11 @@ type_name = interp_attrproperty('name', W_CPPClass, wrapfn="newtext"), get_base_names = interp2app(W_CPPClass.get_base_names), get_method_names = interp2app(W_CPPClass.get_method_names), - get_overload = interp2app(W_CPPClass.get_overload, unwrap_spec=['self', str]), + get_overload = interp2app(W_CPPClass.get_overload, unwrap_spec=['self', 'text']), get_datamember_names = interp2app(W_CPPClass.get_datamember_names), - get_datamember = interp2app(W_CPPClass.get_datamember, unwrap_spec=['self', str]), + get_datamember = interp2app(W_CPPClass.get_datamember, unwrap_spec=['self', 'text']), is_namespace = interp2app(W_CPPClass.is_namespace), - dispatch = interp2app(W_CPPClass.dispatch, unwrap_spec=['self', str, str]) + dispatch = interp2app(W_CPPClass.dispatch, unwrap_spec=['self', 'text', 'text']) ) W_CPPClass.typedef.acceptable_as_base_class = False @@ -987,11 +987,11 @@ type_name = interp_attrproperty('name', W_CPPClass, wrapfn="newtext"), get_base_names = interp2app(W_ComplexCPPClass.get_base_names), get_method_names = interp2app(W_ComplexCPPClass.get_method_names), - get_overload = interp2app(W_ComplexCPPClass.get_overload, unwrap_spec=['self', str]), + get_overload = interp2app(W_ComplexCPPClass.get_overload, unwrap_spec=['self', 'text']), 
get_datamember_names = interp2app(W_ComplexCPPClass.get_datamember_names), - get_datamember = interp2app(W_ComplexCPPClass.get_datamember, unwrap_spec=['self', str]), + get_datamember = interp2app(W_ComplexCPPClass.get_datamember, unwrap_spec=['self', 'text']), is_namespace = interp2app(W_ComplexCPPClass.is_namespace), - dispatch = interp2app(W_CPPClass.dispatch, unwrap_spec=['self', str, str]) + dispatch = interp2app(W_CPPClass.dispatch, unwrap_spec=['self', 'text', 'text']) ) W_ComplexCPPClass.typedef.acceptable_as_base_class = False diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1505,7 +1505,7 @@ from pypy.module._cffi_backend import cffi1_module cffi1_module.load_cffi1_module(space, name, path, initptr) - at unwrap_spec(path=str, name=str) + at unwrap_spec(path='text', name='text') def load_extension_module(space, path, name): # note: this is used both to load CPython-API-style C extension # modules (cpyext) and to load CFFI-style extension modules diff --git a/pypy/module/crypt/interp_crypt.py b/pypy/module/crypt/interp_crypt.py --- a/pypy/module/crypt/interp_crypt.py +++ b/pypy/module/crypt/interp_crypt.py @@ -10,7 +10,7 @@ c_crypt = rffi.llexternal('crypt', [rffi.CCHARP, rffi.CCHARP], rffi.CCHARP, compilation_info=eci, releasegil=False) - at unwrap_spec(word=str, salt=str) + at unwrap_spec(word='text', salt='text') def crypt(space, word, salt): """word will usually be a user's password. salt is a 2-character string which will be used to select one of 4096 variations of DES. 
The characters diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py --- a/pypy/module/micronumpy/casting.py +++ b/pypy/module/micronumpy/casting.py @@ -119,7 +119,7 @@ return not all_scalars and max_array_kind >= max_scalar_kind - at unwrap_spec(casting=str) + at unwrap_spec(casting='text') def can_cast(space, w_from, w_totype, casting='safe'): try: target = as_dtype(space, w_totype, allow_None=False) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -519,7 +519,7 @@ return a - at unwrap_spec(s=str, count=int, sep=str, w_dtype=WrappedDefault(None)) + at unwrap_spec(s='text', count=int, sep='text', w_dtype=WrappedDefault(None)) def fromstring(space, s, w_dtype=None, count=-1, sep=''): dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -700,7 +700,7 @@ self.alignment = alignment self.flags = flags - @unwrap_spec(new_order=str) + @unwrap_spec(new_order='text') def descr_newbyteorder(self, space, new_order=NPY.SWAP): newendian = byteorder_converter(space, new_order) endian = self.byteorder diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -714,7 +714,7 @@ contig = self.implementation.astype(space, dtype, self.get_order()) return contig.argsort(space, w_axis) - @unwrap_spec(order=str, casting=str, subok=bool, copy=bool) + @unwrap_spec(order='text', casting='text', subok=bool, copy=bool) def descr_astype(self, space, w_dtype, order='K', casting='unsafe', subok=True, copy=True): cur_dtype = self.get_dtype() new_dtype = space.interp_w(descriptor.W_Dtype, space.call_function( @@ -857,7 
+857,7 @@ raise oefmt(space.w_NotImplementedError, "getfield not implemented yet") - @unwrap_spec(new_order=str) + @unwrap_spec(new_order='text') def descr_newbyteorder(self, space, new_order=NPY.SWAP): return self.descr_view( space, self.get_dtype().descr_newbyteorder(space, new_order)) @@ -931,7 +931,7 @@ raise oefmt(space.w_NotImplementedError, "setflags not implemented yet") - @unwrap_spec(kind=str) + @unwrap_spec(kind='text') def descr_sort(self, space, w_axis=None, kind='quicksort', w_order=None): # happily ignore the kind # modify the array in-place diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -1422,8 +1422,8 @@ def get(space): return space.fromcache(UfuncState) - at unwrap_spec(nin=int, nout=int, signature=str, w_identity=WrappedDefault(None), - name=str, doc=str, stack_inputs=bool) + at unwrap_spec(nin=int, nout=int, signature='text', w_identity=WrappedDefault(None), + name='text', doc='text', stack_inputs=bool) def frompyfunc(space, w_func, nin, nout, w_dtypes=None, signature='', w_identity=None, name='', doc='', stack_inputs=False): ''' frompyfunc(func, nin, nout) #cpython numpy compatible diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -101,7 +101,7 @@ except RValueError as v: raise mmap_error(self.space, v) - @unwrap_spec(byte=str) + @unwrap_spec(byte='bytes') def write_byte(self, byte): self.check_valid() self.check_writeable() @@ -265,7 +265,7 @@ elif rmmap._MS_WINDOWS: - @unwrap_spec(fileno=int, length=int, tagname=str, + @unwrap_spec(fileno=int, length=int, tagname='text', access=int, offset=OFF_T) def mmap(space, w_subtype, fileno, length, tagname="", access=rmmap._ACCESS_DEFAULT, offset=0): diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ 
b/pypy/module/parser/pyparser.py @@ -48,7 +48,7 @@ return self._build_app_tree(space, self.tree, space.newlist, line_info, col_info) - @unwrap_spec(filename=str) + @unwrap_spec(filename='text') def descr_compile(self, space, filename=""): info = pyparse.CompileInfo(filename, self.mode) try: @@ -85,12 +85,12 @@ return W_STType(tree, mode) - at unwrap_spec(source=str) + at unwrap_spec(source='text') def suite(space, source): return parse_python(space, source, 'exec') - at unwrap_spec(source=str) + at unwrap_spec(source='text') def expr(space, source): return parse_python(space, source, 'eval') diff --git a/pypy/module/pwd/interp_pwd.py b/pypy/module/pwd/interp_pwd.py --- a/pypy/module/pwd/interp_pwd.py +++ b/pypy/module/pwd/interp_pwd.py @@ -106,7 +106,7 @@ "%s: %d" % (msg, widen(uid)))) return make_struct_passwd(space, pw) - at unwrap_spec(name=str) + at unwrap_spec(name='text') def getpwnam(space, name): """ getpwnam(name) -> (pw_name,pw_passwd,pw_uid, diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -636,7 +636,7 @@ # Parse methods - @unwrap_spec(data=str, isfinal=bool) + @unwrap_spec(data='text', isfinal=bool) def Parse(self, space, data, isfinal=False): """Parse(data[, isfinal]) Parse XML data. 
`isfinal' should be true at end of input.""" @@ -663,7 +663,7 @@ w_res = self.Parse(space, data, isfinal=eof) return w_res - @unwrap_spec(base=str) + @unwrap_spec(base='text') def SetBase(self, space, base): XML_SetBase(self.itself, base) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -127,15 +127,15 @@ l_w.append(WrappedOp(name, ofs, logops.repr_of_resop(op))) return l_w - at unwrap_spec(offset=int, repr=str, name=str) + at unwrap_spec(offset=int, repr='text', name='text') def descr_new_resop(space, w_tp, name, offset=-1, repr=''): return WrappedOp(name, offset, repr) - at unwrap_spec(offset=int, repr=str, name=str, hash=r_uint) + at unwrap_spec(offset=int, repr='text', name='text', hash=r_uint) def descr_new_guardop(space, w_tp, name, offset=-1, repr='', hash=r_uint(0)): return GuardOp(name, offset, repr, hash) - at unwrap_spec(repr=str, name=str, jd_name=str, call_depth=int, call_id=int) + at unwrap_spec(repr='text', name='text', jd_name='text', call_depth=int, call_id=int) def descr_new_dmp(space, w_tp, name, repr, jd_name, call_depth, call_id, w_greenkey): @@ -291,7 +291,7 @@ @unwrap_spec(loopno=int, asmaddr=int, asmlen=int, loop_no=int, - type=str, jd_name=str, bridge_no=int) + type='text', jd_name='text', bridge_no=int) def descr_new_jit_loop_info(space, w_subtype, w_greenkey, w_ops, loopno, asmaddr, asmlen, loop_no, type, jd_name, bridge_no=-1): diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -591,7 +591,7 @@ # reset timezone, altzone, daylight and tzname _init_timezone(space) - at unwrap_spec(format=str) + at unwrap_spec(format='text') def strftime(space, format, w_tup=None): """strftime(format[, tuple]) -> string diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py --- 
a/pypy/module/unicodedata/interp_ucd.py +++ b/pypy/module/unicodedata/interp_ucd.py @@ -94,7 +94,7 @@ self.version = unicodedb.version - @unwrap_spec(name=str) + @unwrap_spec(name='text') def _get_code(self, space, name): try: code = self._lookup(name.upper()) @@ -103,7 +103,7 @@ raise OperationError(space.w_KeyError, msg) return space.newint(code) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def lookup(self, space, name): try: code = self._lookup(name.upper()) @@ -177,7 +177,7 @@ code = unichr_to_code_w(space, w_unichr) return space.newtext(self._decomposition(code)) - @unwrap_spec(form=str) + @unwrap_spec(form='text') def normalize(self, space, form, w_unistr): if not space.isinstance_w(w_unistr, space.w_unicode): raise oefmt( diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -46,7 +46,7 @@ # I don't care about speed of those, they're obscure anyway # THIS IS A TERRIBLE HACK TO BE CPYTHON COMPATIBLE - @unwrap_spec(name=str) + @unwrap_spec(name='text') def getitem(self, space, name): try: w_zipimporter = self.cache[name] @@ -86,14 +86,14 @@ def iteritems(self, space): return space.iter(self.items(space)) - @unwrap_spec(name=str) + @unwrap_spec(name='text') def contains(self, space, name): return space.newbool(name in self.cache) def clear(self, space): self.cache = {} - @unwrap_spec(name=str) + @unwrap_spec(name='text') def delitem(self, space, name): del self.cache[name] @@ -215,7 +215,7 @@ except KeyError: return False - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def find_module(self, space, fullname, w_path=None): filename = self.make_filename(fullname) for _, _, ext in ENUMERATE_EXTS: @@ -240,7 +240,7 @@ """ return self.filename + os.path.sep + filename - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def load_module(self, space, fullname): filename = self.make_filename(fullname) 
for compiled, is_package, ext in ENUMERATE_EXTS: @@ -274,7 +274,7 @@ raise raise oefmt(get_error(space), "can't find module '%s'", fullname) - @unwrap_spec(filename=str) + @unwrap_spec(filename='text') def get_data(self, space, filename): filename = self._find_relative_path(filename) try: @@ -287,7 +287,7 @@ # from the zlib module: let's to the same raise zlib_error(space, e.msg) - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def get_code(self, space, fullname): filename = self.make_filename(fullname) for compiled, _, ext in ENUMERATE_EXTS: @@ -311,7 +311,7 @@ "Cannot find source or code for %s in %s", filename, self.name) - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def get_source(self, space, fullname): filename = self.make_filename(fullname) found = False @@ -327,7 +327,7 @@ raise oefmt(get_error(space), "Cannot find source for %s in %s", filename, self.name) - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def get_filename(self, space, fullname): filename = self.make_filename(fullname) for _, is_package, ext in ENUMERATE_EXTS: @@ -337,7 +337,7 @@ raise oefmt(get_error(space), "Cannot find module %s in %s", filename, self.name) - @unwrap_spec(fullname=str) + @unwrap_spec(fullname='text') def is_package(self, space, fullname): filename = self.make_filename(fullname) for _, is_package, ext in ENUMERATE_EXTS: From pypy.commits at gmail.com Tue Dec 6 10:07:41 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:07:41 -0800 (PST) Subject: [pypy-commit] pypy py3.5: "Implement" sys.setswitchinterval(), by scaling the value and calling Message-ID: <5846d43d.c220c20a.5beea.e92b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88908:edba47b26837 Date: 2016-12-06 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/edba47b26837/ Log: "Implement" sys.setswitchinterval(), by scaling the value and calling sys.setcheckinterval(). 
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -59,6 +59,8 @@ 'getrecursionlimit' : 'vm.getrecursionlimit', 'setcheckinterval' : 'vm.setcheckinterval', 'getcheckinterval' : 'vm.getcheckinterval', + 'setswitchinterval' : 'vm.setswitchinterval', + 'getswitchinterval' : 'vm.getswitchinterval', 'exc_info' : 'vm.exc_info', 'settrace' : 'vm.settrace', 'gettrace' : 'vm.gettrace', diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -433,6 +433,14 @@ sys.setcheckinterval(n) assert sys.getcheckinterval() == n + def test_setswitchinterval(self): + import sys + raises(TypeError, sys.setswitchinterval) + orig = sys.getswitchinterval() + for n in 1e-6, 0.1, orig: # orig last to restore starting state + sys.setswitchinterval(n) + assert sys.getswitchinterval() == n + def test_recursionlimit(self): import sys raises(TypeError, sys.getrecursionlimit, 42) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -93,6 +93,22 @@ result = 0 return space.wrap(result) + at unwrap_spec(interval=float) +def setswitchinterval(space, interval): + """For CPython compatibility, this maps to + sys.setcheckinterval(interval * 2000000) + """ + # The scaling factor is chosen so that with the default + # checkinterval value of 10000, it corresponds to 0.005, which is + # the default value of the switchinterval in CPython 3.5 + space.actionflag.setcheckinterval(int(interval * 2000000.0)) + +def getswitchinterval(space): + """For CPython compatibility, this maps to + sys.getcheckinterval() / 2000000 + """ + return space.wrap(space.actionflag.getcheckinterval() / 2000000.0) + def exc_info(space): """Return the (type, value, traceback) of the most recent exception caught by an except clause in the current stack frame 
or in an older stack From pypy.commits at gmail.com Tue Dec 6 10:26:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:26:16 -0800 (PST) Subject: [pypy-commit] pypy py3.5: may fix test_functools, but retranslation is needed to check Message-ID: <5846d898.46831c0a.ca407.7fd5@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88909:5cff42263511 Date: 2016-12-06 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/5cff42263511/ Log: may fix test_functools, but retranslation is needed to check diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -34,6 +34,7 @@ """ __slots__ = ('_func', '_args', '_keywords', '__dict__') + __module__ = 'functools' # instead of '_functools' def __init__(*args, **keywords): if len(args) < 2: From pypy.commits at gmail.com Tue Dec 6 10:41:08 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:41:08 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Add sys.hash_info.{hash_bits, seed_bits, cutoff} Message-ID: <5846dc14.8ab81c0a.1774f.7d3c@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88910:e14df011c01b Date: 2016-12-06 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/e14df011c01b/ Log: Add sys.hash_info.{hash_bits,seed_bits,cutoff} diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -40,6 +40,9 @@ nan = structseqfield(3) imag = structseqfield(4) algorithm = structseqfield(5) + hash_bits = structseqfield(6) + seed_bits = structseqfield(7) + cutoff = structseqfield(8) class thread_info(metaclass=structseqtype): name = structseqfield(0) @@ -76,6 +79,9 @@ return space.call_function(w_int_info, space.newtuple(info_w)) def get_hash_info(space): + HASH_HASH_BITS = 8 * rffi.sizeof(lltype.Signed) + HASH_SEED_BITS = 0 # XXX don't know what this is supposed to be + HASH_CUTOFF = 0 info_w = [ space.wrap(8 * rffi.sizeof(lltype.Signed)), 
space.wrap(HASH_MODULUS), @@ -83,6 +89,9 @@ space.wrap(HASH_NAN), space.wrap(HASH_IMAG), space.wrap(HASH_ALGORITHM), + space.wrap(HASH_HASH_BITS), + space.wrap(HASH_SEED_BITS), + space.wrap(HASH_CUTOFF), ] w_hash_info = app.wget(space, "hash_info") return space.call_function(w_hash_info, space.newtuple(info_w)) From pypy.commits at gmail.com Tue Dec 6 10:42:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:42:58 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <5846dc82.6249c20a.7805b.f85e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88911:18e8a3f27525 Date: 2016-12-06 16:42 +0100 http://bitbucket.org/pypy/pypy/changeset/18e8a3f27525/ Log: fix test diff --git a/lib-python/3/test/test_reprlib.py b/lib-python/3/test/test_reprlib.py --- a/lib-python/3/test/test_reprlib.py +++ b/lib-python/3/test/test_reprlib.py @@ -407,7 +407,8 @@ wrapped = MyContainer3.wrapped wrapper = MyContainer3.wrapper for name in assigned: - self.assertIs(getattr(wrapper, name), getattr(wrapped, name)) + # pypy fix: can't use assertIs() to compare two strings + self.assertEqual(getattr(wrapper, name), getattr(wrapped, name)) if __name__ == "__main__": unittest.main() From pypy.commits at gmail.com Tue Dec 6 10:44:21 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:44:21 -0800 (PST) Subject: [pypy-commit] pypy py3.5: trying harder to pretend this lives in the os module Message-ID: <5846dcd5.e576c20a.c0ecf.f4c3@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88912:82d53b0445a4 Date: 2016-12-06 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/82d53b0445a4/ Log: trying harder to pretend this lives in the os module diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -20,6 +20,7 @@ class stat_result(metaclass=structseqtype): name = "os.stat_result" + __module__ = "os" st_mode = structseqfield(0, 
"protection bits") st_ino = structseqfield(1, "inode") From pypy.commits at gmail.com Tue Dec 6 10:56:42 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 07:56:42 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Avoid wrap_oserror() with unwrapped filename arguments, because Message-ID: <5846dfba.41a3c20a.7de65.f9be@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88913:75de6890ce07 Date: 2016-12-06 16:56 +0100 http://bitbucket.org/pypy/pypy/changeset/75de6890ce07/ Log: Avoid wrap_oserror() with unwrapped filename arguments, because if the filename was provided as bytes we want to send it to the exception object as bytes too diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1162,11 +1162,9 @@ rposix.kill(os.getpid(), signal.SIGABRT) @unwrap_spec( - src='fsencode', dst='fsencode', # <- simpler: link() is never on Windows src_dir_fd=DirFD(rposix.HAVE_LINKAT), dst_dir_fd=DirFD(rposix.HAVE_LINKAT), follow_symlinks=bool) -def link( - space, src, dst, __kwonly__, +def link(space, w_src, w_dst, __kwonly__, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """\ @@ -1183,6 +1181,8 @@ src_dir_fd, dst_dir_fd, and follow_symlinks may not be implemented on your platform. 
If they are unavailable, using them will raise a NotImplementedError.""" + src = space.fsencode_w(w_src) + dst = space.fsencode_w(w_dst) try: if (rposix.HAVE_LINKAT and (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD @@ -1191,8 +1191,8 @@ else: rposix.link(src, dst) except OSError as e: - raise wrap_oserror(space, e, filename=src, filename2=dst, - eintr_retry=False) + raise wrap_oserror2(space, e, filename=w_src, filename2=w_dst, + eintr_retry=False) @unwrap_spec(dir_fd=DirFD(rposix.HAVE_SYMLINKAT)) @@ -1658,16 +1658,16 @@ except OSError as e: raise wrap_oserror(space, e, eintr_retry=False) - at unwrap_spec(path='fsencode') -def chroot(space, path): +def chroot(space, w_path): """ chroot(path) Change root directory to path. """ + w_path = space.fsencode_w(w_path) try: os.chroot(path) except OSError as e: - raise wrap_oserror(space, e, path, eintr_retry=False) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) return space.w_None def getgid(space): @@ -2030,17 +2030,18 @@ raise wrap_oserror2(space, e, w_path, eintr_retry=False) - at unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) -def lchown(space, path, uid, gid): + at unwrap_spec(uid=c_uid_t, gid=c_gid_t) +def lchown(space, w_path, uid, gid): """lchown(path, uid, gid) Change the owner and group id of path to the numeric uid and gid. This function will not follow symbolic links. 
Equivalent to os.chown(path, uid, gid, follow_symlinks=False).""" + path = space.fsencode_w(w_path) try: os.lchown(path, uid, gid) except OSError as e: - raise wrap_oserror(space, e, path, eintr_retry=False) + raise wrap_oserror2(space, e, w_path, eintr_retry=False) @unwrap_spec(uid=c_uid_t, gid=c_gid_t) def fchown(space, w_fd, uid, gid): From pypy.commits at gmail.com Tue Dec 6 11:27:53 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 08:27:53 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5846e709.c64bc20a.dbbb2.1882@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r829:075383c08e66 Date: 2016-12-06 17:27 +0100 http://bitbucket.org/pypy/pypy.org/changeset/075383c08e66/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $66386 of $105000 (63.2%) + $66429 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Tue Dec 6 12:56:12 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 09:56:12 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Bare 'raise' statements didn't work at all inside "hidden" app-level Message-ID: <5846fbbc.e6b0c20a.2bab1.361f@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88914:0fd0d21d3ab1 Date: 2016-12-06 18:55 +0100 http://bitbucket.org/pypy/pypy/changeset/0fd0d21d3ab1/ Log: Bare 'raise' statements didn't work at all inside "hidden" app-level helpers. Make them work at least a little bit. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -35,6 +35,7 @@ f_lineno = 0 # current lineno for tracing is_being_profiled = False w_locals = None + hidden_operationerr = None def __init__(self, pycode): self.f_lineno = pycode.co_firstlineno diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -699,7 +699,10 @@ if nbargs > 2: raise BytecodeCorruption("bad RAISE_VARARGS oparg") if nbargs == 0: - last_operr = self.space.getexecutioncontext().sys_exc_info() + if not self.hide(): + last_operr = self.space.getexecutioncontext().sys_exc_info() + else: + last_operr = self.getorcreatedebug().hidden_operationerr if last_operr is None: raise oefmt(space.w_RuntimeError, "No active exception to reraise") @@ -773,6 +776,10 @@ if operationerr is not None: # otherwise, don't change sys_exc_info if not self.hide(): ec.set_sys_exc_info(operationerr) + else: + # for hidden frames, a more limited solution should be + # enough: store away the exception on the frame + self.getorcreatedebug().hidden_operationerr = operationerr def end_finally(self): # unlike CPython, there are two statically distinct cases: the diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- 
a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -840,6 +840,17 @@ assert ('unexpected internal exception (please ' 'report a bug): UnexpectedException') in err + def test_bare_raise_in_app_helper(self): + space = self.space + w = space.wrap + def app_g3(a): + try: + 1 / a + except ZeroDivisionError: + raise + g3 = gateway.app2interp(app_g3) + space.raises_w(space.w_ZeroDivisionError, g3, space, w(0)) + def test_unwrap_spec_default_bytes(self): space = self.space @gateway.unwrap_spec(s='bufferstr') From pypy.commits at gmail.com Tue Dec 6 13:11:06 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Dec 2016 10:11:06 -0800 (PST) Subject: [pypy-commit] pypy issue2446: test for issue #2446 (confirmed), issue #2445 (not confirmed), "fix" for #2446 Message-ID: <5846ff3a.ca06c20a.ecbba.46b1@mx.google.com> Author: Matti Picus Branch: issue2446 Changeset: r88915:66f21f2d6751 Date: 2016-12-06 20:09 +0200 http://bitbucket.org/pypy/pypy/changeset/66f21f2d6751/ Log: test for issue #2446 (confirmed), issue #2445 (not confirmed), "fix" for #2446 diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -77,6 +77,11 @@ "from a PyObject", w_type) raise + # XXX Assign some attributes of the w_type to the w_obj, + # i.e. w_type.w_doc => w_obj.__doc__ + # are there more? 
+ if w_type.w_doc: + space.setattr(w_obj, space.wrap('__doc__'), w_type.w_doc) track_reference(space, obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -142,7 +142,7 @@ assert fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) - def test_multiple_inheritance(self): + def test_multiple_inheritance1(self): module = self.import_module(name='foo') obj = module.UnicodeSubtype(u'xyz') obj2 = module.UnicodeSubtype2() @@ -422,7 +422,7 @@ assert space.int_w(space.getattr(w_class, w_name)) == 1 space.delitem(w_dict, w_name) - def test_multiple_inheritance(self, space, api): + def test_multiple_inheritance2(self, space, api): w_class = space.appexec([], """(): class A(object): pass @@ -1167,3 +1167,43 @@ __metaclass__ = FooType print repr(X) X() + + def test_multiple_inheritance3(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + obj = PyObject_New(PyObject, &Foo12_Type); + return obj; + ''' + )], prologue=''' + static PyTypeObject Foo1_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo1", + }; + static PyTypeObject Foo2_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo2", + }; + static PyTypeObject Foo12_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo12", + }; + static char doc[]="The foo12 object"; + ''', more_init = ''' + Foo1_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Foo2_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Foo12_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo12_Type.tp_base = &Foo1_Type; + Foo12_Type.tp_doc = doc; + Foo12_Type.tp_bases = PyTuple_Pack(2, &Foo1_Type, &Foo2_Type); + if (PyType_Ready(&Foo1_Type) < 0) INITERROR; + if (PyType_Ready(&Foo2_Type) < 0) INITERROR; + if (PyType_Ready(&Foo12_Type) < 0) INITERROR; + ''') + obj = module.new_obj() + assert 
'foo.foo12' in str(obj) + assert type(obj).__doc__ == "The foo12 object" + assert obj.__doc__ == "The foo12 object" + + From pypy.commits at gmail.com Tue Dec 6 13:21:24 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 10:21:24 -0800 (PST) Subject: [pypy-commit] pypy py3.5: translation fix Message-ID: <584701a4.ca10c20a.5d617.4a8e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88916:1db6128a92ee Date: 2016-12-06 19:20 +0100 http://bitbucket.org/pypy/pypy/changeset/1db6128a92ee/ Log: translation fix diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1191,7 +1191,7 @@ else: rposix.link(src, dst) except OSError as e: - raise wrap_oserror2(space, e, filename=w_src, filename2=w_dst, + raise wrap_oserror2(space, e, w_filename=w_src, w_filename2=w_dst, eintr_retry=False) @@ -1663,7 +1663,7 @@ Change root directory to path. """ - w_path = space.fsencode_w(w_path) + path = space.fsencode_w(w_path) try: os.chroot(path) except OSError as e: From pypy.commits at gmail.com Tue Dec 6 14:09:11 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 06 Dec 2016 11:09:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Test (annoyingly slow) and fix for BZ2Decompressor.decompress() with large max_length Message-ID: <58470cd7.61adc20a.f6b81.6244@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88917:8c6ea1cc81e6 Date: 2016-12-06 19:08 +0000 http://bitbucket.org/pypy/pypy/changeset/8c6ea1cc81e6/ Log: Test (annoyingly slow) and fix for BZ2Decompressor.decompress() with large max_length diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -206,7 +206,11 @@ self.left = 0 def get_data_size(self): - return self.current_size - rffi.getintfield(self.bzs, 'c_avail_out') + curr_out = self.current_size - rffi.getintfield(self.bzs, 
'c_avail_out') + total_size = curr_out + for s in self.temp: + total_size += len(s) + return total_size def _allocate_chunk(self, size): self.raw_buf, self.gc_buf, self.case_num = rffi.alloc_buffer(size) @@ -231,6 +235,8 @@ newsize = size if self.max_length == -1: newsize = _new_buffer_size(size) + else: + newsize = min(newsize, self.max_length - self.get_data_size()) self._allocate_chunk(newsize) def make_result_string(self): diff --git a/pypy/module/bz2/test/test_interpbz2.py b/pypy/module/bz2/test/test_interpbz2.py new file mode 100644 --- /dev/null +++ b/pypy/module/bz2/test/test_interpbz2.py @@ -0,0 +1,16 @@ +import pytest +import py +from pypy.module.bz2.interp_bz2 import W_BZ2Decompressor, INITIAL_BUFFER_SIZE + + at pytest.yield_fixture +def w_decomp(space): + w_decomp = W_BZ2Decompressor(space) + yield w_decomp + + at pytest.mark.parametrize('size', [1234, INITIAL_BUFFER_SIZE, 12345]) +def test_decompress_max_length(space, w_decomp, size): + filename = py.path.local(__file__).new(basename='largetest.bz2') + with open(str(filename), 'rb') as f: + data = f.read() + result = w_decomp.decompress(data, size) + assert space.int_w(space.len(result)) == size From pypy.commits at gmail.com Tue Dec 6 17:00:09 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 06 Dec 2016 14:00:09 -0800 (PST) Subject: [pypy-commit] pypy issue2446: fix missing __doc__ attribute properly (arigato) Message-ID: <584734e9.ce181c0a.20bb1.2325@mx.google.com> Author: Matti Picus Branch: issue2446 Changeset: r88918:5432199e0bc7 Date: 2016-12-06 20:51 +0200 http://bitbucket.org/pypy/pypy/changeset/5432199e0bc7/ Log: fix missing __doc__ attribute properly (arigato) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -77,11 +77,6 @@ "from a PyObject", w_type) raise - # XXX Assign some attributes of the w_type to the w_obj, - # i.e. w_type.w_doc => w_obj.__doc__ - # are there more? 
- if w_type.w_doc: - space.setattr(w_obj, space.wrap('__doc__'), w_type.w_doc) track_reference(space, obj, w_obj) return w_obj diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -327,6 +327,8 @@ w_obj = W_PyCWrapperObject(space, pto, method_name, wrapper_func, wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = space.wrap(w_obj) + if pto.c_tp_doc: + dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) From pypy.commits at gmail.com Tue Dec 6 17:19:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 06 Dec 2016 14:19:37 -0800 (PST) Subject: [pypy-commit] pypy raw-calloc: Replace malloc+memset with a single calloc. This might be useful for Message-ID: <58473979.c11d1c0a.c5e60.2cb4@mx.google.com> Author: Armin Rigo Branch: raw-calloc Changeset: r88919:9dfb829db9eb Date: 2016-12-06 23:18 +0100 http://bitbucket.org/pypy/pypy/changeset/9dfb829db9eb/ Log: Replace malloc+memset with a single calloc. This might be useful for large allocations. Also remove the deprecated stack_malloc. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -593,6 +593,8 @@ log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): + # NB. 
the operation 'raw_malloc' is not supported; this is for + # the operation 'malloc'/'malloc_varsize' with {flavor: 'gc'} d = op.args[1].value.copy() d.pop('flavor') add_memory_pressure = d.pop('add_memory_pressure', False) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -427,6 +427,13 @@ return result mh._ll_malloc_fixedsize = _ll_malloc_fixedsize + def _ll_malloc_fixedsize_zero(size): + result = mh.allocate(size, zero=True) + if not result: + raise MemoryError() + return result + mh._ll_malloc_fixedsize_zero = _ll_malloc_fixedsize_zero + def _ll_compute_size(length, size, itemsize): try: varsize = ovfcheck(itemsize * length) @@ -453,10 +460,9 @@ def _ll_malloc_varsize_no_length_zero(length, size, itemsize): tot_size = _ll_compute_size(length, size, itemsize) - result = mh.allocate(tot_size) + result = mh.allocate(tot_size, zero=True) if not result: raise MemoryError() - llmemory.raw_memclear(result, tot_size) return result mh.ll_malloc_varsize_no_length_zero = _ll_malloc_varsize_no_length_zero @@ -470,17 +476,16 @@ mh = mallocHelpers() mh.allocate = llmemory.raw_malloc ll_raw_malloc_fixedsize = mh._ll_malloc_fixedsize + ll_raw_malloc_fixedsize_zero = mh._ll_malloc_fixedsize_zero ll_raw_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length ll_raw_malloc_varsize = mh.ll_malloc_varsize ll_raw_malloc_varsize_no_length_zero = mh.ll_malloc_varsize_no_length_zero - stack_mh = mallocHelpers() - stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size) - ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize - if self.translator: self.raw_malloc_fixedsize_ptr = self.inittime_helper( ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address) + self.raw_malloc_fixedsize_zero_ptr = self.inittime_helper( + ll_raw_malloc_fixedsize_zero, [lltype.Signed], llmemory.Address) self.raw_malloc_varsize_no_length_ptr = 
self.inittime_helper( ll_raw_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False) self.raw_malloc_varsize_ptr = self.inittime_helper( @@ -488,9 +493,6 @@ self.raw_malloc_varsize_no_length_zero_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length_zero, [lltype.Signed]*3, llmemory.Address, inline=False) - self.stack_malloc_fixedsize_ptr = self.inittime_helper( - ll_stack_malloc_fixedsize, [lltype.Signed], llmemory.Address) - def gct_malloc(self, hop, add_flags=None): TYPE = hop.spaceop.result.concretetype.TO assert not TYPE._is_varsize() @@ -503,21 +505,16 @@ hop.cast_result(v_raw) def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], + if flags.get('zero'): + ll_func = self.raw_malloc_fixedsize_zero_ptr + else: + ll_func = self.raw_malloc_fixedsize_ptr + v_raw = hop.genop("direct_call", [ll_func, c_size], resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) if flags.get('track_allocation', True): hop.genop("track_alloc_start", [v_raw]) return v_raw - def gct_fv_stack_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.stack_malloc_fixedsize_ptr, c_size], - resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) - return v_raw - def gct_malloc_varsize(self, hop, add_flags=None): flags = hop.spaceop.args[1].value if add_flags: diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -997,11 +997,14 @@ # __________________________________________________________ # operations on addresses - def op_raw_malloc(self, size): + def op_raw_malloc(self, size, zero): + assert lltype.typeOf(size) == lltype.Signed + return llmemory.raw_malloc(size, zero=zero) + + def op_boehm_malloc(self, size): assert lltype.typeOf(size) == lltype.Signed return llmemory.raw_malloc(size) - - 
op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc + op_boehm_malloc_atomic = op_boehm_malloc def op_boehm_register_finalizer(self, p, finalizer): pass @@ -1069,9 +1072,6 @@ assert offset.TYPE == ARGTYPE getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value - def op_stack_malloc(self, size): # mmh - raise NotImplementedError("backend only") - def op_track_alloc_start(self, addr): # we don't do tracking at this level checkadr(addr) diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -506,13 +506,17 @@ llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, sandboxsafe=True, _nowrapper=True) +llimpl_calloc = rffi.llexternal('calloc', [lltype.Signed, lltype.Signed], + llmemory.Address, + sandboxsafe=True, _nowrapper=True) llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void, sandboxsafe=True, _nowrapper=True) def llimpl_arena_malloc(nbytes, zero): - addr = llimpl_malloc(nbytes) - if bool(addr): - llimpl_arena_reset(addr, nbytes, zero) + if zero: + addr = llimpl_calloc(nbytes, 1) + else: + addr = llimpl_malloc(nbytes) return addr llimpl_arena_malloc._always_inline_ = True register_external(arena_malloc, [int, int], llmemory.Address, diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -7,6 +7,7 @@ import weakref from rpython.annotator.bookkeeper import analyzer_for from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool +from rpython.annotator.model import SomeBool from rpython.rlib.objectmodel import Symbolic, specialize from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lltype import SomePtr @@ -936,14 +937,15 @@ # ____________________________________________________________ -def raw_malloc(size): 
+def raw_malloc(size, zero=False): if not isinstance(size, AddressOffset): raise NotImplementedError(size) - return size._raw_malloc([], zero=False) + return size._raw_malloc([], zero=zero) @analyzer_for(raw_malloc) -def ann_raw_malloc(s_size): +def ann_raw_malloc(s_size, s_zero=None): assert isinstance(s_size, SomeInteger) # XXX add noneg...? + assert s_zero is None or isinstance(s_zero, SomeBool) return SomeAddress() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,7 +396,6 @@ 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), - 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), 'adr_add': LLOp(canfold=True), diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -574,10 +574,14 @@ # memory addresses @typer_for(llmemory.raw_malloc) -def rtype_raw_malloc(hop): - v_size, = hop.inputargs(lltype.Signed) +def rtype_raw_malloc(hop, i_zero=None): + v_size = hop.inputarg(lltype.Signed, arg=0) + v_zero, = parse_kwds(hop, (i_zero, None)) + if v_zero is None: + v_zero = hop.inputconst(lltype.Bool, False) hop.exception_cannot_occur() - return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address) + return hop.genop('raw_malloc', [v_size, v_zero], + resulttype=llmemory.Address) @typer_for(llmemory.raw_malloc_usage) def rtype_raw_malloc_usage(hop): diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -372,19 +372,6 @@ result = interpret(getids, [i, j]) assert result -def test_stack_malloc(): - py.test.skip("stack-flavored mallocs no longer supported") - class A(object): - pass - def f(): - a = A() - a.i = 1 - 
return a.i - interp, graph = get_interpreter(f, []) - graph.startblock.operations[0].args[1] = inputconst(Void, {'flavor': "stack"}) - result = interp.eval_graph(graph, []) - assert result == 1 - def test_invalid_stack_access(): py.test.skip("stack-flavored mallocs no longer supported") class A(object): diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -608,16 +608,6 @@ return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \ % (self.expr(op.args[0]), self.expr(op.args[1])) - def OP_RAW_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult) - - def OP_STACK_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_STACK_MALLOC(%s, %s, void *);" % (esize, eresult) - def OP_DIRECT_FIELDPTR(self, op): return self.OP_GETFIELD(op, ampersand='&') diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -8,11 +8,14 @@ #define OP_STACK_CURRENT(r) r = (Signed)&r -#define OP_RAW_MALLOC(size, r, restype) { \ - r = (restype) malloc(size); \ - if (r != NULL) { \ - COUNT_MALLOC; \ - } \ +#define OP_RAW_MALLOC(size, zero, result) { \ + if (zero) \ + result = calloc(size, 1); \ + else \ + result = malloc(size); \ + if (result != NULL) { \ + COUNT_MALLOC; \ + } \ } #define OP_RAW_FREE(p, r) free(p); COUNT_FREE; @@ -26,10 +29,6 @@ #define alloca _alloca #endif -#define OP_STACK_MALLOC(size,r,restype) \ - r = (restype) alloca(size); \ - if (r != NULL) memset((void*) r, 0, size); - #define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size); #define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size); diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- 
a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -32,7 +32,29 @@ assert res == 42 res = fc(1) assert res == 1 - + +def test_memory_access_zero(): + def f(): + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=False) + addr.signed[1] = 10000 + i + blocks.append(addr) + for addr in blocks: + raw_free(addr) + result = 0 + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=True) + result |= addr.signed[1] + blocks.append(addr) + for addr in blocks: + raw_free(addr) + return result + fc = compile(f, []) + res = fc() + assert res == 0 + def test_memory_float(): S = lltype.GcStruct("S", ("x", lltype.Float), ("y", lltype.Float)) offset = FieldOffset(S, 'x') From pypy.commits at gmail.com Wed Dec 7 03:19:42 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:19:42 -0800 (PST) Subject: [pypy-commit] pypy raw-calloc: Remove deprecated test Message-ID: <5847c61e.41a3c20a.7de65.2591@mx.google.com> Author: Armin Rigo Branch: raw-calloc Changeset: r88920:f0257e7ab182 Date: 2016-12-07 09:17 +0100 http://bitbucket.org/pypy/pypy/changeset/f0257e7ab182/ Log: Remove deprecated test diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -177,18 +177,6 @@ fn = compile(f, [int]) assert fn(1) == 2 -def test_flavored_malloc_stack(): - class A(object): - _alloc_flavor_ = "stack" - def __init__(self, val): - self.val = val - def f(x): - a = A(x + 1) - result = a.val - return result - fn = compile(f, [int]) - assert fn(1) == 2 - def test_gcref(): if sys.platform == 'darwin': py.test.skip("'boehm' may crash") From pypy.commits at gmail.com Wed Dec 7 03:19:44 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:19:44 -0800 (PST) Subject: [pypy-commit] pypy raw-calloc: close branch, ready to merge Message-ID: 
<5847c620.c6bdc20a.bad00.2c1c@mx.google.com> Author: Armin Rigo Branch: raw-calloc Changeset: r88921:d8d114949719 Date: 2016-12-07 09:18 +0100 http://bitbucket.org/pypy/pypy/changeset/d8d114949719/ Log: close branch, ready to merge From pypy.commits at gmail.com Wed Dec 7 03:19:46 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:19:46 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge raw-calloc Message-ID: <5847c622.0777c20a.3feea.3156@mx.google.com> Author: Armin Rigo Branch: Changeset: r88922:e3f6864ebcdc Date: 2016-12-07 09:18 +0100 http://bitbucket.org/pypy/pypy/changeset/e3f6864ebcdc/ Log: hg merge raw-calloc Replace malloc+memset with a single calloc. This might be useful for large allocations. Also remove the deprecated stack_malloc. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -593,6 +593,8 @@ log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): + # NB. 
the operation 'raw_malloc' is not supported; this is for + # the operation 'malloc'/'malloc_varsize' with {flavor: 'gc'} d = op.args[1].value.copy() d.pop('flavor') add_memory_pressure = d.pop('add_memory_pressure', False) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -427,6 +427,13 @@ return result mh._ll_malloc_fixedsize = _ll_malloc_fixedsize + def _ll_malloc_fixedsize_zero(size): + result = mh.allocate(size, zero=True) + if not result: + raise MemoryError() + return result + mh._ll_malloc_fixedsize_zero = _ll_malloc_fixedsize_zero + def _ll_compute_size(length, size, itemsize): try: varsize = ovfcheck(itemsize * length) @@ -453,10 +460,9 @@ def _ll_malloc_varsize_no_length_zero(length, size, itemsize): tot_size = _ll_compute_size(length, size, itemsize) - result = mh.allocate(tot_size) + result = mh.allocate(tot_size, zero=True) if not result: raise MemoryError() - llmemory.raw_memclear(result, tot_size) return result mh.ll_malloc_varsize_no_length_zero = _ll_malloc_varsize_no_length_zero @@ -470,17 +476,16 @@ mh = mallocHelpers() mh.allocate = llmemory.raw_malloc ll_raw_malloc_fixedsize = mh._ll_malloc_fixedsize + ll_raw_malloc_fixedsize_zero = mh._ll_malloc_fixedsize_zero ll_raw_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length ll_raw_malloc_varsize = mh.ll_malloc_varsize ll_raw_malloc_varsize_no_length_zero = mh.ll_malloc_varsize_no_length_zero - stack_mh = mallocHelpers() - stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size) - ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize - if self.translator: self.raw_malloc_fixedsize_ptr = self.inittime_helper( ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address) + self.raw_malloc_fixedsize_zero_ptr = self.inittime_helper( + ll_raw_malloc_fixedsize_zero, [lltype.Signed], llmemory.Address) self.raw_malloc_varsize_no_length_ptr = 
self.inittime_helper( ll_raw_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False) self.raw_malloc_varsize_ptr = self.inittime_helper( @@ -488,9 +493,6 @@ self.raw_malloc_varsize_no_length_zero_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length_zero, [lltype.Signed]*3, llmemory.Address, inline=False) - self.stack_malloc_fixedsize_ptr = self.inittime_helper( - ll_stack_malloc_fixedsize, [lltype.Signed], llmemory.Address) - def gct_malloc(self, hop, add_flags=None): TYPE = hop.spaceop.result.concretetype.TO assert not TYPE._is_varsize() @@ -503,21 +505,16 @@ hop.cast_result(v_raw) def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], + if flags.get('zero'): + ll_func = self.raw_malloc_fixedsize_zero_ptr + else: + ll_func = self.raw_malloc_fixedsize_ptr + v_raw = hop.genop("direct_call", [ll_func, c_size], resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) if flags.get('track_allocation', True): hop.genop("track_alloc_start", [v_raw]) return v_raw - def gct_fv_stack_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.stack_malloc_fixedsize_ptr, c_size], - resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) - return v_raw - def gct_malloc_varsize(self, hop, add_flags=None): flags = hop.spaceop.args[1].value if add_flags: diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -997,11 +997,14 @@ # __________________________________________________________ # operations on addresses - def op_raw_malloc(self, size): + def op_raw_malloc(self, size, zero): + assert lltype.typeOf(size) == lltype.Signed + return llmemory.raw_malloc(size, zero=zero) + + def op_boehm_malloc(self, size): assert lltype.typeOf(size) == lltype.Signed return llmemory.raw_malloc(size) - - 
op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc + op_boehm_malloc_atomic = op_boehm_malloc def op_boehm_register_finalizer(self, p, finalizer): pass @@ -1069,9 +1072,6 @@ assert offset.TYPE == ARGTYPE getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value - def op_stack_malloc(self, size): # mmh - raise NotImplementedError("backend only") - def op_track_alloc_start(self, addr): # we don't do tracking at this level checkadr(addr) diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -506,13 +506,17 @@ llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, sandboxsafe=True, _nowrapper=True) +llimpl_calloc = rffi.llexternal('calloc', [lltype.Signed, lltype.Signed], + llmemory.Address, + sandboxsafe=True, _nowrapper=True) llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void, sandboxsafe=True, _nowrapper=True) def llimpl_arena_malloc(nbytes, zero): - addr = llimpl_malloc(nbytes) - if bool(addr): - llimpl_arena_reset(addr, nbytes, zero) + if zero: + addr = llimpl_calloc(nbytes, 1) + else: + addr = llimpl_malloc(nbytes) return addr llimpl_arena_malloc._always_inline_ = True register_external(arena_malloc, [int, int], llmemory.Address, diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -7,6 +7,7 @@ import weakref from rpython.annotator.bookkeeper import analyzer_for from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool +from rpython.annotator.model import SomeBool from rpython.rlib.objectmodel import Symbolic, specialize from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lltype import SomePtr @@ -936,14 +937,15 @@ # ____________________________________________________________ -def raw_malloc(size): 
+def raw_malloc(size, zero=False): if not isinstance(size, AddressOffset): raise NotImplementedError(size) - return size._raw_malloc([], zero=False) + return size._raw_malloc([], zero=zero) @analyzer_for(raw_malloc) -def ann_raw_malloc(s_size): +def ann_raw_malloc(s_size, s_zero=None): assert isinstance(s_size, SomeInteger) # XXX add noneg...? + assert s_zero is None or isinstance(s_zero, SomeBool) return SomeAddress() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,7 +396,6 @@ 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), - 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), 'adr_add': LLOp(canfold=True), diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -574,10 +574,14 @@ # memory addresses @typer_for(llmemory.raw_malloc) -def rtype_raw_malloc(hop): - v_size, = hop.inputargs(lltype.Signed) +def rtype_raw_malloc(hop, i_zero=None): + v_size = hop.inputarg(lltype.Signed, arg=0) + v_zero, = parse_kwds(hop, (i_zero, None)) + if v_zero is None: + v_zero = hop.inputconst(lltype.Bool, False) hop.exception_cannot_occur() - return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address) + return hop.genop('raw_malloc', [v_size, v_zero], + resulttype=llmemory.Address) @typer_for(llmemory.raw_malloc_usage) def rtype_raw_malloc_usage(hop): diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -372,19 +372,6 @@ result = interpret(getids, [i, j]) assert result -def test_stack_malloc(): - py.test.skip("stack-flavored mallocs no longer supported") - class A(object): - pass - def f(): - a = A() - a.i = 1 - 
return a.i - interp, graph = get_interpreter(f, []) - graph.startblock.operations[0].args[1] = inputconst(Void, {'flavor': "stack"}) - result = interp.eval_graph(graph, []) - assert result == 1 - def test_invalid_stack_access(): py.test.skip("stack-flavored mallocs no longer supported") class A(object): diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -608,16 +608,6 @@ return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \ % (self.expr(op.args[0]), self.expr(op.args[1])) - def OP_RAW_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult) - - def OP_STACK_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_STACK_MALLOC(%s, %s, void *);" % (esize, eresult) - def OP_DIRECT_FIELDPTR(self, op): return self.OP_GETFIELD(op, ampersand='&') diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -8,11 +8,14 @@ #define OP_STACK_CURRENT(r) r = (Signed)&r -#define OP_RAW_MALLOC(size, r, restype) { \ - r = (restype) malloc(size); \ - if (r != NULL) { \ - COUNT_MALLOC; \ - } \ +#define OP_RAW_MALLOC(size, zero, result) { \ + if (zero) \ + result = calloc(size, 1); \ + else \ + result = malloc(size); \ + if (result != NULL) { \ + COUNT_MALLOC; \ + } \ } #define OP_RAW_FREE(p, r) free(p); COUNT_FREE; @@ -26,10 +29,6 @@ #define alloca _alloca #endif -#define OP_STACK_MALLOC(size,r,restype) \ - r = (restype) alloca(size); \ - if (r != NULL) memset((void*) r, 0, size); - #define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size); #define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size); diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- 
a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -32,7 +32,29 @@ assert res == 42 res = fc(1) assert res == 1 - + +def test_memory_access_zero(): + def f(): + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=False) + addr.signed[1] = 10000 + i + blocks.append(addr) + for addr in blocks: + raw_free(addr) + result = 0 + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=True) + result |= addr.signed[1] + blocks.append(addr) + for addr in blocks: + raw_free(addr) + return result + fc = compile(f, []) + res = fc() + assert res == 0 + def test_memory_float(): S = lltype.GcStruct("S", ("x", lltype.Float), ("y", lltype.Float)) offset = FieldOffset(S, 'x') @@ -155,18 +177,6 @@ fn = compile(f, [int]) assert fn(1) == 2 -def test_flavored_malloc_stack(): - class A(object): - _alloc_flavor_ = "stack" - def __init__(self, val): - self.val = val - def f(x): - a = A(x + 1) - result = a.val - return result - fn = compile(f, [int]) - assert fn(1) == 2 - def test_gcref(): if sys.platform == 'darwin': py.test.skip("'boehm' may crash") From pypy.commits at gmail.com Wed Dec 7 03:19:48 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:19:48 -0800 (PST) Subject: [pypy-commit] pypy default: mark this merge Message-ID: <5847c624.820bc30a.52e99.33fd@mx.google.com> Author: Armin Rigo Branch: Changeset: r88923:ce9909ed1f52 Date: 2016-12-07 09:19 +0100 http://bitbucket.org/pypy/pypy/changeset/ce9909ed1f52/ Log: mark this merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -38,3 +38,5 @@ .. branch: desc-specialize Refactor FunctionDesc.specialize() and related code (RPython annotator). + +.. 
branch: raw-calloc From pypy.commits at gmail.com Wed Dec 7 03:33:40 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:33:40 -0800 (PST) Subject: [pypy-commit] pypy default: fix test Message-ID: <5847c964.313ac20a.8a7be.3c2d@mx.google.com> Author: Armin Rigo Branch: Changeset: r88924:e8f04195dde5 Date: 2016-12-07 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/e8f04195dde5/ Log: fix test diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -401,7 +401,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" From pypy.commits at gmail.com Wed Dec 7 03:34:20 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:34:20 -0800 (PST) Subject: [pypy-commit] cffi default: test and fix for b6adad5f4ea3, actually reverting some C changes that Message-ID: <5847c98c.e626c20a.a1dc0.3b28@mx.google.com> Author: Armin Rigo Branch: Changeset: r2825:d0f7cb27f93f Date: 2016-12-06 22:29 +0100 http://bitbucket.org/cffi/cffi/changeset/d0f7cb27f93f/ Log: test and fix for b6adad5f4ea3, actually reverting some C changes that are not necessary and cause segfaults diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1605,13 +1605,6 @@ #endif } -static PyObject * -cdataowning_no_generic_alloc(PyTypeObject *type, Py_ssize_t nitems) -{ - PyErr_SetString(PyExc_SystemError, "cdataowning: no generic alloc"); - return NULL; -} - static void cdataowning_dealloc(CDataObject *cd) { assert(!(cd->c_type->ct_flags & (CT_IS_VOID_PTR | CT_FUNCTIONPTR))); @@ -2882,8 +2875,8 @@ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ - PyType_GenericAlloc, /* tp_alloc */ - 
PyType_GenericNew, /* tp_new */ + 0, /* tp_alloc */ + 0, /* tp_new */ PyObject_Del, /* tp_free */ }; @@ -2924,8 +2917,8 @@ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ - cdataowning_no_generic_alloc, /* tp_alloc */ - PyType_GenericNew, /* tp_new */ + 0, /* tp_alloc */ + 0, /* tp_new */ free, /* tp_free */ }; @@ -2967,8 +2960,8 @@ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ - PyType_GenericAlloc, /* tp_alloc */ - PyType_GenericNew, /* tp_new */ + 0, /* tp_alloc */ + 0, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -493,3 +493,15 @@ def test_negative_array_size(self): ffi = FFI() py.test.raises(ValueError, ffi.cast, "int[-5]", 0) + + def test_cannot_instantiate_manually(self): + ffi = FFI() + ct = type(ffi.typeof("void *")) + py.test.raises(TypeError, ct) + py.test.raises(TypeError, ct, ffi.NULL) + for cd in [type(ffi.cast("void *", 0)), + type(ffi.new("char[]", 3)), + type(ffi.gc(ffi.NULL, lambda x: None))]: + py.test.raises(TypeError, cd) + py.test.raises(TypeError, cd, ffi.NULL) + py.test.raises(TypeError, cd, ffi.typeof("void *")) From pypy.commits at gmail.com Wed Dec 7 03:34:22 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:34:22 -0800 (PST) Subject: [pypy-commit] cffi default: fix test Message-ID: <5847c98e.46bb1c0a.96c8b.bcca@mx.google.com> Author: Armin Rigo Branch: Changeset: r2826:cc764123ad33 Date: 2016-12-07 09:33 +0100 http://bitbucket.org/cffi/cffi/changeset/cc764123ad33/ Log: fix test diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -360,7 +360,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert 
repr(seen[2]) == "" assert repr(seen[3]) == "" From pypy.commits at gmail.com Wed Dec 7 03:35:06 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 00:35:06 -0800 (PST) Subject: [pypy-commit] pypy default: import cffi/cc764123ad33 (updated tests) Message-ID: <5847c9ba.a285c20a.2931c.36da@mx.google.com> Author: Armin Rigo Branch: Changeset: r88925:d294e2cacd59 Date: 2016-12-07 09:34 +0100 http://bitbucket.org/pypy/pypy/changeset/d294e2cacd59/ Log: import cffi/cc764123ad33 (updated tests) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -494,3 +494,15 @@ def test_negative_array_size(self): ffi = FFI() py.test.raises(ValueError, ffi.cast, "int[-5]", 0) + + def test_cannot_instantiate_manually(self): + ffi = FFI() + ct = type(ffi.typeof("void *")) + py.test.raises(TypeError, ct) + py.test.raises(TypeError, ct, ffi.NULL) + for cd in [type(ffi.cast("void *", 0)), + type(ffi.new("char[]", 3)), + type(ffi.gc(ffi.NULL, lambda x: None))]: + py.test.raises(TypeError, cd) + py.test.raises(TypeError, cd, ffi.NULL) + py.test.raises(TypeError, cd, ffi.typeof("void *")) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -361,7 +361,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" From pypy.commits at gmail.com Wed Dec 7 04:17:30 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 01:17:30 -0800 (PST) Subject: [pypy-commit] pypy 
py3.5: Fix the pickling tests for functools.partial Message-ID: <5847d3aa.06891c0a.e1974.d139@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88926:54f2cae1a873 Date: 2016-12-07 10:16 +0100 http://bitbucket.org/pypy/pypy/changeset/54f2cae1a873/ Log: Fix the pickling tests for functools.partial diff --git a/lib-python/3/test/test_functools.py b/lib-python/3/test/test_functools.py --- a/lib-python/3/test/test_functools.py +++ b/lib-python/3/test/test_functools.py @@ -16,7 +16,12 @@ import functools py_functools = support.import_fresh_module('functools', blocked=['_functools']) -c_functools = support.import_fresh_module('functools', fresh=['_functools']) +c_functools = functools +# pypy: was: +# c_functools = support.import_fresh_module('functools', fresh=['_functools']) +# but this creates confusion for pickle because on pypy, _functools is a +# pure python module, whereas on CPython it is C (and so not really +# re-importable) decimal = support.import_fresh_module('decimal', fresh=['_decimal']) diff --git a/pypy/module/test_lib_pypy/test_functools.py b/pypy/module/test_lib_pypy/test_functools.py --- a/pypy/module/test_lib_pypy/test_functools.py +++ b/pypy/module/test_lib_pypy/test_functools.py @@ -17,6 +17,8 @@ assert partial.func == test_partial_setstate def test_partial_pickle(): + pytest.skip("can't run this test: _functools.partial now has " + "__module__=='functools', in this case confusing pickle") import pickle partial1 = _functools.partial(test_partial_pickle) string = pickle.dumps(partial1) From pypy.commits at gmail.com Wed Dec 7 04:23:36 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 01:23:36 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Skip this test class on pypy (_testcapi exists, but doesn't contain the Message-ID: <5847d518.46bb1c0a.7bc9c.be1b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88927:9f9c864141b1 Date: 2016-12-07 10:22 +0100 http://bitbucket.org/pypy/pypy/changeset/9f9c864141b1/ Log: Skip 
this test class on pypy (_testcapi exists, but doesn't contain the internals needed here) diff --git a/lib-python/3/test/test_time.py b/lib-python/3/test/test_time.py --- a/lib-python/3/test/test_time.py +++ b/lib-python/3/test/test_time.py @@ -734,8 +734,7 @@ self.assertIs(lt.tm_zone, None) - at unittest.skipUnless(_testcapi is not None, - 'need the _testcapi module') + at support.cpython_only class TestPyTime_t(unittest.TestCase): def test_FromSeconds(self): from _testcapi import PyTime_FromSeconds From pypy.commits at gmail.com Wed Dec 7 04:25:25 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 01:25:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: test and fix Message-ID: <5847d585.ca57c20a.eb3d8.4e18@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88928:96c74a5d5b0a Date: 2016-12-07 10:24 +0100 http://bitbucket.org/pypy/pypy/changeset/96c74a5d5b0a/ Log: test and fix diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -440,6 +440,7 @@ for n in 1e-6, 0.1, orig: # orig last to restore starting state sys.setswitchinterval(n) assert sys.getswitchinterval() == n + raises(ValueError, sys.setswitchinterval, 0.0) def test_recursionlimit(self): import sys diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -101,6 +101,9 @@ # The scaling factor is chosen so that with the default # checkinterval value of 10000, it corresponds to 0.005, which is # the default value of the switchinterval in CPython 3.5 + if interval <= 0.0: + raise oefmt(space.w_ValueError, + "switch interval must be strictly positive") space.actionflag.setcheckinterval(int(interval * 2000000.0)) def getswitchinterval(space): From pypy.commits at gmail.com Wed Dec 7 04:32:38 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 01:32:38 -0800 (PST) Subject: 
[pypy-commit] pypy py3.5: fix test Message-ID: <5847d736.12921c0a.54464.dc38@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88929:696132c95aa3 Date: 2016-12-07 10:31 +0100 http://bitbucket.org/pypy/pypy/changeset/696132c95aa3/ Log: fix test diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,14 +72,16 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", + "getfield_gc_r", # dead load "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] # ops = entry_bridge.ops_by_id('call', opcode='LOAD_GLOBAL') - assert log.opnames(ops) == [] + assert log.opnames(ops) == ["getfield_gc_r"] # assert entry_bridge.match_by_id('call', """ + p30 = getfield_gc_r(ConstPtr(ptr29), descr=) p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() @@ -130,7 +132,8 @@ ops = entry_bridge.ops_by_id('meth1', opcode='LOOKUP_METHOD') assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', 'guard_value', - 'guard_not_invalidated'] + 'guard_not_invalidated', + 'getfield_gc_r'] # the second LOOKUP_METHOD is folded away assert list(entry_bridge.ops_by_id('meth2', opcode='LOOKUP_METHOD')) == [] # @@ -439,6 +442,7 @@ i22 = getfield_gc_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) + p21 = getfield_gc_r(ConstPtr(ptr20), descr=) guard_not_invalidated(descr=...) 
p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) From pypy.commits at gmail.com Wed Dec 7 05:48:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 02:48:19 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: several changes to the memoryview object to pass the tests, most of the problems were related to wrong index calculations Message-ID: <5847e8f3.cf3fc20a.2de93.7486@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88930:c9097583272c Date: 2016-12-07 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c9097583272c/ Log: several changes to the memoryview object to pass the tests, most of the problems were related to wrong index calculations diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -115,8 +115,6 @@ return ''.join(self.copy_buffer()) def copy_buffer(self): - buf = self.buf - n_bytes = buf.getlength() data = [] self._copy_rec(0, data, 0) return data @@ -130,7 +128,6 @@ self._copy_base(data,off) return - # TODO add a test that has at least 2 dims for i in range(shape): self._copy_rec(idim+1,data,off) off += strides[idim] @@ -140,18 +137,21 @@ step = shapes[0] strides = self.getstrides() itemsize = self.getitemsize() + bytesize = self.getlength() + copiedbytes = 0 for i in range(step): bytes = self.buf.getslice(off, off+itemsize, 1, itemsize) data.append(bytes) + copiedbytes += len(bytes) off += strides[0] # do notcopy data if the sub buffer is out of bounds - if off >= self.buf.getlength(): + if copiedbytes >= bytesize: break def getlength(self): if self.length != -1: - return self.length // self.itemsize - return self.buf.getlength() // self.itemsize + return self.length + return self.buf.getlength() def descr_tobytes(self, space): self._check_released(space) @@ -198,8 +198,9 @@ return self._tolist(space, buf, bytecount, 
itemsize, fmt, [stride]) items = [None] * dimshape + orig_buf = buf for i in range(dimshape): - buf = SubBuffer(buf, start, stride) + buf = SubBuffer(orig_buf, start, stride) item = self._tolist_rec(space, buf, start, idim+1, fmt) items[i] = item start += stride @@ -232,9 +233,8 @@ if index < 0 or index >= nitems: raise oefmt(space.w_IndexError, "index out of bounds on dimension %d", dim+1) - start += strides[dim] * index # TODO suboffsets? - return start + return start + strides[dim] * index def _getitem_tuple_indexed(self, space, w_index): view = self.buf @@ -259,54 +259,60 @@ fmtiter.interpret(fmt) return fmtiter.result_w[0] + def _decode_index(self, space, w_index, is_slice): + shape = self.getshape() + if len(shape) == 0: + count = 1 + else: + count = shape[0] + return space.decode_index4(w_index, count) def descr_getitem(self, space, w_index): self._check_released(space) if space.isinstance_w(w_index, space.w_tuple): return self._getitem_tuple_indexed(space, w_index) - - shape = self.getshape() - start, stop, step, slicelength = space.decode_index4(w_index, shape[0]) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) # ^^^ for a non-slice index, this returns (index, 0, 0, 1) if step == 0: # index only itemsize = self.getitemsize() dim = self.getndim() - if itemsize == 1: - if dim == 0: - raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") - elif dim == 1: - idx = self.lookup_dimension(space, self, 0, 0, start) + if dim == 0: + raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") + elif dim == 1: + idx = self.lookup_dimension(space, self, 0, 0, start) + if itemsize == 1: ch = self.buf.getitem(idx) return space.newint(ord(ch)) else: - raise oefmt(space.w_NotImplementedError, "multi-dimensional sub-views are not implemented") + # TODO: this probably isn't very fast + buf = SubBuffer(self.buf, idx, itemsize) + fmtiter = UnpackFormatIterator(space, 
buf) + fmtiter.length = buf.getlength() + fmtiter.interpret(self.format) + return fmtiter.result_w[0] else: - # TODO: this probably isn't very fast - buf = SubBuffer(self.buf, start*itemsize, itemsize) - fmtiter = UnpackFormatIterator(space, buf) - fmtiter.interpret(self.format) - return fmtiter.result_w[0] - elif step == 1: - mv = W_MemoryView.copy(self) - mv.init_slice(start, stop, step, slicelength, 0) - mv._init_flags() - return mv - else: + raise oefmt(space.w_NotImplementedError, "multi-dimensional sub-views are not implemented") + elif is_slice: mv = W_MemoryView.copy(self) mv.init_slice(start, stop, step, slicelength, 0) mv.init_len() mv._init_flags() return mv + # multi index is handled at the top of this function + else: + raise TypeError("memoryview: invalid slice key") def init_slice(self, start, stop, step, slicelength, dim): # modifies the buffer, shape and stride to allow step to be > 1 - # TODO subbuffer self.strides = strides = self.getstrides()[:] self.shape = shape = self.getshape()[:] - self.buf = SubBuffer(self.buf, strides[dim] * start, slicelength) + bytesize = self.getitemsize() * slicelength + self.buf = SubBuffer(self.buf, strides[dim] * start, bytesize) shape[dim] = slicelength strides[dim] = strides[dim] * step + # TODO subbuffer def init_len(self): self.length = self.bytecount_from_shape() @@ -334,6 +340,8 @@ if space.isinstance_w(w_index, space.w_tuple): raise oefmt(space.w_NotImplementedError, "") start, stop, step, size = space.decode_index4(w_index, self.getlength()) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) itemsize = self.getitemsize() if step == 0: # index only if itemsize == 1: @@ -351,7 +359,7 @@ self.buf.setslice(start * itemsize, fmtiter.result.build()) elif step == 1: value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size * itemsize: + if value.getlength() != slicelength * itemsize: raise 
oefmt(space.w_ValueError, "cannot modify size of memoryview object") self.buf.setslice(start * itemsize, value.as_str()) @@ -367,11 +375,11 @@ src = space.buffer_w(w_obj, space.BUF_CONTIG_RO) dst_strides = self.getstrides() dim = 0 - dst = SubBuffer(self.buf, start * itemsize, size * itemsize) + dst = SubBuffer(self.buf, start * itemsize, slicelength * itemsize) src_stride0 = dst_strides[dim] off = 0 - src_shape0 = size + src_shape0 = slicelength src_stride0 = src.getstrides()[0] if isinstance(w_obj, W_MemoryView): src_stride0 = w_obj.getstrides()[0] @@ -386,11 +394,15 @@ def descr_len(self, space): self._check_released(space) - return space.wrap(self.getlength()) + dim = self.getndim() + if dim == 0: + return 1 + shape = self.getshape() + return space.wrap(shape[0]) def w_get_nbytes(self, space): self._check_released(space) - return space.wrap(self.buf.getlength()) + return space.wrap(self.getlength()) def w_get_format(self, space): self._check_released(space) @@ -398,11 +410,11 @@ def w_get_itemsize(self, space): self._check_released(space) - return space.wrap(self.itemsize) + return space.wrap(self.getitemsize()) def w_get_ndim(self, space): self._check_released(space) - return space.wrap(self.buf.getndim()) + return space.wrap(self.getndim()) def w_is_readonly(self, space): self._check_released(space) @@ -410,13 +422,13 @@ def w_get_shape(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return space.w_None return space.newtuple([space.wrap(x) for x in self.getshape()]) def w_get_strides(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return space.w_None return space.newtuple([space.wrap(x) for x in self.getstrides()]) diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -43,13 +43,13 @@ def 
test_extended_slice(self): data = bytearray(b'abcefg') v = memoryview(data) - w = v[0:2:2] # failing for now: NotImplementedError + w = v[0:2:2] assert len(w) == 1 assert list(w) == [97] v[::2] = b'ABC' assert data == bytearray(eval("b'AbBeCg'")) - assert v[::2] == b'ABC' - assert v[::-2] == b'geb' + assert v[::2].tobytes() == b'ABC' + assert v[::-2].tobytes() == b'geb' def test_memoryview_attrs(self): v = memoryview(b"a"*100) From pypy.commits at gmail.com Wed Dec 7 05:53:21 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 02:53:21 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: merge py3.5 Message-ID: <5847ea21.12921c0a.54464.fc20@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88931:d4951c9a2236 Date: 2016-12-07 11:52 +0100 http://bitbucket.org/pypy/pypy/changeset/d4951c9a2236/ Log: merge py3.5 diff too long, truncating to 2000 out of 33334 lines diff --git a/extra_tests/README.txt b/extra_tests/README.txt new file mode 100644 --- /dev/null +++ b/extra_tests/README.txt @@ -0,0 +1,5 @@ +The tests in this directory are a complement to lib-python/3/test/. + +They are meant to run on top of a compiled pypy3 or CPython3.5 in an +environment containing at least pytest and hypothesis, using a command like +'pytest extra_tests/'. 
diff --git a/extra_tests/pytest.ini b/extra_tests/pytest.ini new file mode 100644 diff --git a/extra_tests/test_bufferedreader.py b/extra_tests/test_bufferedreader.py new file mode 100644 --- /dev/null +++ b/extra_tests/test_bufferedreader.py @@ -0,0 +1,99 @@ +import io +from cffi import FFI + +import pytest +from hypothesis import strategies as st +from hypothesis import given, assume, settings +from hypothesis.stateful import ( + RuleBasedStateMachine, Bundle, rule, run_state_machine_as_test, precondition) +ffi = FFI() + +MAX_READ_SIZE = 1024 +MIN_READ_SIZE = 1 +MAX_SIZE = 0xffff + + at st.composite +def data_and_sizes(draw, reads=st.lists(st.integers(MIN_READ_SIZE, MAX_READ_SIZE))): + reads = draw(reads) + total_size = sum(reads) + assume(0 < total_size < MAX_SIZE) + data = draw(st.binary(min_size=total_size, max_size=total_size)) + return data, reads + +class Stream(io.RawIOBase): + def __init__(self, data, read_sizes): + assert sum(read_sizes) == len(data) + self.data = data + self.n = 0 + self.read_sizes = iter(read_sizes) + self.partial_read = 0 + + def readinto(self, buf): + if self.n == len(self.data): + return 0 + if self.partial_read: + read_size = self.partial_read + else: + read_size = next(self.read_sizes) + if len(buf) < read_size: + self.partial_read = read_size - len(buf) + read_size = len(buf) + else: + self.partial_read = 0 + self.update_buffer(buf, self.data[self.n:self.n + read_size]) + self.n += read_size + return read_size + + def update_buffer(self, buf, data): + n = len(data) + buf[:n] = data + + def readable(self): + return True + +class StreamCFFI(Stream): + def update_buffer(self, buf, data): + n = len(data) + ffi.buffer(ffi.from_buffer(buf), n)[:] = data + + + at pytest.mark.parametrize('StreamCls', [Stream, StreamCFFI]) + at given(params=data_and_sizes(), chunk_size=st.integers(MIN_READ_SIZE, 8192)) +def test_buf(params, chunk_size, StreamCls): + data, sizes = params + stream = StreamCls(data, sizes) + assert io.BufferedReader(stream, 
chunk_size).read(len(data)) == data + +class StateMachine(RuleBasedStateMachine): + def __init__(self, stream, reference): + super().__init__() + self.stream = stream + self.reference = reference + + @rule(size=st.integers(MIN_READ_SIZE, MAX_READ_SIZE)) + def read(self, size): + expected = self.reference.read(size) + assert self.stream.read(size) == expected + + @rule(size=st.integers(MIN_READ_SIZE, MAX_READ_SIZE)) + def readinto(self, size): + expected = self.reference.read(size) + buf = bytearray(size) + n = self.stream.readinto(buf) + assert buf[:n] == expected + + @rule() + def readline(self): + expected = self.reference.readline(80) + assert self.stream.readline(80) == expected + + at pytest.mark.parametrize('StreamCls', [Stream, StreamCFFI]) + at settings(max_examples=50) + at given(params=data_and_sizes(), chunk_size=st.integers(MIN_READ_SIZE, 8192)) +def test_stateful(params, chunk_size, StreamCls): + data, sizes = params + raw_stream = StreamCls(data, sizes) + reference = io.BytesIO(data) + stream = io.BufferedReader(raw_stream, chunk_size) + sm = StateMachine(stream, reference) + run_state_machine_as_test(lambda: sm) diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -156,7 +156,7 @@ __slots__ = () @abstractmethod - async def __aiter__(self): + def __aiter__(self): return AsyncIterator() @classmethod @@ -176,7 +176,7 @@ """Return the next item or raise StopAsyncIteration when exhausted.""" raise StopAsyncIteration - async def __aiter__(self): + def __aiter__(self): return self @classmethod diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -177,6 +177,13 @@ 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 
'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', }) REVERSE_IMPORT_MAPPING.update({ diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -151,13 +151,13 @@ # can only be found inside Xcode.app if the "Command Line Tools" # are not installed. # - # Futhermore, the compiler that can be used varies between + # Furthermore, the compiler that can be used varies between # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2' # as the compiler, after that 'clang' should be used because # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that # miscompiles Python. - # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -193,7 +193,7 @@ if cc != oldcc: # Found a replacement compiler. # Modify config vars using new compiler, if not already explicitly - # overriden by an env variable, preserving additional arguments. + # overridden by an env variable, preserving additional arguments. for cv in _COMPILER_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: cv_split = _config_vars[cv].split() @@ -207,7 +207,7 @@ """Remove all universal build arguments from config vars""" for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) @@ -228,7 +228,7 @@ # build extensions on OSX 10.7 and later with the prebuilt # 32-bit installer on the python.org website. 
- # skip checks if the compiler was overriden with a CC env variable + # skip checks if the compiler was overridden with a CC env variable if 'CC' in os.environ: return _config_vars @@ -244,7 +244,7 @@ # across Xcode and compiler versions, there is no reliable way # to be sure why it failed. Assume here it was due to lack of # PPC support and remove the related '-arch' flags from each - # config variables not explicitly overriden by an environment + # config variables not explicitly overridden by an environment # variable. If the error was for some other reason, we hope the # failure will show up again when trying to compile an extension # module. @@ -292,7 +292,7 @@ sdk = m.group(1) if not os.path.exists(sdk): for cv in _UNIVERSAL_CONFIG_VARS: - # Do not alter a config var explicitly overriden by env var + # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags) diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py --- a/lib-python/3/_pydecimal.py +++ b/lib-python/3/_pydecimal.py @@ -252,7 +252,7 @@ class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. - This occurs and signals invalid-operation if an string is being + This occurs and signals invalid-operation if a string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ @@ -1102,7 +1102,7 @@ def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. - Rounds the number (if more then precision digits) + Rounds the number (if more than precision digits) """ if self._is_special: ans = self._check_nans(context=context) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -296,8 +296,9 @@ called. The basic type used for binary data read from or written to a file is - bytes. 
bytearrays are accepted too, and in some cases (such as - readinto) needed. Text I/O classes work with str data. + bytes. Other bytes-like objects are accepted as method arguments too. In + some cases (such as readinto), a writable object is required. Text I/O + classes work with str data. Note that calling any method (even inquiries) on a closed stream is undefined. Implementations may raise OSError in this case. @@ -390,7 +391,7 @@ def seekable(self): """Return a bool indicating whether object supports random access. - If False, seek(), tell() and truncate() will raise UnsupportedOperation. + If False, seek(), tell() and truncate() will raise OSError. This method may need to do a test seek(). """ return False @@ -405,7 +406,7 @@ def readable(self): """Return a bool indicating whether object was opened for reading. - If False, read() will raise UnsupportedOperation. + If False, read() will raise OSError. """ return False @@ -419,7 +420,7 @@ def writable(self): """Return a bool indicating whether object was opened for writing. - If False, write() and truncate() will raise UnsupportedOperation. + If False, write() and truncate() will raise OSError. """ return False @@ -439,7 +440,7 @@ return self.__closed def _checkClosed(self, msg=None): - """Internal: raise an ValueError if file is closed + """Internal: raise a ValueError if file is closed """ if self.closed: raise ValueError("I/O operation on closed file." @@ -596,7 +597,7 @@ return data def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Returns an int representing the number of bytes read (0 for EOF), or None if the object is set not to block and has no data to read. @@ -606,7 +607,8 @@ def write(self, b): """Write the given buffer to the IO stream. - Returns the number of bytes written, which may be less than len(b). + Returns the number of bytes written, which may be less than the + length of b in bytes. 
""" self._unsupported("write") @@ -659,7 +661,7 @@ self._unsupported("read1") def readinto(self, b): - """Read up to len(b) bytes into bytearray b. + """Read bytes into a pre-allocated bytes-like object b. Like read(), this may issue multiple reads to the underlying raw stream, unless the latter is 'interactive'. @@ -673,7 +675,7 @@ return self._readinto(b, read1=False) def readinto1(self, b): - """Read up to len(b) bytes into *b*, using at most one system call + """Read bytes into buffer *b*, using at most one system call Returns an int representing the number of bytes read (0 for EOF). @@ -701,8 +703,8 @@ def write(self, b): """Write the given bytes buffer to the IO stream. - Return the number of bytes written, which is never less than - len(b). + Return the number of bytes written, which is always the length of b + in bytes. Raises BlockingIOError if the buffer is full and the underlying raw stream cannot accept more data at the moment. @@ -787,12 +789,6 @@ def seekable(self): return self.raw.seekable() - def readable(self): - return self.raw.readable() - - def writable(self): - return self.raw.writable() - @property def raw(self): return self._raw @@ -890,7 +886,8 @@ raise ValueError("write to closed file") if isinstance(b, str): raise TypeError("can't write str to binary stream") - n = len(b) + with memoryview(b) as view: + n = view.nbytes # Size of any bytes-like object if n == 0: return 0 pos = self._pos @@ -982,6 +979,9 @@ self._reset_read_buf() self._read_lock = Lock() + def readable(self): + return self.raw.readable() + def _reset_read_buf(self): self._read_buf = b"" self._read_pos = 0 @@ -1043,7 +1043,7 @@ break avail += len(chunk) chunks.append(chunk) - # n is more then avail only when an EOF occurred or when + # n is more than avail only when an EOF occurred or when # read() would have blocked. 
n = min(n, avail) out = b"".join(chunks) @@ -1093,14 +1093,13 @@ def _readinto(self, buf, read1): """Read data into *buf* with at most one system call.""" - if len(buf) == 0: - return 0 - # Need to create a memoryview object of type 'b', otherwise # we may not be able to assign bytes to it, and slicing it # would create a new object. if not isinstance(buf, memoryview): buf = memoryview(buf) + if buf.nbytes == 0: + return 0 buf = buf.cast('B') written = 0 @@ -1170,6 +1169,9 @@ self._write_buf = bytearray() self._write_lock = Lock() + def writable(self): + return self.raw.writable() + def write(self, b): if self.closed: raise ValueError("write to closed file") diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -307,13 +311,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: @@ -456,6 +462,10 @@ week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) + if julian <= 0: + year -= 1 + yday = 366 if calendar.isleap(year) else 365 + julian += yday # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py --- a/lib-python/3/asyncio/base_events.py +++ b/lib-python/3/asyncio/base_events.py @@ -52,6 +52,12 @@ # before cleanup of cancelled handles is performed. 
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 +# Exceptions which must not call the exception handler in fatal error +# methods (_fatal_error()) +_FATAL_ERROR_IGNORE = (BrokenPipeError, + ConnectionResetError, ConnectionAbortedError) + + def _format_handle(handle): cb = handle._callback if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): @@ -70,49 +76,89 @@ return repr(fd) -def _check_resolved_address(sock, address): - # Ensure that the address is already resolved to avoid the trap of hanging - # the entire event loop when the address requires doing a DNS lookup. - # - # getaddrinfo() is slow (around 10 us per call): this function should only - # be called in debug mode - family = sock.family +# Linux's sock.type is a bitmask that can include extra info about socket. +_SOCKET_TYPE_MASK = 0 +if hasattr(socket, 'SOCK_NONBLOCK'): + _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK +if hasattr(socket, 'SOCK_CLOEXEC'): + _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC - if family == socket.AF_INET: - host, port = address - elif family == socket.AF_INET6: - host, port = address[:2] - else: + +def _ipaddr_info(host, port, family, type, proto): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): return - # On Windows, socket.inet_pton() is only available since Python 3.4 - if hasattr(socket, 'inet_pton'): - # getaddrinfo() is slow and has known issue: prefer inet_pton() - # if available + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + type &= ~_SOCKET_TYPE_MASK + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes): + if port == b'': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like b"http". 
+ port = socket.getservbyname(port.decode('ascii')) + elif isinstance(port, str): + if port == '': + port = 0 + else: + try: + port = int(port) + except ValueError: + # Might be a service name like "http". + port = socket.getservbyname(port) + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET, socket.AF_INET6] + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: try: - socket.inet_pton(family, host) - except OSError as exc: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, exc)) + socket.inet_pton(af, host) + # The host has already been resolved. + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _ensure_resolved(address, *, family=0, type=socket.SOCK_STREAM, proto=0, + flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto) + if info is not None: + # "host" is already a resolved IP. + fut = loop.create_future() + fut.set_result([info]) + return fut else: - # Use getaddrinfo(flags=AI_NUMERICHOST) to ensure that the address is - # already resolved. 
- type_mask = 0 - if hasattr(socket, 'SOCK_NONBLOCK'): - type_mask |= socket.SOCK_NONBLOCK - if hasattr(socket, 'SOCK_CLOEXEC'): - type_mask |= socket.SOCK_CLOEXEC - try: - socket.getaddrinfo(host, port, - family=family, - type=(sock.type & ~type_mask), - proto=sock.proto, - flags=socket.AI_NUMERICHOST) - except socket.gaierror as err: - raise ValueError("address must be resolved (IP address), " - "got host %r: %s" - % (host, err)) + return loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) def _run_until_complete_cb(fut): @@ -167,7 +213,7 @@ def wait_closed(self): if self.sockets is None or self._waiters is None: return - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._waiters.append(waiter) yield from waiter @@ -201,6 +247,10 @@ % (self.__class__.__name__, self.is_running(), self.is_closed(), self.get_debug())) + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + def create_task(self, coro): """Schedule a coroutine object. 
@@ -494,7 +544,7 @@ assert not args assert not isinstance(func, events.TimerHandle) if func._cancelled: - f = futures.Future(loop=self) + f = self.create_future() f.set_result(None) return f func, args = func._callback, func._args @@ -584,14 +634,14 @@ raise ValueError( 'host/port and sock can not be specified at the same time') - f1 = self.getaddrinfo( - host, port, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f1 = _ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs = [f1] if local_addr is not None: - f2 = self.getaddrinfo( - *local_addr, family=family, - type=socket.SOCK_STREAM, proto=proto, flags=flags) + f2 = _ensure_resolved(local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) fs.append(f2) else: f2 = None @@ -673,7 +723,7 @@ def _create_connection_transport(self, sock, protocol_factory, ssl, server_hostname): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( @@ -726,9 +776,9 @@ assert isinstance(addr, tuple) and len(addr) == 2, ( '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) + infos = yield from _ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) if not infos: raise OSError('getaddrinfo() returned empty list') @@ -793,7 +843,7 @@ raise exceptions[0] protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_datagram_transport( sock, protocol, r_addr, waiter) if self._debug: @@ -816,9 +866,9 @@ @coroutine def _create_server_getaddrinfo(self, host, port, family, flags): - infos = yield from self.getaddrinfo(host, port, family=family, + infos = yield from _ensure_resolved((host, port), 
family=family, type=socket.SOCK_STREAM, - flags=flags) + flags=flags, loop=self) if not infos: raise OSError('getaddrinfo({!r}) returned empty list'.format(host)) return infos @@ -839,7 +889,10 @@ to host and port. The host parameter can also be a sequence of strings and in that case - the TCP server is bound to all hosts of the sequence. + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. Return a Server object which can be used to stop the service. @@ -868,7 +921,7 @@ flags=flags) for host in hosts] infos = yield from tasks.gather(*fs, loop=self) - infos = itertools.chain.from_iterable(infos) + infos = set(itertools.chain.from_iterable(infos)) completed = False try: @@ -929,7 +982,7 @@ @coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_read_pipe_transport(pipe, protocol, waiter) try: @@ -946,7 +999,7 @@ @coroutine def connect_write_pipe(self, protocol_factory, pipe): protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() transport = self._make_write_pipe_transport(pipe, protocol, waiter) try: @@ -1028,6 +1081,11 @@ logger.info('%s: %r' % (debug_log, transport)) return transport, protocol + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + def set_exception_handler(self, handler): """Set handler as the new event loop exception handler. 
diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py --- a/lib-python/3/asyncio/base_subprocess.py +++ b/lib-python/3/asyncio/base_subprocess.py @@ -210,6 +210,10 @@ logger.info('%r exited with return code %r', self, returncode) self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode self._call(self._protocol.process_exited) self._try_finish() @@ -227,7 +231,7 @@ if self._returncode is not None: return self._returncode - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._exit_waiters.append(waiter) return (yield from waiter) diff --git a/lib-python/3/asyncio/compat.py b/lib-python/3/asyncio/compat.py --- a/lib-python/3/asyncio/compat.py +++ b/lib-python/3/asyncio/compat.py @@ -4,6 +4,7 @@ PY34 = sys.version_info >= (3, 4) PY35 = sys.version_info >= (3, 5) +PY352 = sys.version_info >= (3, 5, 2) def flatten_list_bytes(list_of_data): diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py --- a/lib-python/3/asyncio/coroutines.py +++ b/lib-python/3/asyncio/coroutines.py @@ -27,8 +27,8 @@ # before you define your coroutines. A downside of using this feature # is that tracebacks show entries for the CoroWrapper.__next__ method # when _DEBUG is true. 
-_DEBUG = (not sys.flags.ignore_environment - and bool(os.environ.get('PYTHONASYNCIODEBUG'))) +_DEBUG = (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) try: @@ -86,7 +86,7 @@ def __init__(self, gen, func=None): assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen self.gen = gen - self.func = func # Used to unwrap @coroutine decorator + self.func = func # Used to unwrap @coroutine decorator self._source_traceback = traceback.extract_stack(sys._getframe(1)) self.__name__ = getattr(gen, '__name__', None) self.__qualname__ = getattr(gen, '__qualname__', None) @@ -204,7 +204,8 @@ @functools.wraps(func) def coro(*args, **kw): res = func(*args, **kw) - if isinstance(res, futures.Future) or inspect.isgenerator(res): + if isinstance(res, futures.Future) or inspect.isgenerator(res) or \ + isinstance(res, CoroWrapper): res = yield from res elif _AwaitableABC is not None: # If 'func' returns an Awaitable (new in 3.5) we @@ -283,10 +284,13 @@ coro_frame = coro.cr_frame filename = coro_code.co_filename - if (isinstance(coro, CoroWrapper) - and not inspect.isgeneratorfunction(coro.func) - and coro.func is not None): - filename, lineno = events._get_function_source(coro.func) + lineno = 0 + if (isinstance(coro, CoroWrapper) and + not inspect.isgeneratorfunction(coro.func) and + coro.func is not None): + source = events._get_function_source(coro.func) + if source is not None: + filename, lineno = source if coro_frame is None: coro_repr = ('%s done, defined at %s:%s' % (coro_name, filename, lineno)) diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -266,6 +266,9 @@ def time(self): raise NotImplementedError + def create_future(self): + raise NotImplementedError + # Method scheduling a coroutine object: create a task. def create_task(self, coro): @@ -484,6 +487,9 @@ # Error handlers. 
+ def get_exception_handler(self): + raise NotImplementedError + def set_exception_handler(self, handler): raise NotImplementedError diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py --- a/lib-python/3/asyncio/futures.py +++ b/lib-python/3/asyncio/futures.py @@ -142,7 +142,7 @@ def __init__(self, *, loop=None): """Initialize the future. - The optional event_loop argument allows to explicitly set the event + The optional event_loop argument allows explicitly setting the event loop object used by the future. If it's not provided, the future uses the default event loop. """ @@ -341,6 +341,9 @@ raise InvalidStateError('{}: {!r}'.format(self._state, self)) if isinstance(exception, type): exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") self._exception = exception self._state = _FINISHED self._schedule_callbacks() @@ -448,6 +451,8 @@ return future assert isinstance(future, concurrent.futures.Future), \ 'concurrent.futures.Future is expected, got {!r}'.format(future) - new_future = Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() _chain_future(future, new_future) return new_future diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py --- a/lib-python/3/asyncio/locks.py +++ b/lib-python/3/asyncio/locks.py @@ -111,7 +111,7 @@ acquire() is a coroutine and should be called with 'yield from'. Locks also support the context management protocol. '(yield from lock)' - should be used as context manager expression. + should be used as the context manager expression. 
Usage: @@ -170,7 +170,7 @@ self._locked = True return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -258,7 +258,7 @@ if self._value: return True - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -320,7 +320,7 @@ self.release() try: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut @@ -329,7 +329,13 @@ self._waiters.remove(fut) finally: - yield from self.acquire() + # Must reacquire lock even if wait is cancelled + while True: + try: + yield from self.acquire() + break + except futures.CancelledError: + pass @coroutine def wait_for(self, predicate): @@ -433,7 +439,7 @@ True. """ while self._value <= 0: - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() self._waiters.append(fut) try: yield from fut diff --git a/lib-python/3/asyncio/proactor_events.py b/lib-python/3/asyncio/proactor_events.py --- a/lib-python/3/asyncio/proactor_events.py +++ b/lib-python/3/asyncio/proactor_events.py @@ -90,7 +90,7 @@ self.close() def _fatal_error(self, exc, message='Fatal error on pipe transport'): - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -440,15 +440,7 @@ return self._proactor.send(sock, data) def sock_connect(self, sock, address): - try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut = futures.Future(loop=self) - fut.set_exception(err) - return fut - else: - return self._proactor.connect(sock, address) + return self._proactor.connect(sock, address) def sock_accept(self, sock): return self._proactor.accept(sock) diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py --- 
a/lib-python/3/asyncio/queues.py +++ b/lib-python/3/asyncio/queues.py @@ -128,7 +128,7 @@ This method is a coroutine. """ while self.full(): - putter = futures.Future(loop=self._loop) + putter = self._loop.create_future() self._putters.append(putter) try: yield from putter @@ -162,7 +162,7 @@ This method is a coroutine. """ while self.empty(): - getter = futures.Future(loop=self._loop) + getter = self._loop.create_future() self._getters.append(getter) try: yield from getter diff --git a/lib-python/3/asyncio/selector_events.py b/lib-python/3/asyncio/selector_events.py --- a/lib-python/3/asyncio/selector_events.py +++ b/lib-python/3/asyncio/selector_events.py @@ -196,7 +196,7 @@ transport = None try: protocol = protocol_factory() - waiter = futures.Future(loop=self) + waiter = self.create_future() if sslcontext: transport = self._make_ssl_transport( conn, protocol, sslcontext, waiter=waiter, @@ -314,7 +314,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_recv(fut, False, sock, n) return fut @@ -352,7 +352,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() if data: self._sock_sendall(fut, False, sock, data) else: @@ -385,25 +385,28 @@ def sock_connect(self, sock, address): """Connect to a remote socket at address. - The address must be already resolved to avoid the trap of hanging the - entire event loop when the address requires doing a DNS lookup. For - example, it must be an IP address, not an hostname, for AF_INET and - AF_INET6 address families. Use getaddrinfo() to resolve the hostname - asynchronously. - This method is a coroutine. 
""" if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + + fut = self.create_future() + if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX: + self._sock_connect(fut, sock, address) + else: + resolved = base_events._ensure_resolved(address, loop=self) + resolved.add_done_callback( + lambda resolved: self._on_resolved(fut, sock, resolved)) + + return fut + + def _on_resolved(self, fut, sock, resolved): try: - if self._debug: - base_events._check_resolved_address(sock, address) - except ValueError as err: - fut.set_exception(err) + _, _, _, _, address = resolved.result()[0] + except Exception as exc: + fut.set_exception(exc) else: self._sock_connect(fut, sock, address) - return fut def _sock_connect(self, fut, sock, address): fd = sock.fileno() @@ -454,7 +457,7 @@ """ if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") - fut = futures.Future(loop=self) + fut = self.create_future() self._sock_accept(fut, False, sock) return fut @@ -566,6 +569,7 @@ self._loop.remove_reader(self._sock_fd) if not self._buffer: self._conn_lost += 1 + self._loop.remove_writer(self._sock_fd) self._loop.call_soon(self._call_connection_lost, None) # On Python 3.3 and older, objects with a destructor part of a reference @@ -579,8 +583,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. 
- if isinstance(exc, (BrokenPipeError, - ConnectionResetError, ConnectionAbortedError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: @@ -660,6 +663,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return try: data = self._sock.recv(self.max_size) except (BlockingIOError, InterruptedError): @@ -683,8 +688,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if not data: @@ -719,6 +724,8 @@ def _write_ready(self): assert self._buffer, 'Data should not be empty' + if self._conn_lost: + return try: n = self._sock.send(self._buffer) except (BlockingIOError, InterruptedError): @@ -889,6 +896,8 @@ logger.debug("%r resumes reading", self) def _read_ready(self): + if self._conn_lost: + return if self._write_wants_read: self._write_wants_read = False self._write_ready() @@ -921,6 +930,8 @@ self.close() def _write_ready(self): + if self._conn_lost: + return if self._read_wants_write: self._read_wants_write = False self._read_ready() @@ -955,8 +966,8 @@ def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return @@ -998,6 +1009,8 @@ return sum(len(data) for data, _ in self._buffer) def _read_ready(self): + if self._conn_lost: + return try: data, addr = self._sock.recvfrom(self.max_size) except (BlockingIOError, InterruptedError): @@ -1011,8 +1024,8 @@ def sendto(self, data, addr=None): if not isinstance(data, (bytes, bytearray, memoryview)): - raise 
TypeError('data argument must be byte-ish (%r)', - type(data)) + raise TypeError('data argument must be a bytes-like object, ' + 'not %r' % type(data).__name__) if not data: return diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py --- a/lib-python/3/asyncio/sslproto.py +++ b/lib-python/3/asyncio/sslproto.py @@ -603,7 +603,7 @@ self._wakeup_waiter() self._session_established = True # In case transport.write() was already called. Don't call - # immediatly _process_write_backlog(), but schedule it: + # immediately _process_write_backlog(), but schedule it: # _on_handshake_complete() can be called indirectly from # _process_write_backlog(), and _process_write_backlog() is not # reentrant. @@ -655,7 +655,7 @@ def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py --- a/lib-python/3/asyncio/streams.py +++ b/lib-python/3/asyncio/streams.py @@ -3,6 +3,7 @@ __all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol', 'open_connection', 'start_server', 'IncompleteReadError', + 'LimitOverrunError', ] import socket @@ -13,13 +14,12 @@ from . import coroutines from . import compat from . import events -from . import futures from . import protocols from .coroutines import coroutine from .log import logger -_DEFAULT_LIMIT = 2**16 +_DEFAULT_LIMIT = 2 ** 16 class IncompleteReadError(EOFError): @@ -27,15 +27,26 @@ Incomplete read error. 
Attributes: - partial: read bytes string before the end of stream was reached - - expected: total number of expected bytes + - expected: total number of expected bytes (or None if unknown) """ def __init__(self, partial, expected): - EOFError.__init__(self, "%s bytes read on a total of %s expected bytes" - % (len(partial), expected)) + super().__init__("%d bytes read on a total of %r expected bytes" + % (len(partial), expected)) self.partial = partial self.expected = expected +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + @coroutine def open_connection(host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -118,7 +129,6 @@ writer = StreamWriter(transport, protocol, reader, loop) return reader, writer - @coroutine def start_unix_server(client_connected_cb, path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): @@ -196,7 +206,7 @@ return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() - waiter = futures.Future(loop=self._loop) + waiter = self._loop.create_future() self._drain_waiter = waiter yield from waiter @@ -215,9 +225,11 @@ self._stream_reader = stream_reader self._stream_writer = None self._client_connected_cb = client_connected_cb + self._over_ssl = False def connection_made(self, transport): self._stream_reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None if self._client_connected_cb is not None: self._stream_writer = StreamWriter(transport, self, self._stream_reader, @@ -228,17 +240,25 @@ self._loop.create_task(res) def connection_lost(self, exc): - if exc is None: - self._stream_reader.feed_eof() - else: - self._stream_reader.set_exception(exc) + if self._stream_reader is not None: + if exc is None: + self._stream_reader.feed_eof() + else: + 
self._stream_reader.set_exception(exc) super().connection_lost(exc) + self._stream_reader = None + self._stream_writer = None def data_received(self, data): self._stream_reader.feed_data(data) def eof_received(self): self._stream_reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False return True @@ -318,6 +338,10 @@ def __init__(self, limit=_DEFAULT_LIMIT, loop=None): # The line length limit is a security feature; # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + self._limit = limit if loop is None: self._loop = events.get_event_loop() @@ -361,7 +385,7 @@ waiter.set_exception(exc) def _wakeup_waiter(self): - """Wakeup read() or readline() function waiting for data or EOF.""" + """Wakeup read*() functions waiting for data or EOF.""" waiter = self._waiter if waiter is not None: self._waiter = None @@ -395,8 +419,8 @@ self._wakeup_waiter() if (self._transport is not None and - not self._paused and - len(self._buffer) > 2*self._limit): + not self._paused and + len(self._buffer) > 2 * self._limit): try: self._transport.pause_reading() except NotImplementedError: @@ -409,7 +433,10 @@ @coroutine def _wait_for_data(self, func_name): - """Wait until feed_data() or feed_eof() is called.""" + """Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + """ # StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know @@ -418,7 +445,14 @@ raise RuntimeError('%s() called while another coroutine is ' 'already waiting for incoming data' % func_name) - self._waiter = futures.Future(loop=self._loop) + assert not self._eof, '_wait_for_data after EOF' + + # Waiting for data while paused will make deadlock, so prevent it. 
+ if self._paused: + self._paused = False + self._transport.resume_reading() + + self._waiter = self._loop.create_future() try: yield from self._waiter finally: @@ -426,43 +460,154 @@ @coroutine def readline(self): + """Read chunk of data from the stream until newline (b'\n') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + """ + sep = b'\n' + seplen = len(sep) + try: + line = yield from self.readuntil(sep) + except IncompleteReadError as e: + return e.partial + except LimitOverrunError as e: + if self._buffer.startswith(sep, e.consumed): + del self._buffer[:e.consumed + seplen] + else: + self._buffer.clear() + self._maybe_resume_transport() + raise ValueError(e.args[0]) + return line + + @coroutine + def readuntil(self, separator=b'\n'): + """Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. 
+ + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + """ + seplen = len(separator) + if seplen == 0: + raise ValueError('Separator should be at least one-byte string') + if self._exception is not None: raise self._exception - line = bytearray() - not_enough = True + # Consume whole buffer except last bytes, which length is + # one less than seplen. Let's check corner cases with + # separator='SEPARATOR': + # * we have received almost complete separator (without last + # byte). i.e buffer='some textSEPARATO'. In this case we + # can safely consume len(separator) - 1 bytes. + # * last byte of buffer is first byte of separator, i.e. + # buffer='abcdefghijklmnopqrS'. We may safely consume + # everything except that last byte, but this require to + # analyze bytes of buffer that match partial separator. + # This is slow and/or require FSM. For this case our + # implementation is not optimal, since require rescanning + # of data that is known to not belong to separator. In + # real world, separator will not be so long to notice + # performance problems. Even when reading MIME-encoded + # messages :) - while not_enough: - while self._buffer and not_enough: - ichar = self._buffer.find(b'\n') - if ichar < 0: - line.extend(self._buffer) - self._buffer.clear() - else: - ichar += 1 - line.extend(self._buffer[:ichar]) - del self._buffer[:ichar] - not_enough = False + # `offset` is the number of bytes from the beginning of the buffer + # where there is no occurrence of `separator`. + offset = 0 - if len(line) > self._limit: - self._maybe_resume_transport() - raise ValueError('Line is too long') + # Loop until we find `separator` in the buffer, exceed the buffer size, + # or an EOF has happened. + while True: + buflen = len(self._buffer) + # Check if we now have enough data in the buffer for `separator` to + # fit. 
+ if buflen - offset >= seplen: + isep = self._buffer.find(separator, offset) + + if isep != -1: + # `separator` is in the buffer. `isep` will be used later + # to retrieve the data. + break + + # see upper comment for explanation. + offset = buflen + 1 - seplen + if offset > self._limit: + raise LimitOverrunError( + 'Separator is not found, and chunk exceed the limit', + offset) + + # Complete message (with full separator) may be present in buffer + # even when EOF flag is set. This may happen when the last chunk + # adds data which makes separator be found. That's why we check for + # EOF *ater* inspecting the buffer. if self._eof: - break + chunk = bytes(self._buffer) + self._buffer.clear() + raise IncompleteReadError(chunk, None) - if not_enough: - yield from self._wait_for_data('readline') + # _wait_for_data() will resume reading if stream was paused. + yield from self._wait_for_data('readuntil') + if isep > self._limit: + raise LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] self._maybe_resume_transport() - return bytes(line) + return bytes(chunk) @coroutine def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediatelly. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. 
+ """ + if self._exception is not None: raise self._exception - if not n: + if n == 0: return b'' if n < 0: @@ -477,26 +622,42 @@ break blocks.append(block) return b''.join(blocks) - else: - if not self._buffer and not self._eof: - yield from self._wait_for_data('read') - if n < 0 or len(self._buffer) <= n: - data = bytes(self._buffer) - self._buffer.clear() - else: - # n > 0 and len(self._buffer) > n - data = bytes(self._buffer[:n]) - del self._buffer[:n] + if not self._buffer and not self._eof: + yield from self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] self._maybe_resume_transport() return data @coroutine def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + if self._exception is not None: raise self._exception + if n == 0: + return b'' + # There used to be "optimized" code here. It created its own # Future and waited until self._buffer had at least the n # bytes, then called read(n). Unfortunately, this could pause @@ -513,6 +674,8 @@ blocks.append(block) n -= len(block) + assert n == 0 + return b''.join(blocks) if compat.PY35: @@ -526,3 +689,9 @@ if val == b'': raise StopAsyncIteration return val + + if compat.PY352: + # In Python 3.5.2 and greater, __aiter__ should return + # the asynchronous iterator directly. 
+ def __aiter__(self): + return self diff --git a/lib-python/3/asyncio/subprocess.py b/lib-python/3/asyncio/subprocess.py --- a/lib-python/3/asyncio/subprocess.py +++ b/lib-python/3/asyncio/subprocess.py @@ -166,7 +166,7 @@ @coroutine def communicate(self, input=None): - if input: + if input is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py --- a/lib-python/3/asyncio/tasks.py +++ b/lib-python/3/asyncio/tasks.py @@ -251,7 +251,13 @@ else: if isinstance(result, futures.Future): # Yielded Future must come from Future.__iter__(). - if result._blocking: + if result._loop is not self._loop: + self._loop.call_soon( + self._step, + RuntimeError( + 'Task {!r} got Future {!r} attached to a ' + 'different loop'.format(self, result))) + elif result._blocking: result._blocking = False result.add_done_callback(self._wakeup) self._fut_waiter = result @@ -366,7 +372,7 @@ if timeout is None: return (yield from fut) - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = loop.call_later(timeout, _release_waiter, waiter) cb = functools.partial(_release_waiter, waiter) @@ -394,12 +400,12 @@ @coroutine def _wait(fs, timeout, return_when, loop): - """Internal helper for wait() and _wait_for(). + """Internal helper for wait() and wait_for(). The fs argument must be a collection of Futures. """ assert fs, 'Set of Futures is empty.' - waiter = futures.Future(loop=loop) + waiter = loop.create_future() timeout_handle = None if timeout is not None: timeout_handle = loop.call_later(timeout, _release_waiter, waiter) @@ -500,7 +506,9 @@ yield return result - future = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + future = loop.create_future() h = future._loop.call_later(delay, futures._set_result_unless_cancelled, future, result) @@ -597,7 +605,9 @@ be cancelled.) 
""" if not coros_or_futures: - outer = futures.Future(loop=loop) + if loop is None: + loop = events.get_event_loop() + outer = loop.create_future() outer.set_result([]) return outer @@ -685,7 +695,7 @@ # Shortcut. return inner loop = inner._loop - outer = futures.Future(loop=loop) + outer = loop.create_future() def _done_callback(inner): if outer.cancelled(): diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py --- a/lib-python/3/asyncio/test_utils.py +++ b/lib-python/3/asyncio/test_utils.py @@ -446,9 +446,14 @@ finally: logger.setLevel(old_level) -def mock_nonblocking_socket(): + +def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM, + family=socket.AF_INET): """Create a mock of a non-blocking socket.""" - sock = mock.Mock(socket.socket) + sock = mock.MagicMock(socket.socket) + sock.proto = proto + sock.type = type + sock.family = family sock.gettimeout.return_value = 0.0 return sock diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py --- a/lib-python/3/asyncio/unix_events.py +++ b/lib-python/3/asyncio/unix_events.py @@ -177,7 +177,7 @@ stdin, stdout, stderr, bufsize, extra=None, **kwargs): with events.get_child_watcher() as watcher: - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _UnixSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -329,14 +329,17 @@ elif self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_READ) if polling: info.append('polling') else: info.append('idle') + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -453,9 +456,10 @@ elif 
self._closing: info.append('closing') info.append('fd=%s' % self._fileno) - if self._pipe is not None: + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: polling = selector_events._test_selector_event( - self._loop._selector, + selector, self._fileno, selectors.EVENT_WRITE) if polling: info.append('polling') @@ -464,6 +468,8 @@ bufsize = self.get_write_buffer_size() info.append('bufsize=%s' % bufsize) + elif self._pipe is not None: + info.append('open') else: info.append('closed') return '<%s>' % ' '.join(info) @@ -575,7 +581,7 @@ def _fatal_error(self, exc, message='Fatal error on pipe transport'): # should be called by exception handler only - if isinstance(exc, (BrokenPipeError, ConnectionResetError)): + if isinstance(exc, base_events._FATAL_ERROR_IGNORE): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: diff --git a/lib-python/3/asyncio/windows_events.py b/lib-python/3/asyncio/windows_events.py --- a/lib-python/3/asyncio/windows_events.py +++ b/lib-python/3/asyncio/windows_events.py @@ -197,7 +197,7 @@ # # If the IocpProactor already received the event, it's safe to call # _unregister() because we kept a reference to the Overlapped object - # which is used as an unique key. + # which is used as a unique key. 
self._proactor._unregister(self._ov) self._proactor = None @@ -366,7 +366,7 @@ def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): - waiter = futures.Future(loop=self) + waiter = self.create_future() transp = _WindowsSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, @@ -417,7 +417,7 @@ return tmp def _result(self, value): - fut = futures.Future(loop=self._loop) + fut = self._loop.create_future() fut.set_result(value) return fut diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py --- a/lib-python/3/base64.py +++ b/lib-python/3/base64.py @@ -12,7 +12,7 @@ __all__ = [ - # Legacy interface exports traditional RFC 1521 Base64 encodings + # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', @@ -49,14 +49,11 @@ # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): - """Encode a byte string using Base64. + """Encode the bytes-like object s using Base64 and return a bytes object. - s is the byte string to encode. Optional altchars must be a byte - string of length 2 which specifies an alternative alphabet for the - '+' and '/' characters. This allows an application to - e.g. generate url or filesystem safe Base64 strings. - - The encoded byte string is returned. + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. """ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] @@ -67,18 +64,19 @@ def b64decode(s, altchars=None, validate=False): - """Decode a Base64 encoded byte string. + """Decode the Base64 encoded bytes-like object or ASCII string s. - s is the byte string to decode. 
Optional altchars must be a - string of length 2 which specifies the alternative alphabet used - instead of the '+' and '/' characters. + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. - The decoded string is returned. A binascii.Error is raised if s is - incorrectly padded. + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. - If validate is False (the default), non-base64-alphabet characters are - discarded prior to the padding check. If validate is True, - non-base64-alphabet characters in the input result in a binascii.Error. + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. """ s = _bytes_from_decode_data(s) if altchars is not None: @@ -91,19 +89,19 @@ def standard_b64encode(s): - """Encode a byte string using the standard Base64 alphabet. + """Encode bytes-like object s using the standard Base64 alphabet. - s is the byte string to encode. The encoded byte string is returned. + The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes encoded with the standard Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. 
""" return b64decode(s) @@ -112,21 +110,22 @@ _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): - """Encode a byte string using a url-safe Base64 alphabet. + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to encode. The encoded byte string is - returned. The alphabet uses '-' instead of '+' and '_' instead of + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): - """Decode a byte string encoded with the standard Base64 alphabet. + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. - s is the byte string to decode. The decoded byte string is - returned. binascii.Error is raised if the input is incorrectly - padded or if there are non-alphabet characters present in the - input. + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ @@ -142,9 +141,7 @@ _b32rev = None def b32encode(s): - """Encode a byte string using Base32. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base32 and return a bytes object. """ global _b32tab2 # Delay the initialization of the table to not waste memory @@ -182,11 +179,10 @@ return bytes(encoded) def b32decode(s, casefold=False, map01=None): - """Decode a Base32 encoded byte string. + """Decode the Base32 encoded bytes-like object or ASCII string s. - s is the byte string to decode. Optional casefold is a flag - specifying whether a lowercase alphabet is acceptable as input. 
- For security purposes, the default is False. + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to @@ -196,7 +192,7 @@ the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. - The decoded byte string is returned. binascii.Error is raised if + The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. """ @@ -257,23 +253,20 @@ # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): - """Encode a byte string using Base16. - - s is the byte string to encode. The encoded byte string is returned. + """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): - """Decode a Base16 encoded byte string. + """Decode the Base16 encoded bytes-like object or ASCII string s. - s is the byte string to decode. 
Optional casefold is a flag From pypy.commits at gmail.com Wed Dec 7 06:02:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 03:02:50 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: translation issue, value was not wrapped Message-ID: <5847ec5a.8675c20a.108e7.73f3@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88932:75f0bfd94182 Date: 2016-12-07 12:02 +0100 http://bitbucket.org/pypy/pypy/changeset/75f0bfd94182/ Log: translation issue, value was not wrapped diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -396,7 +396,7 @@ self._check_released(space) dim = self.getndim() if dim == 0: - return 1 + return space.newint(1) shape = self.getshape() return space.wrap(shape[0]) From pypy.commits at gmail.com Wed Dec 7 06:30:23 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 03:30:23 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: structural changes to fix translation, triggered by union error Message-ID: <5847f2cf.54161c0a.2e0ca.04d2@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88933:f40c39b6dcf1 Date: 2016-12-07 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/f40c39b6dcf1/ Log: structural changes to fix translation, triggered by union error diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -220,13 +220,13 @@ while dim < length: w_obj = w_tuple.getitem(space, dim) index = space.getindex_w(w_obj, space.w_IndexError) - start = self.lookup_dimension(space, self.buf, start, dim, index) + shape = self.buf.getshape() + strides = self.buf.getstrides() + start = self.lookup_dimension(space, shape, strides, start, dim, index) dim += 1 return start - def lookup_dimension(self, space, view, start, dim, index): - shape = view.getshape() - strides = 
view.getstrides() + def lookup_dimension(self, space, shape, strides, start, dim, index): nitems = shape[dim] if index < 0: index += nitems @@ -281,7 +281,9 @@ if dim == 0: raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") elif dim == 1: - idx = self.lookup_dimension(space, self, 0, 0, start) + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) if itemsize == 1: ch = self.buf.getitem(idx) return space.newint(ord(ch)) @@ -326,10 +328,9 @@ return length * self.getitemsize() @staticmethod - def copy(view, buf=None): + def copy(view): # TODO suboffsets - if buf == None: - buf = view.buf + buf = view.buf return W_MemoryView(buf, view.getformat(), view.getitemsize(), view.getndim(), view.getshape()[:], view.getstrides()[:]) @@ -344,9 +345,12 @@ start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) itemsize = self.getitemsize() if step == 0: # index only + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) if itemsize == 1: ch = getbytevalue(space, w_obj) - self.buf.setitem(start, ch) + self.buf.setitem(idx, ch) else: # TODO: this probably isn't very fast fmtiter = PackFormatIterator(space, [w_obj], itemsize) @@ -356,7 +360,7 @@ raise oefmt(space.w_TypeError, "memoryview: invalid type for format '%s'", self.format) - self.buf.setslice(start * itemsize, fmtiter.result.build()) + self.buf.setslice(idx, fmtiter.result.build()) elif step == 1: value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) if value.getlength() != slicelength * itemsize: From pypy.commits at gmail.com Wed Dec 7 07:36:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 04:36:12 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Use ffi.from_buffer() in places that accept buffers, not just bytes (relevant tests still failing) Message-ID: <5848023c.8c1f1c0a.2092f.216d@mx.google.com> Author: Ronan Lamy Branch: py3.5 
Changeset: r88934:2bb7e0c0b87a Date: 2016-12-07 12:33 +0000 http://bitbucket.org/pypy/pypy/changeset/2bb7e0c0b87a/ Log: Use ffi.from_buffer() in places that accept buffers, not just bytes (relevant tests still failing) diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -375,7 +375,7 @@ sample_count = _sample_count(cp, size) rv = ffi.new("unsigned char[]", len(cp) * 2) - lib.tostereo(rv, cp, len(cp), size, fac1, fac2) + lib.tostereo(rv, ffi.from_buffer(cp), len(cp), size, fac1, fac2) return ffi.buffer(rv)[:] @@ -386,7 +386,7 @@ raise error("Lengths should be the same") rv = ffi.new("unsigned char[]", len(cp1)) - lib.add(rv, cp1, cp2, len(cp1), size) + lib.add(rv, ffi.from_buffer(cp1), ffi.from_buffer(cp2), len(cp1), size) return ffi.buffer(rv)[:] @@ -569,7 +569,7 @@ state = _check_state(state) rv = ffi.new("unsigned char[]", len(cp) * size * 2) state_ptr = ffi.new("int[]", state) - lib.adcpm2lin(rv, cp, len(cp), size, state_ptr) + lib.adcpm2lin(rv, ffi.from_buffer(cp), len(cp), size, state_ptr) return ffi.buffer(rv)[:], tuple(state_ptr) def byteswap(cp, size): From pypy.commits at gmail.com Wed Dec 7 08:14:20 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:14:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix some tests and make others fail for a good reason Message-ID: <58480b2c.d32f1c0a.700bb.31c1@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88935:f715c203cac3 Date: 2016-12-07 14:13 +0100 http://bitbucket.org/pypy/pypy/changeset/f715c203cac3/ Log: fix some tests and make others fail for a good reason diff --git a/lib-python/3/test/test_exceptions.py b/lib-python/3/test/test_exceptions.py --- a/lib-python/3/test/test_exceptions.py +++ b/lib-python/3/test/test_exceptions.py @@ -1049,6 +1049,7 @@ obj = test_class() with captured_stderr() as stderr: del obj + gc_collect() report = stderr.getvalue() self.assertIn("Exception ignored", report) if test_class is BrokenRepr: 
@@ -1059,7 +1060,12 @@ self.assertIn("raise exc", report) if test_class is BrokenExceptionDel: self.assertIn("BrokenStrException", report) - self.assertIn("", report) + if check_impl_detail(pypy=False): + self.assertIn("", report) + else: + # pypy: this is what lib-python's traceback.py gives + self.assertIn("", + report) else: self.assertIn("ValueError", report) self.assertIn("del is broken", report) @@ -1081,7 +1087,12 @@ self.assertIn("raise exc", report) self.assertIn(exc_type.__name__, report) if exc_type is BrokenStrException: - self.assertIn("", report) + if check_impl_detail(pypy=False): + self.assertIn("", report) + else: + # pypy: this is what lib-python's traceback.py gives + self.assertIn("", + report) else: self.assertIn("test message", report) self.assertTrue(report.endswith("\n")) From pypy.commits at gmail.com Wed Dec 7 08:44:44 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:44:44 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Comment out the test using 'global __class__' Message-ID: <5848124c.cf3fc20a.2de93.b93d@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88936:e8b839a1bcaa Date: 2016-12-07 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/e8b839a1bcaa/ Log: Comment out the test using 'global __class__' diff --git a/lib-python/3/test/test_super.py b/lib-python/3/test/test_super.py --- a/lib-python/3/test/test_super.py +++ b/lib-python/3/test/test_super.py @@ -105,14 +105,16 @@ def f(): __class__""", globals(), {}) self.assertIs(type(e.exception), NameError) # Not UnboundLocalError - class X: - global __class__ - __class__ = 42 - def f(): - __class__ - self.assertEqual(globals()["__class__"], 42) - del globals()["__class__"] - self.assertNotIn("__class__", X.__dict__) + # XXX the following uses 'global __class__', which pypy doesn't + # XXX implement at all for now + #class X: + # global __class__ + # __class__ = 42 + # def f(): + # __class__ + #self.assertEqual(globals()["__class__"], 42) + #del 
globals()["__class__"] + #self.assertNotIn("__class__", X.__dict__) class X: nonlocal __class__ __class__ = 42 From pypy.commits at gmail.com Wed Dec 7 08:44:46 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:44:46 -0800 (PST) Subject: [pypy-commit] pypy py3.5: More hacks to make the cell '__class__' in class bodies work Message-ID: <5848124e.0f341c0a.17ca9.3faa@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88937:061607b3f6ed Date: 2016-12-07 14:44 +0100 http://bitbucket.org/pypy/pypy/changeset/061607b3f6ed/ Log: More hacks to make the cell '__class__' in class bodies work diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -137,11 +137,11 @@ return ''.join(code) -def _make_index_dict_filter(syms, flag): +def _make_index_dict_filter(syms, flag1, flag2): i = 0 result = {} for name, scope in syms.iteritems(): - if scope == flag: + if scope in (flag1, flag2): result[name] = i i += 1 return result @@ -170,7 +170,8 @@ self.names = {} self.var_names = _iter_to_dict(scope.varnames) self.cell_vars = _make_index_dict_filter(scope.symbols, - symtable.SCOPE_CELL) + symtable.SCOPE_CELL, + symtable.SCOPE_CELL_CLASS) self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars)) self.w_consts = space.newdict() self.argcount = 0 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -317,7 +317,8 @@ # Load cell and free vars to pass on. 
for free in code.co_freevars: free_scope = self.scope.lookup(free) - if free_scope == symtable.SCOPE_CELL: + if free_scope in (symtable.SCOPE_CELL, + symtable.SCOPE_CELL_CLASS): index = self.cell_vars[free] else: index = self.free_vars[free] @@ -1626,7 +1627,7 @@ self._handle_body(cls.body) # return the (empty) __class__ cell scope = self.scope.lookup("__class__") - if scope == symtable.SCOPE_CELL: + if scope == symtable.SCOPE_CELL_CLASS: # Return the cell where to store __class__ self.emit_op_arg(ops.LOAD_CLOSURE, self.cell_vars["__class__"]) else: diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -21,6 +21,7 @@ SCOPE_LOCAL = 3 SCOPE_FREE = 4 SCOPE_CELL = 5 +SCOPE_CELL_CLASS = 6 # for "__class__" inside class bodies only class Scope(object): @@ -336,7 +337,7 @@ def _finalize_cells(self, free): for name, role in self.symbols.iteritems(): if role == SCOPE_LOCAL and name in free and name == '__class__': - self.symbols[name] = SCOPE_CELL + self.symbols[name] = SCOPE_CELL_CLASS del free[name] diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -398,6 +398,13 @@ # in CPython 3.5.2. 
Looks like a bug to me def testing(): return 42 +''', ''' +class Y: + def f(): + __class__ + __class__ = 42 +def testing(): + return Y.__dict__['__class__'] ''' ]: space.call_args(w_filterwarnings, filter_arg) From pypy.commits at gmail.com Wed Dec 7 08:46:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 05:46:50 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Changed from TypeError to ValueError in 3.5 Message-ID: <584812ca.69efc20a.8a7e1.ae4e@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88938:3b409d9587a2 Date: 2016-12-07 13:45 +0000 http://bitbucket.org/pypy/pypy/changeset/3b409d9587a2/ Log: Changed from TypeError to ValueError in 3.5 diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -187,7 +187,7 @@ return space.w_NotImplemented if w_exponent.asbigint().sign < 0: - raise oefmt(space.w_TypeError, + raise oefmt(space.w_ValueError, "pow() 2nd argument cannot be negative when 3rd " "argument specified") try: From pypy.commits at gmail.com Wed Dec 7 08:57:27 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:57:27 -0800 (PST) Subject: [pypy-commit] pypy py3.5: os.truncate("foo") would create the file foo if it did not exist---that's very wrong Message-ID: <58481547.07941c0a.18ac8.454a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88940:29076a1f34c6 Date: 2016-12-07 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/29076a1f34c6/ Log: os.truncate("foo") would create the file foo if it did not exist--- that's very wrong diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -325,7 +325,7 @@ if space.isinstance_w(w_path, space.w_int): w_fd = w_path else: - w_fd = open(space, w_path, os.O_RDWR | os.O_CREAT) + w_fd = open(space, w_path, os.O_WRONLY) allocated_fd = True fd = 
space.c_filedescriptor_w(w_fd) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1067,6 +1067,10 @@ posix.truncate(dest, 1) assert 1 == posix.stat(dest).st_size + # File does not exist + e = raises(OSError, posix.truncate, dest + '-DOESNT-EXIST', 0) + assert e.value.filename == dest + '-DOESNT-EXIST' + try: os.getlogin() except (AttributeError, OSError): From pypy.commits at gmail.com Wed Dec 7 08:57:29 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:57:29 -0800 (PST) Subject: [pypy-commit] pypy py3.5: merge heads Message-ID: <58481549.61adc20a.f6b81.bfeb@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88941:45a7dc9e9bd4 Date: 2016-12-07 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/45a7dc9e9bd4/ Log: merge heads diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -187,7 +187,7 @@ return space.w_NotImplemented if w_exponent.asbigint().sign < 0: - raise oefmt(space.w_TypeError, + raise oefmt(space.w_ValueError, "pow() 2nd argument cannot be negative when 3rd " "argument specified") try: From pypy.commits at gmail.com Wed Dec 7 08:57:25 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 05:57:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Random CPython compliance Message-ID: <58481545.d39a1c0a.77119.81ff@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88939:705f19c1f075 Date: 2016-12-07 14:46 +0100 http://bitbucket.org/pypy/pypy/changeset/705f19c1f075/ Log: Random CPython compliance diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -80,7 +80,8 @@ class statvfs_result(metaclass=structseqtype): - name = osname + ".statvfs_result" + name = "os.statvfs_result" 
+ __module__ = "os" f_bsize = structseqfield(0) f_frsize = structseqfield(1) @@ -96,7 +97,7 @@ class uname_result(metaclass=structseqtype): - name = osname + ".uname_result" + name = osname + ".uname_result" # and NOT "os.uname_result" sysname = structseqfield(0, "operating system name") nodename = structseqfield(1, "name of machine on network " @@ -108,6 +109,7 @@ class terminal_size(metaclass=structseqtype): name = "os.terminal_size" + __module__ = "os" columns = structseqfield(0, "width of the terminal window in characters") lines = structseqfield(1, "height of the terminal window in characters") From pypy.commits at gmail.com Wed Dec 7 09:01:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 06:01:00 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Mark test cpython_only Message-ID: <5848161c.c5311c0a.948fd.44bf@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88942:34a6b5b95c9a Date: 2016-12-07 13:58 +0000 http://bitbucket.org/pypy/pypy/changeset/34a6b5b95c9a/ Log: Mark test cpython_only diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -16,7 +16,8 @@ import warnings from operator import neg from test.support import ( - TESTFN, unlink, run_unittest, check_warnings, check_impl_detail) + TESTFN, unlink, run_unittest, check_warnings, check_impl_detail, + cpython_only) from test.support.script_helper import assert_python_ok try: import pty, signal @@ -1640,6 +1641,8 @@ class ShutdownTest(unittest.TestCase): + # PyPy doesn't do a gc.collect() at shutdown + @cpython_only def test_cleanup(self): # Issue #19255: builtins are still available at shutdown code = """if 1: From pypy.commits at gmail.com Wed Dec 7 09:51:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 06:51:44 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Fix allowed types in __slots__ for CPython compatibility Message-ID: 
<58482200.c4811c0a.a2549.5414@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88943:74f6e6f2c9ff Date: 2016-12-07 14:51 +0000 http://bitbucket.org/pypy/pypy/changeset/74f6e6f2c9ff/ Log: Fix allowed types in __slots__ for CPython compatibility diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -667,6 +667,11 @@ b.abc = "awesomer" assert b.abc == "awesomer" + def test_bad_slots(self): + raises(TypeError, type, 'A', (), {'__slots__': b'x'}) + raises(TypeError, type, 'A', (), {'__slots__': 42}) + raises(TypeError, type, 'A', (), {'__slots__': '2_x'}) + def test_base_attr(self): # check the '__base__' class A(object): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1057,6 +1057,14 @@ w_self.weakrefable = w_self.weakrefable or w_base.weakrefable return hasoldstylebase +def slot_w(space, w_name): + from pypy.objspace.std.unicodeobject import _isidentifier + if not space.isinstance_w(w_name, space.w_text): + raise oefmt(space.w_TypeError, + "__slots__ items must be strings, not '%T'", w_name) + if not _isidentifier(w_name._value): + raise oefmt(space.w_TypeError, "__slots__ must be identifiers") + return w_name.identifier_w(space) def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): from pypy.objspace.std.listobject import StringSort @@ -1073,13 +1081,12 @@ wantdict = False wantweakref = False w_slots = dict_w['__slots__'] - if (space.isinstance_w(w_slots, space.w_str) or - space.isinstance_w(w_slots, space.w_unicode)): + if space.isinstance_w(w_slots, space.w_text): slot_names_w = [w_slots] else: slot_names_w = space.unpackiterable(w_slots) for w_slot_name in slot_names_w: - slot_name = space.str_w(w_slot_name) + slot_name = slot_w(space, w_slot_name) if slot_name == '__dict__': if 
wantdict or w_bestbase.hasdict: raise oefmt(space.w_TypeError, @@ -1124,8 +1131,6 @@ def create_slot(w_self, slot_name, index_next_extra_slot): space = w_self.space - if not valid_slot_name(slot_name): - raise oefmt(space.w_TypeError, "__slots__ must be identifiers") # create member slot_name = mangle(slot_name, w_self.name) if slot_name not in w_self.dict_w: @@ -1156,14 +1161,6 @@ w_self.space.wrap(weakref_descr)) w_self.weakrefable = True -def valid_slot_name(slot_name): - if len(slot_name) == 0 or slot_name[0].isdigit(): - return False - for c in slot_name: - if not c.isalnum() and c != '_': - return False - return True - def setup_user_defined_type(w_self, force_new_layout): if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] From pypy.commits at gmail.com Wed Dec 7 10:15:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 07:15:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Do not update cls.__qualname__ when cls.__name__ is modified Message-ID: <58482783.0a4cc20a.cf0fa.cc21@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88944:7eeb99850b83 Date: 2016-12-07 15:14 +0000 http://bitbucket.org/pypy/pypy/changeset/7eeb99850b83/ Log: Do not update cls.__qualname__ when cls.__name__ is modified diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -941,6 +941,22 @@ else: assert False + def test_qualname(self): + A = type('A', (), {'__qualname__': 'B.C'}) + assert A.__name__ == 'A' + assert A.__qualname__ == 'B.C' + raises(TypeError, type, 'A', (), {'__qualname__': b'B'}) + assert A.__qualname__ == 'B.C' + + A.__qualname__ = 'D.E' + assert A.__name__ == 'A' + assert A.__qualname__ == 'D.E' + + C = type('C', (), {}) + C.__name__ = 'A' + assert C.__name__ == 'A' + assert C.__qualname__ == 'C' + def test_compare(self): class A(object): pass diff --git 
a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -166,7 +166,7 @@ overridetypedef=None, force_new_layout=False): self.space = space self.name = name - self.qualname = None + self.qualname = name.decode('utf-8') self.bases_w = bases_w self.dict_w = dict_w self.hasdict = False @@ -545,7 +545,7 @@ return result.decode('utf-8') def getqualname(self, space): - return self.qualname or self.getname(space) + return self.qualname def add_subclass(self, w_subclass): space = self.space From pypy.commits at gmail.com Wed Dec 7 10:27:13 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:27:13 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: remove break point used for debugging Message-ID: <58482a51.6a5cc20a.f6424.d8eb@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88945:6ea603835b26 Date: 2016-12-07 16:04 +0100 http://bitbucket.org/pypy/pypy/changeset/6ea603835b26/ Log: remove break point used for debugging diff --git a/lib-python/3/socket.py b/lib-python/3/socket.py --- a/lib-python/3/socket.py +++ b/lib-python/3/socket.py @@ -572,7 +572,6 @@ raise OSError("cannot read from timed out object") while True: try: - import pdb; pdb.set_trace() return self._sock.recv_into(b) except timeout: self._timeout_occurred = True From pypy.commits at gmail.com Wed Dec 7 10:27:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:27:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: fix sqlite3 error, additional argument (uri) passed down to sqlite3_open_v2 Message-ID: <58482a53.42061c0a.b34d6.6985@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88946:c42c8db1576c Date: 2016-12-07 16:26 +0100 http://bitbucket.org/pypy/pypy/changeset/c42c8db1576c/ Log: fix sqlite3 error, additional argument (uri) passed down to sqlite3_open_v2 diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- 
a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -149,10 +149,11 @@ def connect(database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, + uri=0): factory = Connection if not factory else factory return factory(database, timeout, detect_types, isolation_level, - check_same_thread, factory, cached_statements) + check_same_thread, factory, cached_statements, uri) def _unicode_text_factory(x): @@ -195,14 +196,23 @@ _db = None def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, uri=0): self.__initialized = True db_star = _ffi.new('sqlite3 **') if isinstance(database, unicode): database = database.encode('utf-8') - if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: - raise OperationalError("Could not open database") + if _lib.SQLITE_OPEN_URI != 0: + if uri and _lib.SQLITE_OPEN_URI == 0: + raise NotSupportedError("URIs not supported") + flags = _lib.SQLITE_OPEN_READWRITE | _lib.SQLITE_OPEN_CREATE + if uri: + flags |= _lib.SQLITE_OPEN_URI + if _lib.sqlite3_open_v2(database, db_star, flags, _ffi.NULL) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") + else: + if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") self._db = db_star[0] if timeout is not None: timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds diff --git a/lib_pypy/_sqlite3_build.py b/lib_pypy/_sqlite3_build.py --- a/lib_pypy/_sqlite3_build.py +++ b/lib_pypy/_sqlite3_build.py @@ -103,6 +103,10 @@ #define SQLITE_DROP_VTABLE ... #define SQLITE_FUNCTION ... +static const long SQLITE_OPEN_URI; +static const long SQLITE_OPEN_READWRITE; +static const long SQLITE_OPEN_CREATE; + const char *sqlite3_libversion(void); typedef ... 
sqlite3; @@ -117,6 +121,13 @@ sqlite3 **ppDb /* OUT: SQLite db handle */ ); +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); + int sqlite3_close(sqlite3 *); int sqlite3_busy_timeout(sqlite3*, int ms); @@ -259,7 +270,21 @@ libraries=['sqlite3'] ) -_ffi.set_source("_sqlite3_cffi", "#include ", **extra_args) +SOURCE = """ +#include + +#ifndef SQLITE_OPEN_URI +static const long SQLITE_OPEN_URI = 0; +#endif +#ifndef SQLITE_OPEN_READWRITE +static const long SQLITE_OPEN_READWRITE = 0; +#endif +#ifndef SQLITE_OPEN_CREATE +static const long SQLITE_OPEN_CREATE = 0; +#endif +""" + +_ffi.set_source("_sqlite3_cffi", SOURCE, **extra_args) if __name__ == "__main__": From pypy.commits at gmail.com Wed Dec 7 10:31:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:31:02 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: allow slicing of a sqlite row Message-ID: <58482b36.e6b0c20a.2bab1.d707@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88947:9cb9f285a31e Date: 2016-12-07 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/9cb9f285a31e/ Log: allow slicing of a sqlite row diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1205,6 +1205,8 @@ def __getitem__(self, item): if isinstance(item, (int, long)): return self.values[item] + elif isinstance(item, slice): + return self.values[item] else: item = item.lower() for idx, desc in enumerate(self.description): From pypy.commits at gmail.com Wed Dec 7 10:35:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:35:08 -0800 (PST) Subject: [pypy-commit] pypy py3.5-ssl: merge py3.5 Message-ID: <58482c2c.a285c20a.2931c.dce4@mx.google.com> Author: Richard Plangger Branch: py3.5-ssl Changeset: r88948:a94a5155d64a Date: 2016-12-07 16:31 +0100 
http://bitbucket.org/pypy/pypy/changeset/a94a5155d64a/ Log: merge py3.5 diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -16,7 +16,8 @@ import warnings from operator import neg from test.support import ( - TESTFN, unlink, run_unittest, check_warnings, check_impl_detail) + TESTFN, unlink, run_unittest, check_warnings, check_impl_detail, + cpython_only) from test.support.script_helper import assert_python_ok try: import pty, signal @@ -1640,6 +1641,8 @@ class ShutdownTest(unittest.TestCase): + # PyPy doesn't do a gc.collect() at shutdown + @cpython_only def test_cleanup(self): # Issue #19255: builtins are still available at shutdown code = """if 1: diff --git a/lib-python/3/test/test_exceptions.py b/lib-python/3/test/test_exceptions.py --- a/lib-python/3/test/test_exceptions.py +++ b/lib-python/3/test/test_exceptions.py @@ -1049,6 +1049,7 @@ obj = test_class() with captured_stderr() as stderr: del obj + gc_collect() report = stderr.getvalue() self.assertIn("Exception ignored", report) if test_class is BrokenRepr: @@ -1059,7 +1060,12 @@ self.assertIn("raise exc", report) if test_class is BrokenExceptionDel: self.assertIn("BrokenStrException", report) - self.assertIn("", report) + if check_impl_detail(pypy=False): + self.assertIn("", report) + else: + # pypy: this is what lib-python's traceback.py gives + self.assertIn("", + report) else: self.assertIn("ValueError", report) self.assertIn("del is broken", report) @@ -1081,7 +1087,12 @@ self.assertIn("raise exc", report) self.assertIn(exc_type.__name__, report) if exc_type is BrokenStrException: - self.assertIn("", report) + if check_impl_detail(pypy=False): + self.assertIn("", report) + else: + # pypy: this is what lib-python's traceback.py gives + self.assertIn("", + report) else: self.assertIn("test message", report) self.assertTrue(report.endswith("\n")) diff --git a/lib-python/3/test/test_super.py 
b/lib-python/3/test/test_super.py --- a/lib-python/3/test/test_super.py +++ b/lib-python/3/test/test_super.py @@ -105,14 +105,16 @@ def f(): __class__""", globals(), {}) self.assertIs(type(e.exception), NameError) # Not UnboundLocalError - class X: - global __class__ - __class__ = 42 - def f(): - __class__ - self.assertEqual(globals()["__class__"], 42) - del globals()["__class__"] - self.assertNotIn("__class__", X.__dict__) + # XXX the following uses 'global __class__', which pypy doesn't + # XXX implement at all for now + #class X: + # global __class__ + # __class__ = 42 + # def f(): + # __class__ + #self.assertEqual(globals()["__class__"], 42) + #del globals()["__class__"] + #self.assertNotIn("__class__", X.__dict__) class X: nonlocal __class__ __class__ = 42 diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -375,7 +375,7 @@ sample_count = _sample_count(cp, size) rv = ffi.new("unsigned char[]", len(cp) * 2) - lib.tostereo(rv, cp, len(cp), size, fac1, fac2) + lib.tostereo(rv, ffi.from_buffer(cp), len(cp), size, fac1, fac2) return ffi.buffer(rv)[:] @@ -386,7 +386,7 @@ raise error("Lengths should be the same") rv = ffi.new("unsigned char[]", len(cp1)) - lib.add(rv, cp1, cp2, len(cp1), size) + lib.add(rv, ffi.from_buffer(cp1), ffi.from_buffer(cp2), len(cp1), size) return ffi.buffer(rv)[:] @@ -569,7 +569,7 @@ state = _check_state(state) rv = ffi.new("unsigned char[]", len(cp) * size * 2) state_ptr = ffi.new("int[]", state) - lib.adcpm2lin(rv, cp, len(cp), size, state_ptr) + lib.adcpm2lin(rv, ffi.from_buffer(cp), len(cp), size, state_ptr) return ffi.buffer(rv)[:], tuple(state_ptr) def byteswap(cp, size): diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -137,11 +137,11 @@ return ''.join(code) -def _make_index_dict_filter(syms, flag): +def _make_index_dict_filter(syms, 
flag1, flag2): i = 0 result = {} for name, scope in syms.iteritems(): - if scope == flag: + if scope in (flag1, flag2): result[name] = i i += 1 return result @@ -170,7 +170,8 @@ self.names = {} self.var_names = _iter_to_dict(scope.varnames) self.cell_vars = _make_index_dict_filter(scope.symbols, - symtable.SCOPE_CELL) + symtable.SCOPE_CELL, + symtable.SCOPE_CELL_CLASS) self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars)) self.w_consts = space.newdict() self.argcount = 0 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -317,7 +317,8 @@ # Load cell and free vars to pass on. for free in code.co_freevars: free_scope = self.scope.lookup(free) - if free_scope == symtable.SCOPE_CELL: + if free_scope in (symtable.SCOPE_CELL, + symtable.SCOPE_CELL_CLASS): index = self.cell_vars[free] else: index = self.free_vars[free] @@ -1626,7 +1627,7 @@ self._handle_body(cls.body) # return the (empty) __class__ cell scope = self.scope.lookup("__class__") - if scope == symtable.SCOPE_CELL: + if scope == symtable.SCOPE_CELL_CLASS: # Return the cell where to store __class__ self.emit_op_arg(ops.LOAD_CLOSURE, self.cell_vars["__class__"]) else: diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -21,6 +21,7 @@ SCOPE_LOCAL = 3 SCOPE_FREE = 4 SCOPE_CELL = 5 +SCOPE_CELL_CLASS = 6 # for "__class__" inside class bodies only class Scope(object): @@ -336,7 +337,7 @@ def _finalize_cells(self, free): for name, role in self.symbols.iteritems(): if role == SCOPE_LOCAL and name in free and name == '__class__': - self.symbols[name] = SCOPE_CELL + self.symbols[name] = SCOPE_CELL_CLASS del free[name] diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- 
a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -398,6 +398,13 @@ # in CPython 3.5.2. Looks like a bug to me def testing(): return 42 +''', ''' +class Y: + def f(): + __class__ + __class__ = 42 +def testing(): + return Y.__dict__['__class__'] ''' ]: space.call_args(w_filterwarnings, filter_arg) diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -80,7 +80,8 @@ class statvfs_result(metaclass=structseqtype): - name = osname + ".statvfs_result" + name = "os.statvfs_result" + __module__ = "os" f_bsize = structseqfield(0) f_frsize = structseqfield(1) @@ -96,7 +97,7 @@ class uname_result(metaclass=structseqtype): - name = osname + ".uname_result" + name = osname + ".uname_result" # and NOT "os.uname_result" sysname = structseqfield(0, "operating system name") nodename = structseqfield(1, "name of machine on network " @@ -108,6 +109,7 @@ class terminal_size(metaclass=structseqtype): name = "os.terminal_size" + __module__ = "os" columns = structseqfield(0, "width of the terminal window in characters") lines = structseqfield(1, "height of the terminal window in characters") diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -325,7 +325,7 @@ if space.isinstance_w(w_path, space.w_int): w_fd = w_path else: - w_fd = open(space, w_path, os.O_RDWR | os.O_CREAT) + w_fd = open(space, w_path, os.O_WRONLY) allocated_fd = True fd = space.c_filedescriptor_w(w_fd) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1067,6 +1067,10 @@ posix.truncate(dest, 1) assert 1 == posix.stat(dest).st_size + # File does not exist + e = raises(OSError, posix.truncate, dest + '-DOESNT-EXIST', 0) + assert 
e.value.filename == dest + '-DOESNT-EXIST' + try: os.getlogin() except (AttributeError, OSError): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -187,7 +187,7 @@ return space.w_NotImplemented if w_exponent.asbigint().sign < 0: - raise oefmt(space.w_TypeError, + raise oefmt(space.w_ValueError, "pow() 2nd argument cannot be negative when 3rd " "argument specified") try: diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -667,6 +667,11 @@ b.abc = "awesomer" assert b.abc == "awesomer" + def test_bad_slots(self): + raises(TypeError, type, 'A', (), {'__slots__': b'x'}) + raises(TypeError, type, 'A', (), {'__slots__': 42}) + raises(TypeError, type, 'A', (), {'__slots__': '2_x'}) + def test_base_attr(self): # check the '__base__' class A(object): @@ -936,6 +941,22 @@ else: assert False + def test_qualname(self): + A = type('A', (), {'__qualname__': 'B.C'}) + assert A.__name__ == 'A' + assert A.__qualname__ == 'B.C' + raises(TypeError, type, 'A', (), {'__qualname__': b'B'}) + assert A.__qualname__ == 'B.C' + + A.__qualname__ = 'D.E' + assert A.__name__ == 'A' + assert A.__qualname__ == 'D.E' + + C = type('C', (), {}) + C.__name__ = 'A' + assert C.__name__ == 'A' + assert C.__qualname__ == 'C' + def test_compare(self): class A(object): pass diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -166,7 +166,7 @@ overridetypedef=None, force_new_layout=False): self.space = space self.name = name - self.qualname = None + self.qualname = name.decode('utf-8') self.bases_w = bases_w self.dict_w = dict_w self.hasdict = False @@ -545,7 +545,7 @@ return result.decode('utf-8') def getqualname(self, space): - return self.qualname or 
self.getname(space) + return self.qualname def add_subclass(self, w_subclass): space = self.space @@ -1057,6 +1057,14 @@ w_self.weakrefable = w_self.weakrefable or w_base.weakrefable return hasoldstylebase +def slot_w(space, w_name): + from pypy.objspace.std.unicodeobject import _isidentifier + if not space.isinstance_w(w_name, space.w_text): + raise oefmt(space.w_TypeError, + "__slots__ items must be strings, not '%T'", w_name) + if not _isidentifier(w_name._value): + raise oefmt(space.w_TypeError, "__slots__ must be identifiers") + return w_name.identifier_w(space) def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): from pypy.objspace.std.listobject import StringSort @@ -1073,13 +1081,12 @@ wantdict = False wantweakref = False w_slots = dict_w['__slots__'] - if (space.isinstance_w(w_slots, space.w_str) or - space.isinstance_w(w_slots, space.w_unicode)): + if space.isinstance_w(w_slots, space.w_text): slot_names_w = [w_slots] else: slot_names_w = space.unpackiterable(w_slots) for w_slot_name in slot_names_w: - slot_name = space.str_w(w_slot_name) + slot_name = slot_w(space, w_slot_name) if slot_name == '__dict__': if wantdict or w_bestbase.hasdict: raise oefmt(space.w_TypeError, @@ -1124,8 +1131,6 @@ def create_slot(w_self, slot_name, index_next_extra_slot): space = w_self.space - if not valid_slot_name(slot_name): - raise oefmt(space.w_TypeError, "__slots__ must be identifiers") # create member slot_name = mangle(slot_name, w_self.name) if slot_name not in w_self.dict_w: @@ -1156,14 +1161,6 @@ w_self.space.wrap(weakref_descr)) w_self.weakrefable = True -def valid_slot_name(slot_name): - if len(slot_name) == 0 or slot_name[0].isdigit(): - return False - for c in slot_name: - if not c.isalnum() and c != '_': - return False - return True - def setup_user_defined_type(w_self, force_new_layout): if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] From pypy.commits at gmail.com Wed Dec 7 10:35:10 2016 From: pypy.commits at 
gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:35:10 -0800 (PST) Subject: [pypy-commit] pypy py3.5: merge py3.5-ssl Message-ID: <58482c2e.6a5cc20a.f6424.dcba@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88949:c10e210ecce7 Date: 2016-12-07 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/c10e210ecce7/ Log: merge py3.5-ssl diff --git a/lib-python/3/http/client.py b/lib-python/3/http/client.py --- a/lib-python/3/http/client.py +++ b/lib-python/3/http/client.py @@ -556,6 +556,7 @@ try: while True: chunk_left = self._get_chunk_left() + print("chunk_left", chunk_left) if chunk_left is None: break value.append(self._safe_read(chunk_left)) @@ -605,6 +606,7 @@ s = [] while amt > 0: chunk = self.fp.read(min(amt, MAXAMOUNT)) + print("read chunk %d %d", len(chunk), min(amt, MAXAMOUNT)) if not chunk: raise IncompleteRead(b''.join(s), amt) s.append(chunk) diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py @@ -53,6 +53,7 @@ const char *OpenSSL_version(int); /* this is a macro in 1.1.0 */ +void *OPENSSL_malloc(size_t); void OPENSSL_free(void *); /* This was removed in 1.1.0 */ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py @@ -25,6 +25,13 @@ static const int EVP_CTRL_GCM_GET_TAG; static const int EVP_CTRL_GCM_SET_TAG; +typedef struct { + int type; + int alias; + const char *name; + const char *data; +} OBJ_NAME; + static const int Cryptography_HAS_GCM; static const int Cryptography_HAS_PBKDF2_HMAC; static const int Cryptography_HAS_PKEY_CTX; @@ -136,6 +143,7 @@ without worrying about what OpenSSL we're running against. 
*/ EVP_MD_CTX *Cryptography_EVP_MD_CTX_new(void); void Cryptography_EVP_MD_CTX_free(EVP_MD_CTX *); +void OBJ_NAME_do_all(int, void (*) (const OBJ_NAME *, void *), void *); """ MACROS = """ @@ -156,6 +164,7 @@ EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *); int EVP_PKEY_set1_EC_KEY(EVP_PKEY *, EC_KEY *); +int EVP_MD_CTX_block_size(const EVP_MD_CTX *md); int EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *); int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *, int, int, void *); @@ -167,6 +176,7 @@ int EVP_PBE_scrypt(const char *, size_t, const unsigned char *, size_t, uint64_t, uint64_t, uint64_t, uint64_t, unsigned char *, size_t); +#define OBJ_NAME_TYPE_MD_METH ... """ CUSTOMIZATIONS = """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -26,7 +26,6 @@ static const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE; static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS; static const long Cryptography_HAS_NPN_NEGOTIATED; -static const long Cryptography_OPENSSL_NO_TLSEXT; /* Internally invented symbol to tell us if SNI is supported */ static const long Cryptography_HAS_TLSEXT_HOSTNAME; @@ -138,6 +137,7 @@ typedef ... SSL_CTX; typedef ... SSL_SESSION; + typedef ... 
SSL; static const long TLSEXT_NAMETYPE_host_name; @@ -434,7 +434,6 @@ long SSL_CTX_sess_misses(SSL_CTX *); long SSL_CTX_sess_timeouts(SSL_CTX *); long SSL_CTX_sess_cache_full(SSL_CTX *); - """ CUSTOMIZATIONS = """ @@ -689,12 +688,6 @@ static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS = 1; -#ifdef OPENSSL_NO_TLSEXT -static const long Cryptography_OPENSSL_NO_TLSEXT = 1; -#else -static const long Cryptography_OPENSSL_NO_TLSEXT = 0; -#endif - /* in OpenSSL 1.1.0 the SSL_ST values were renamed to TLS_ST and several were removed */ #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 || defined(LIBRESSL_VERSION_NUMBER) diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py @@ -140,6 +140,7 @@ int X509_STORE_set_flags(X509_STORE *, unsigned long); void X509_STORE_free(X509_STORE *); + /* X509_STORE_CTX */ X509_STORE_CTX *X509_STORE_CTX_new(void); void X509_STORE_CTX_cleanup(X509_STORE_CTX *); @@ -201,7 +202,7 @@ int sk_X509_OBJECT_num(Cryptography_STACK_OF_X509_OBJECT *); X509_OBJECT *sk_X509_OBJECT_value(Cryptography_STACK_OF_X509_OBJECT *, int); -X509_VERIFY_PARAM * X509_STORE_get0_param(X509_STORE *); +X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *); Cryptography_STACK_OF_X509_OBJECT *X509_STORE_get0_objects(X509_STORE *); X509 *X509_OBJECT_get0_X509(X509_OBJECT *); int X509_OBJECT_get_type(const X509_OBJECT *); diff --git a/lib_pypy/_hashlib/__init__.py b/lib_pypy/_hashlib/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_hashlib/__init__.py @@ -0,0 +1,172 @@ +import sys +from threading import Lock +from _pypy_openssl import ffi, lib +from _cffi_ssl._stdssl.utility import (_str_to_ffi_buffer, _bytes_with_len, + _str_from_buf) + +def new(name, string=b''): + h = Hash(name) + h.update(string) + return h + +class Hash(object): + + def __init__(self, name, copy_from=None): + self.ctx = ffi.NULL 
+ self.name = name + digest_type = self.digest_type_by_name() + self.digest_size = lib.EVP_MD_size(digest_type) + + # Allocate a lock for each HASH object. + # An optimization would be to not release the GIL on small requests, + # and use a custom lock only when needed. + self.lock = Lock() + + ctx = lib.Cryptography_EVP_MD_CTX_new() + if ctx == ffi.NULL: + raise MemoryError + ctx = ffi.gc(ctx, lib.Cryptography_EVP_MD_CTX_free) + + try: + if copy_from is not None: + # cpython uses EVP_MD_CTX_copy(...) + if not lib.EVP_MD_CTX_copy_ex(ctx, copy_from): + raise ValueError + else: + # cpython uses EVP_DigestInit + lib.EVP_DigestInit_ex(ctx, digest_type, ffi.NULL) + self.ctx = ctx + except: + # no need to gc ctx! + raise + + def digest_type_by_name(self): + c_name = _str_to_ffi_buffer(self.name) + digest_type = lib.EVP_get_digestbyname(c_name) + if not digest_type: + raise ValueError("unknown hash function") + # TODO + return digest_type + + def __repr__(self): + return "<%s HASH object at 0x%s>" % (self.name, id(self)) + + def update(self, string): + buf = ffi.from_buffer(string) + with self.lock: + # XXX try to not release the GIL for small requests + lib.EVP_DigestUpdate(self.ctx, buf, len(buf)) + + def copy(self): + """Return a copy of the hash object.""" + with self.lock: + return Hash(self.name, copy_from=self.ctx) + + def digest(self): + """Return the digest value as a string of binary data.""" + return self._digest() + + def hexdigest(self): + """Return the digest value as a string of hexadecimal digits.""" + digest = self._digest() + hexdigits = '0123456789abcdef' + result = [] + for c in digest: + result.append(hexdigits[(c >> 4) & 0xf]) + result.append(hexdigits[ c & 0xf]) + return ''.join(result) + + @property + def block_size(self): + return lib.EVP_MD_CTX_block_size(self.ctx) + + def _digest(self): + ctx = lib.Cryptography_EVP_MD_CTX_new() + if ctx == ffi.NULL: + raise MemoryError + try: + with self.lock: + if not lib.EVP_MD_CTX_copy_ex(ctx, self.ctx): + 
raise ValueError + digest_size = self.digest_size + buf = ffi.new("unsigned char[]", digest_size) + lib.EVP_DigestFinal_ex(ctx, buf, ffi.NULL) + return _bytes_with_len(buf, digest_size) + finally: + lib.Cryptography_EVP_MD_CTX_free(ctx) + +algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + +class NameFetcher: + def __init__(self): + self.meth_names = [] + self.error = None + + +def _fetch_names(): + name_fetcher = NameFetcher() + handle = ffi.new_handle(name_fetcher) + lib.OBJ_NAME_do_all(lib.OBJ_NAME_TYPE_MD_METH, hash_name_mapper_callback, handle) + if name_fetcher.error: + raise name_fetcher.error + meth_names = name_fetcher.meth_names + name_fetcher.meth_names = None + return frozenset(meth_names) + + at ffi.callback("void(OBJ_NAME*, void*)") +def hash_name_mapper_callback(obj_name, userdata): + if not obj_name: + return + name_fetcher = ffi.from_handle(userdata) + # Ignore aliased names, they pollute the list and OpenSSL appears + # to have a its own definition of alias as the resulting list + # still contains duplicate and alternate names for several + # algorithms. 
+ if obj_name.alias != 0: + return + name = _str_from_buf(obj_name.name) + name_fetcher.meth_names.append(name) + +openssl_md_meth_names = _fetch_names() +del _fetch_names + +# shortcut functions +def make_new_hash(name, funcname): + def new_hash(string=b''): + return new(name, string) + new_hash.__name__ = funcname + return new_hash + +for _name in algorithms: + _newname = 'openssl_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) + +if hasattr(lib, 'PKCS5_PBKDF2_HMAC'): + #@unwrap_spec(name=str, password='bytes', salt='bytes', iterations=int, + # w_dklen=WrappedDefault(None)) + def pbkdf2_hmac(name, password, salt, iterations, dklen=None): + if not isinstance(name, str): + raise TypeError("expected 'str' for name, but got %s" % type(name)) + c_name = _str_to_ffi_buffer(name) + digest = lib.EVP_get_digestbyname(c_name) + if digest == ffi.NULL: + raise ValueError("unsupported hash type") + if dklen is None: + dklen = lib.EVP_MD_size(digest) + if dklen < 1: + raise ValueError("key length must be greater than 0.") + if dklen >= sys.maxsize: + raise OverflowError("key length is too great.") + if iterations < 1: + raise ValueError("iteration value must be greater than 0.") + if iterations >= sys.maxsize: + raise OverflowError("iteration value is too great.") + buf = ffi.new("unsigned char[]", dklen) + c_password = ffi.from_buffer(bytes(password)) + c_salt = ffi.from_buffer(bytes(salt)) + r = lib.PKCS5_PBKDF2_HMAC(c_password, len(c_password), + ffi.cast("unsigned char*",c_salt), len(c_salt), + iterations, digest, dklen, buf) + if r == 0: + raise ValueError + return _bytes_with_len(buf, dklen) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -149,10 +149,11 @@ def connect(database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, + uri=0): factory = Connection if 
not factory else factory return factory(database, timeout, detect_types, isolation_level, - check_same_thread, factory, cached_statements) + check_same_thread, factory, cached_statements, uri) def _unicode_text_factory(x): @@ -195,14 +196,23 @@ _db = None def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, uri=0): self.__initialized = True db_star = _ffi.new('sqlite3 **') if isinstance(database, unicode): database = database.encode('utf-8') - if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: - raise OperationalError("Could not open database") + if _lib.SQLITE_OPEN_URI != 0: + if uri and _lib.SQLITE_OPEN_URI == 0: + raise NotSupportedError("URIs not supported") + flags = _lib.SQLITE_OPEN_READWRITE | _lib.SQLITE_OPEN_CREATE + if uri: + flags |= _lib.SQLITE_OPEN_URI + if _lib.sqlite3_open_v2(database, db_star, flags, _ffi.NULL) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") + else: + if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") self._db = db_star[0] if timeout is not None: timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds @@ -1195,6 +1205,8 @@ def __getitem__(self, item): if isinstance(item, (int, long)): return self.values[item] + elif isinstance(item, slice): + return self.values[item] else: item = item.lower() for idx, desc in enumerate(self.description): diff --git a/lib_pypy/_sqlite3_build.py b/lib_pypy/_sqlite3_build.py --- a/lib_pypy/_sqlite3_build.py +++ b/lib_pypy/_sqlite3_build.py @@ -103,6 +103,10 @@ #define SQLITE_DROP_VTABLE ... #define SQLITE_FUNCTION ... +static const long SQLITE_OPEN_URI; +static const long SQLITE_OPEN_READWRITE; +static const long SQLITE_OPEN_CREATE; + const char *sqlite3_libversion(void); typedef ... 
sqlite3; @@ -117,6 +121,13 @@ sqlite3 **ppDb /* OUT: SQLite db handle */ ); +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); + int sqlite3_close(sqlite3 *); int sqlite3_busy_timeout(sqlite3*, int ms); @@ -259,7 +270,21 @@ libraries=['sqlite3'] ) -_ffi.set_source("_sqlite3_cffi", "#include ", **extra_args) +SOURCE = """ +#include + +#ifndef SQLITE_OPEN_URI +static const long SQLITE_OPEN_URI = 0; +#endif +#ifndef SQLITE_OPEN_READWRITE +static const long SQLITE_OPEN_READWRITE = 0; +#endif +#ifndef SQLITE_OPEN_CREATE +static const long SQLITE_OPEN_CREATE = 0; +#endif +""" + +_ffi.set_source("_sqlite3_cffi", SOURCE, **extra_args) if __name__ == "__main__": diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ working_modules.update([ "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", - "zlib", "bz2", "struct", "_hashlib", "_md5", "_minimal_curses", + "zlib", "bz2", "struct", "_md5", "_minimal_curses", "thread", "itertools", "pyexpat", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", @@ -118,7 +118,6 @@ "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], "pyexpat" : ["pypy.module.pyexpat.interp_pyexpat"], - "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], "_vmprof" : ["pypy.module._vmprof.interp_vmprof"], diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -156,7 +156,7 @@ assert space1.str_w(w_str) == 
"hello" class TestMixedModuleUnfreeze: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_socket',)) def test_random_stuff_can_unfreeze(self): # When a module contains an "import" statement in applevel code, the @@ -167,13 +167,13 @@ # at runtime, like setting os.environ (posix module) or initializing # the winsock library (_socket module) w_socket = self.space.builtin_modules['_socket'] - w_ssl = self.space.builtin_modules['_ssl'] + # _ssl is not builtin anymore, this test also tried to _cleanup_ on + # the wrapped ssl object + # w_ssl = self.space.builtin_modules['_ssl'] # Uncomment this line for a workaround # space.getattr(w_ssl, space.wrap('SSLError')) w_socket._cleanup_() assert w_socket.startup_called == False - w_ssl._cleanup_() # w_ssl.appleveldefs['SSLError'] imports _socket - assert w_socket.startup_called == False diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py deleted file mode 100644 --- a/pypy/module/_hashlib/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule -from pypy.module._hashlib.interp_hashlib import ( - algorithms, fetch_names, HAS_FAST_PKCS5_PBKDF2_HMAC) - - -class Module(MixedModule): - interpleveldefs = { - 'new' : 'interp_hashlib.new', - } - - appleveldefs = { - } - - for name in algorithms: - interpleveldefs['openssl_' + name] = 'interp_hashlib.new_' + name - - if HAS_FAST_PKCS5_PBKDF2_HMAC: - interpleveldefs['pbkdf2_hmac'] = 'interp_hashlib.pbkdf2_hmac' - - def startup(self, space): - w_meth_names = fetch_names(space) - space.setattr(self, space.wrap('openssl_md_meth_names'), w_meth_names) diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/interp_hashlib.py +++ /dev/null @@ -1,209 +0,0 @@ -from __future__ import with_statement - -from rpython.rlib import rgc, ropenssl -from rpython.rlib.objectmodel import we_are_translated -from 
rpython.rlib.rstring import StringBuilder -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.tool.sourcetools import func_renamer - -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, interp2app, WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.module.thread.os_lock import Lock - - -algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') - -def hash_name_mapper_callback(obj_name, userdata): - if not obj_name: - return - # Ignore aliased names, they pollute the list and OpenSSL appears - # to have a its own definition of alias as the resulting list - # still contains duplicate and alternate names for several - # algorithms. - if rffi.cast(lltype.Signed, obj_name[0].c_alias): - return - try: - space = global_name_fetcher.space - w_name = space.wrap(rffi.charp2str(obj_name[0].c_name)) - global_name_fetcher.meth_names.append(w_name) - except OperationError as e: - global_name_fetcher.w_error = e - -class NameFetcher: - def setup(self, space): - self.space = space - self.meth_names = [] - self.w_error = None - def _cleanup_(self): - self.__dict__.clear() -global_name_fetcher = NameFetcher() - -def fetch_names(space): - global_name_fetcher.setup(space) - ropenssl.init_digests() - ropenssl.OBJ_NAME_do_all(ropenssl.OBJ_NAME_TYPE_MD_METH, - hash_name_mapper_callback, None) - if global_name_fetcher.w_error: - raise global_name_fetcher.w_error - meth_names = global_name_fetcher.meth_names - global_name_fetcher.meth_names = None - return space.call_function(space.w_frozenset, space.newlist(meth_names)) - -class W_Hash(W_Root): - NULL_CTX = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ctx = NULL_CTX - - def __init__(self, space, name, copy_from=NULL_CTX): - self.name = name - digest_type = self.digest_type_by_name(space) - self.digest_size = ropenssl.EVP_MD_size(digest_type) - - # Allocate a lock for each 
HASH object. - # An optimization would be to not release the GIL on small requests, - # and use a custom lock only when needed. - self.lock = Lock(space) - - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size) - try: - if copy_from: - if not ropenssl.EVP_MD_CTX_copy(ctx, copy_from): - raise ValueError - else: - ropenssl.EVP_DigestInit(ctx, digest_type) - self.ctx = ctx - except: - ropenssl.EVP_MD_CTX_free(ctx) - raise - self.register_finalizer(space) - - def _finalize_(self): - ctx = self.ctx - if ctx: - self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ropenssl.EVP_MD_CTX_free(ctx) - - def digest_type_by_name(self, space): - digest_type = ropenssl.EVP_get_digestbyname(self.name) - if not digest_type: - raise oefmt(space.w_ValueError, "unknown hash function") - return digest_type - - def descr_repr(self, space): - addrstring = self.getaddrstring(space) - return space.wrap("<%s HASH object at 0x%s>" % ( - self.name, addrstring)) - - @unwrap_spec(string='bufferstr') - def update(self, space, string): - with rffi.scoped_nonmovingbuffer(string) as buf: - with self.lock: - # XXX try to not release the GIL for small requests - ropenssl.EVP_DigestUpdate(self.ctx, buf, len(string)) - - def copy(self, space): - "Return a copy of the hash object." - with self.lock: - return W_Hash(space, self.name, copy_from=self.ctx) - - def digest(self, space): - "Return the digest value as a string of binary data." - digest = self._digest(space) - return space.newbytes(digest) - - def hexdigest(self, space): - "Return the digest value as a string of hexadecimal digits." 
- digest = self._digest(space) - hexdigits = '0123456789abcdef' - result = StringBuilder(self.digest_size * 2) - for c in digest: - result.append(hexdigits[(ord(c) >> 4) & 0xf]) - result.append(hexdigits[ ord(c) & 0xf]) - return space.wrap(result.build()) - - def get_digest_size(self, space): - return space.wrap(self.digest_size) - - def get_block_size(self, space): - digest_type = self.digest_type_by_name(space) - block_size = ropenssl.EVP_MD_block_size(digest_type) - return space.wrap(block_size) - - def get_name(self, space): - return space.wrap(self.name) - - def _digest(self, space): - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - try: - with self.lock: - if not ropenssl.EVP_MD_CTX_copy(ctx, self.ctx): - raise ValueError - digest_size = self.digest_size - with rffi.scoped_alloc_buffer(digest_size) as buf: - ropenssl.EVP_DigestFinal(ctx, buf.raw, None) - return buf.str(digest_size) - finally: - ropenssl.EVP_MD_CTX_free(ctx) - - -W_Hash.typedef = TypeDef( - 'HASH', - __repr__=interp2app(W_Hash.descr_repr), - update=interp2app(W_Hash.update), - copy=interp2app(W_Hash.copy), - digest=interp2app(W_Hash.digest), - hexdigest=interp2app(W_Hash.hexdigest), - # - digest_size=GetSetProperty(W_Hash.get_digest_size), - block_size=GetSetProperty(W_Hash.get_block_size), - name=GetSetProperty(W_Hash.get_name), -) -W_Hash.typedef.acceptable_as_base_class = False - - at unwrap_spec(name=str, string='bufferstr') -def new(space, name, string=''): - w_hash = W_Hash(space, name) - w_hash.update(space, string) - return space.wrap(w_hash) - -# shortcut functions -def make_new_hash(name, funcname): - @func_renamer(funcname) - @unwrap_spec(string='bufferstr') - def new_hash(space, string=''): - return new(space, name, string) - return new_hash - -for _name in algorithms: - _newname = 'new_%s' % (_name,) - globals()[_newname] = make_new_hash(_name, _newname) - - -HAS_FAST_PKCS5_PBKDF2_HMAC = ropenssl.PKCS5_PBKDF2_HMAC is not None -if HAS_FAST_PKCS5_PBKDF2_HMAC: 
- @unwrap_spec(name=str, password='bytes', salt='bytes', rounds=int, - w_dklen=WrappedDefault(None)) - def pbkdf2_hmac(space, name, password, salt, rounds, w_dklen): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise oefmt(space.w_ValueError, "unknown hash function") - if space.is_w(w_dklen, space.w_None): - dklen = ropenssl.EVP_MD_size(digest) - else: - dklen = space.int_w(w_dklen) - if dklen < 1: - raise oefmt(space.w_ValueError, - "key length must be greater than 0.") - with rffi.scoped_alloc_buffer(dklen) as buf: - r = ropenssl.PKCS5_PBKDF2_HMAC( - password, len(password), salt, len(salt), rounds, digest, - dklen, buf.raw) - if not r: - raise ValueError - return space.newbytes(buf.str(dklen)) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_hashlib.py +++ /dev/null @@ -1,100 +0,0 @@ -class AppTestHashlib: - spaceconfig = { - "usemodules": ['_hashlib', 'array', 'struct', 'binascii'], - } - - def test_method_names(self): - import _hashlib - assert isinstance(_hashlib.openssl_md_meth_names, frozenset) - assert "md5" in _hashlib.openssl_md_meth_names - - def test_simple(self): - import _hashlib - assert _hashlib.new('md5').__class__.__name__ == 'HASH' - assert len(_hashlib.new('md5').hexdigest()) == 32 - - def test_attributes(self): - import hashlib - for name, (expected_size, expected_block_size) in { - 'md5': (16, 64), - 'sha1': (20, 64), - 'sha224': (28, 64), - 'sha256': (32, 64), - 'sha384': (48, 128), - 'sha512': (64, 128), - }.items(): - h = hashlib.new(name) - assert h.name == name - assert h.digest_size == expected_size - assert h.block_size == expected_block_size - # - h.update(b'abc') - h2 = h.copy() - h.update(b'def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update(b'd') - h2.update(b'ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - assert len(digest) == h.digest_size - assert 
len(hexdigest) == h.digest_size * 2 - c_digest = digest - c_hexdigest = hexdigest - - - def test_shortcut(self): - import hashlib - assert repr(hashlib.md5()).startswith("= 1.1") - out = pbkdf2_hmac('sha1', b'password', b'salt', 1) - assert type(out) is bytes - assert out == '0c60c80f961f0e71f3a9b524af6012062fe037a6'.decode('hex') - out = pbkdf2_hmac('sha1', b'password', b'salt', 2, None) - assert out == 'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'.decode('hex') - raises(TypeError, pbkdf2_hmac, 'sha1', 'password', b'salt', 1) - raises(TypeError, pbkdf2_hmac, 'sha1', b'password', 'salt', 1) diff --git a/pypy/module/_hashlib/test/test_ztranslation.py b/pypy/module/_hashlib/test/test_ztranslation.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_ztranslation.py +++ /dev/null @@ -1,4 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test_checkmodule(): - checkmodule('_hashlib') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -534,7 +534,9 @@ """ rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() - if nbytes == 0 or nbytes > lgt: + if nbytes < 0: + raise oefmt(space.w_ValueError, "negative buffersize in recv_into") + if nbytes == 0: nbytes = lgt while True: try: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -868,6 +868,22 @@ posix.close(fileno) cli.close() + def test_recv_into_params(self): + import os + import _socket + cli = _socket.socket() + cli.connect(self.serv.getsockname()) + fileno, addr = self.serv._accept() + os.write(fileno, b"abcdef") + # + m = memoryview(bytearray(5)) + raises(ValueError, cli.recv_into, m, -1) + raises(ValueError, cli.recv_into, m, 6) + cli.recv_into(m,5) + assert m.tobytes() == b"abcde" + os.close(fileno) + 
cli.close() + class AppTestErrno: spaceconfig = {'usemodules': ['_socket', 'select']} diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -109,6 +109,7 @@ self.buf = buf self.length = buf.getlength() self.pos = 0 + self.strides = None self.result_w = [] # list of wrapped objects # See above comment on operate. @@ -126,10 +127,15 @@ self.pos = (self.pos + mask) & ~mask def finished(self): - if self.pos != self.length: + value = self.pos + if self.strides and self.strides[0] < 0: + value = -self.pos + if value != self.length: raise StructError("unpack str size too long for format") def read(self, count): + if self.strides: + count = self.strides[0] end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") @@ -151,5 +157,14 @@ string, pos = self.buf.as_str_and_offset_maybe() return string, pos+self.pos - def skip(self, size): - self.read(size) # XXX, could avoid taking the slice + def skip(self, count): + # assumption: UnpackFormatIterator only iterates over + # flat structures (continous memory) either: forward (index + # grows) or reverse + if self.strides: + assert len(self.strides) == 1 + count = self.strides[0] + end = self.pos + count + if end > self.length: + raise StructError("unpack str size too short for format") + self.pos = end diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -115,8 +115,6 @@ return ''.join(self.copy_buffer()) def copy_buffer(self): - buf = self.buf - n_bytes = buf.getlength() data = [] self._copy_rec(0, data, 0) return data @@ -130,7 +128,6 @@ self._copy_base(data,off) return - # TODO add a test that has at least 2 dims for i in range(shape): self._copy_rec(idim+1,data,off) off += strides[idim] @@ -140,18 +137,21 @@ step = shapes[0] strides = 
self.getstrides() itemsize = self.getitemsize() + bytesize = self.getlength() + copiedbytes = 0 for i in range(step): bytes = self.buf.getslice(off, off+itemsize, 1, itemsize) data.append(bytes) + copiedbytes += len(bytes) off += strides[0] # do notcopy data if the sub buffer is out of bounds - if off >= self.buf.getlength(): + if copiedbytes >= bytesize: break def getlength(self): if self.length != -1: - return self.length // self.itemsize - return self.buf.getlength() // self.itemsize + return self.length + return self.buf.getlength() def descr_tobytes(self, space): self._check_released(space) @@ -167,13 +167,20 @@ raise NotImplementedError elif dim == 1: itemsize = self.getitemsize() - return self._tolist(space, buf, buf.getlength() // itemsize, fmt) + return self._tolist(space, buf, self.getlength(), itemsize, fmt, + self.getstrides()) else: return self._tolist_rec(space, buf, 0, 0, fmt) - def _tolist(self, space, buf, count, fmt): + def _tolist(self, space, buf, bytecount, itemsize, fmt, strides=None): # TODO: this probably isn't very fast + count = bytecount // itemsize fmtiter = UnpackFormatIterator(space, buf) + # patch the length, necessary buffer might have offset + # which leads to wrong length calculation if e.g. 
the + # memoryview is reversed + fmtiter.length = bytecount + fmtiter.strides = strides fmtiter.interpret(fmt * count) return space.newlist(fmtiter.result_w) @@ -188,12 +195,13 @@ # if dim >= self.getndim(): bytecount = (stride * dimshape) - count = bytecount // itemsize - return self._tolist(space, buf, count, fmt) + return self._tolist(space, buf, bytecount, itemsize, fmt, [stride]) items = [None] * dimshape + orig_buf = buf for i in range(dimshape): - item = self._tolist_rec(space, SubBuffer(buf, start, stride), start, idim+1, fmt) + buf = SubBuffer(orig_buf, start, stride) + item = self._tolist_rec(space, buf, start, idim+1, fmt) items[i] = item start += stride @@ -212,23 +220,21 @@ while dim < length: w_obj = w_tuple.getitem(space, dim) index = space.getindex_w(w_obj, space.w_IndexError) - start = self.lookup_dimension(space, start, dim, index) + shape = self.buf.getshape() + strides = self.buf.getstrides() + start = self.lookup_dimension(space, shape, strides, start, dim, index) dim += 1 return start - def lookup_dimension(self, space, start, dim, index): - view = self.buf - shape = view.getshape() - strides = view.getstrides() + def lookup_dimension(self, space, shape, strides, start, dim, index): nitems = shape[dim] if index < 0: index += nitems if index < 0 or index >= nitems: raise oefmt(space.w_IndexError, "index out of bounds on dimension %d", dim+1) - start += strides[dim] * index # TODO suboffsets? 
- return start + return start + strides[dim] * index def _getitem_tuple_indexed(self, space, w_index): view = self.buf @@ -253,50 +259,65 @@ fmtiter.interpret(fmt) return fmtiter.result_w[0] + def _decode_index(self, space, w_index, is_slice): + shape = self.getshape() + if len(shape) == 0: + count = 1 + else: + count = shape[0] + return space.decode_index4(w_index, count) def descr_getitem(self, space, w_index): self._check_released(space) if space.isinstance_w(w_index, space.w_tuple): return self._getitem_tuple_indexed(space, w_index) - - start, stop, step, size = space.decode_index4(w_index, self.getlength()) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) # ^^^ for a non-slice index, this returns (index, 0, 0, 1) if step == 0: # index only itemsize = self.getitemsize() - if itemsize == 1: - ch = self.buf.getitem(start) - return space.newint(ord(ch)) + dim = self.getndim() + if dim == 0: + raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") + elif dim == 1: + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) + if itemsize == 1: + ch = self.buf.getitem(idx) + return space.newint(ord(ch)) + else: + # TODO: this probably isn't very fast + buf = SubBuffer(self.buf, idx, itemsize) + fmtiter = UnpackFormatIterator(space, buf) + fmtiter.length = buf.getlength() + fmtiter.interpret(self.format) + return fmtiter.result_w[0] else: - # TODO: this probably isn't very fast - buf = SubBuffer(self.buf, start*itemsize, itemsize) - fmtiter = UnpackFormatIterator(space, buf) - fmtiter.interpret(self.format) - return fmtiter.result_w[0] - elif step == 1: + raise oefmt(space.w_NotImplementedError, "multi-dimensional sub-views are not implemented") + elif is_slice: mv = W_MemoryView.copy(self) - mv.slice(start, step, size) + mv.init_slice(start, stop, step, slicelength, 0) + mv.init_len() mv._init_flags() return 
mv + # multi index is handled at the top of this function else: - mv = W_MemoryView.copy(self) - mv.slice(start, step, size) - mv.length = mv.bytecount_from_shape() - mv._init_flags() - return mv + raise TypeError("memoryview: invalid slice key") - def slice(self, start, step, size): + def init_slice(self, start, stop, step, slicelength, dim): # modifies the buffer, shape and stride to allow step to be > 1 + self.strides = strides = self.getstrides()[:] + self.shape = shape = self.getshape()[:] + bytesize = self.getitemsize() * slicelength + self.buf = SubBuffer(self.buf, strides[dim] * start, bytesize) + shape[dim] = slicelength + strides[dim] = strides[dim] * step # TODO subbuffer - strides = self.getstrides()[:] - shape = self.getshape()[:] - itemsize = self.getitemsize() - dim = 0 - self.buf = SubBuffer(self.buf, strides[dim] * start, size*step*itemsize) - shape[dim] = size - strides[dim] = strides[dim] * step - self.strides = strides - self.shape = shape + + def init_len(self): + self.length = self.bytecount_from_shape() def bytecount_from_shape(self): dim = self.getndim() @@ -307,10 +328,9 @@ return length * self.getitemsize() @staticmethod - def copy(view, buf=None): + def copy(view): # TODO suboffsets - if buf == None: - buf = view.buf + buf = view.buf return W_MemoryView(buf, view.getformat(), view.getitemsize(), view.getndim(), view.getshape()[:], view.getstrides()[:]) @@ -321,11 +341,16 @@ if space.isinstance_w(w_index, space.w_tuple): raise oefmt(space.w_NotImplementedError, "") start, stop, step, size = space.decode_index4(w_index, self.getlength()) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) itemsize = self.getitemsize() if step == 0: # index only + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) if itemsize == 1: ch = getbytevalue(space, w_obj) - self.buf.setitem(start, ch) + 
self.buf.setitem(idx, ch) else: # TODO: this probably isn't very fast fmtiter = PackFormatIterator(space, [w_obj], itemsize) @@ -335,10 +360,10 @@ raise oefmt(space.w_TypeError, "memoryview: invalid type for format '%s'", self.format) - self.buf.setslice(start * itemsize, fmtiter.result.build()) + self.buf.setslice(idx, fmtiter.result.build()) elif step == 1: value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size * itemsize: + if value.getlength() != slicelength * itemsize: raise oefmt(space.w_ValueError, "cannot modify size of memoryview object") self.buf.setslice(start * itemsize, value.as_str()) @@ -354,11 +379,11 @@ src = space.buffer_w(w_obj, space.BUF_CONTIG_RO) dst_strides = self.getstrides() dim = 0 - dst = SubBuffer(self.buf, start * itemsize, size * itemsize) + dst = SubBuffer(self.buf, start * itemsize, slicelength * itemsize) src_stride0 = dst_strides[dim] off = 0 - src_shape0 = size + src_shape0 = slicelength src_stride0 = src.getstrides()[0] if isinstance(w_obj, W_MemoryView): src_stride0 = w_obj.getstrides()[0] @@ -373,11 +398,15 @@ def descr_len(self, space): self._check_released(space) - return space.wrap(self.getlength()) + dim = self.getndim() + if dim == 0: + return space.newint(1) + shape = self.getshape() + return space.wrap(shape[0]) def w_get_nbytes(self, space): self._check_released(space) - return space.wrap(self.buf.getlength()) + return space.wrap(self.getlength()) def w_get_format(self, space): self._check_released(space) @@ -385,11 +414,11 @@ def w_get_itemsize(self, space): self._check_released(space) - return space.wrap(self.itemsize) + return space.wrap(self.getitemsize()) def w_get_ndim(self, space): self._check_released(space) - return space.wrap(self.buf.getndim()) + return space.wrap(self.getndim()) def w_is_readonly(self, space): self._check_released(space) @@ -397,13 +426,13 @@ def w_get_shape(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return 
space.w_None return space.newtuple([space.wrap(x) for x in self.getshape()]) def w_get_strides(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return space.w_None return space.newtuple([space.wrap(x) for x in self.getstrides()]) @@ -615,8 +644,6 @@ return None def _cast_to_ND(self, space, shape, ndim): - buf = self.buf - self.ndim = ndim length = self.itemsize if ndim == 0: diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -1,4 +1,5 @@ import py +import pytest import struct from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app @@ -42,13 +43,13 @@ def test_extended_slice(self): data = bytearray(b'abcefg') v = memoryview(data) - w = v[0:2:2] # failing for now: NotImplementedError + w = v[0:2:2] assert len(w) == 1 assert list(w) == [97] v[::2] = b'ABC' assert data == bytearray(eval("b'AbBeCg'")) - assert v[::2] == b'ABC' - assert v[::-2] == b'geb' + assert v[::2].tobytes() == b'ABC' + assert v[::-2].tobytes() == b'geb' def test_memoryview_attrs(self): v = memoryview(b"a"*100) @@ -409,3 +410,33 @@ v = view.cast('h', shape=(3,2)) assert v.tolist() == [[2,3],[4,5],[6,7]] raises(TypeError, "view.cast('h', shape=(3,3))") + + def test_reversed(self): + bytes = b"\x01\x01\x02\x02\x03\x03" + view = memoryview(bytes) + revlist = list(reversed(view.tolist())) + assert view[::-1][0] == 3 + assert view[::-1][1] == 3 + assert view[::-1][2] == 2 + assert view[::-1][3] == 2 + assert view[::-1][4] == 1 + assert view[::-1][5] == 1 + assert view[::-1][-1] == 1 + assert view[::-1][-2] == 1 + assert list(reversed(view)) == revlist + assert list(reversed(view)) == view[::-1].tolist() + +class AppTestMemoryViewReversed(object): + spaceconfig = dict(usemodules=['array']) + def test_reversed_non_bytes(self): + import array + items = [1,2,3,9,7,5] 
+ formats = ['h'] + for fmt in formats: + bytes = array.array(fmt, items) + view = memoryview(bytes) + bview = view.cast('b') + rview = bview.cast(fmt, shape=(2,3)) + raises(NotImplementedError, list, reversed(rview)) + assert rview.tolist() == [[1,2,3],[9,7,5]] + assert rview[::-1].tolist() == [[9,7,5], [1,2,3]] diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py --- a/pypy/tool/build_cffi_imports.py +++ b/pypy/tool/build_cffi_imports.py @@ -18,6 +18,7 @@ "lzma": "_lzma_build.py", "_decimal": "_decimal_build.py", "ssl": "_ssl_build.py", + # hashlib does not need to be built! It uses API calls from ssl "xx": None, # for testing: 'None' should be completely ignored } From pypy.commits at gmail.com Wed Dec 7 10:35:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 07:35:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5: remove two debug statements Message-ID: <58482c2f.0f341c0a.17ca9.6da8@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88950:a4750edf5273 Date: 2016-12-07 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/a4750edf5273/ Log: remove two debug statements diff --git a/lib-python/3/http/client.py b/lib-python/3/http/client.py --- a/lib-python/3/http/client.py +++ b/lib-python/3/http/client.py @@ -556,7 +556,6 @@ try: while True: chunk_left = self._get_chunk_left() - print("chunk_left", chunk_left) if chunk_left is None: break value.append(self._safe_read(chunk_left)) @@ -606,7 +605,6 @@ s = [] while amt > 0: chunk = self.fp.read(min(amt, MAXAMOUNT)) - print("read chunk %d %d", len(chunk), min(amt, MAXAMOUNT)) if not chunk: raise IncompleteRead(b''.join(s), amt) s.append(chunk) From pypy.commits at gmail.com Wed Dec 7 10:44:39 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 07:44:39 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix ctypes from_buffer() and from_buffer_copy(), only partially tested Message-ID: <58482e67.542e1c0a.59c97.6e17@mx.google.com> Author: Armin 
Rigo Branch: py3.5 Changeset: r88951:c46dd5995acc Date: 2016-12-07 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c46dd5995acc/ Log: fix ctypes from_buffer() and from_buffer_copy(), only partially tested diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -85,29 +85,29 @@ def from_buffer(self, obj, offset=0): size = self._sizeofinstances() - buf = buffer(obj, offset, size) - if len(buf) < size: + buf = memoryview(obj) + if buf.nbytes < offset + size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" - % (len(buf) + offset, size + offset)) - raw_addr = buf._pypy_raw_address() + % (buf.nbytes, offset + size)) + raw_addr = buf._pypy_raw_address() + offset result = self.from_address(raw_addr) result._ensure_objects()['ffffffff'] = obj return result def from_buffer_copy(self, obj, offset=0): size = self._sizeofinstances() - buf = buffer(obj, offset, size) - if len(buf) < size: + buf = memoryview(obj) + if buf.nbytes < offset + size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" - % (len(buf) + offset, size + offset)) + % (buf.nbytes, offset + size)) result = self() dest = result._buffer.buffer try: - raw_addr = buf._pypy_raw_address() + raw_addr = buf._pypy_raw_address() + offset except ValueError: - _rawffi.rawstring2charp(dest, buf) + _rawffi.rawstring2charp(dest, buf, offset, size) else: from ctypes import memmove memmove(dest, raw_addr, size) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -606,12 +606,14 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) - at unwrap_spec(address=r_uint, newcontent='bufferstr') -def rawstring2charp(space, address, newcontent): + at unwrap_spec(address=r_uint, newcontent='bufferstr', offset=int, size=int) +def 
rawstring2charp(space, address, newcontent, offset=0, size=-1): from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw array = rffi.cast(rffi.CCHARP, address) - copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if size < 0: + size = len(newcontent) - offset + copy_string_to_raw(llstr(newcontent), array, offset, size) if _MS_WINDOWS: @unwrap_spec(code=int) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -365,6 +365,10 @@ assert b''.join([a[i] for i in range(10)]) == b"foobarxxxx" _rawffi.rawstring2charp(a.buffer, memoryview(b"baz")) assert b''.join([a[i] for i in range(10)]) == b"bazbarxxxx" + _rawffi.rawstring2charp(a.buffer, memoryview(b"ABCDEF"), 2) + assert b''.join([a[i] for i in range(10)]) == b"CDEFarxxxx" + _rawffi.rawstring2charp(a.buffer, memoryview(b"ZYXWVU"), 2, 3) + assert b''.join([a[i] for i in range(10)]) == b"XWVFarxxxx" a.free() def test_raw_callable(self): From pypy.commits at gmail.com Wed Dec 7 10:44:41 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 07:44:41 -0800 (PST) Subject: [pypy-commit] pypy py3.5: we have no ctypes.resize() Message-ID: <58482e69.a351c20a.59eaa.ef48@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88952:889b8033d3ef Date: 2016-12-07 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/889b8033d3ef/ Log: we have no ctypes.resize() diff --git a/lib-python/3/test/test_io.py b/lib-python/3/test/test_io.py --- a/lib-python/3/test/test_io.py +++ b/lib-python/3/test/test_io.py @@ -46,6 +46,8 @@ threading = None try: + if '__pypy__' in sys.builtin_module_names: + raise ImportError # don't use ctypes, missing ctypes.resize() import ctypes except ImportError: def byteslike(*pos, **kw): From pypy.commits at gmail.com Wed Dec 7 10:44:43 2016 From: pypy.commits at gmail.com (arigo) Date: 
Wed, 07 Dec 2016 07:44:43 -0800 (PST) Subject: [pypy-commit] pypy py3.5: merge heads Message-ID: <58482e6b.113cc20a.638ff.e909@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88953:3ee658af45ba Date: 2016-12-07 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/3ee658af45ba/ Log: merge heads diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -16,7 +16,8 @@ import warnings from operator import neg from test.support import ( - TESTFN, unlink, run_unittest, check_warnings, check_impl_detail) + TESTFN, unlink, run_unittest, check_warnings, check_impl_detail, + cpython_only) from test.support.script_helper import assert_python_ok try: import pty, signal @@ -1640,6 +1641,8 @@ class ShutdownTest(unittest.TestCase): + # PyPy doesn't do a gc.collect() at shutdown + @cpython_only def test_cleanup(self): # Issue #19255: builtins are still available at shutdown code = """if 1: diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/crypto.py @@ -53,6 +53,7 @@ const char *OpenSSL_version(int); /* this is a macro in 1.1.0 */ +void *OPENSSL_malloc(size_t); void OPENSSL_free(void *); /* This was removed in 1.1.0 */ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py @@ -25,6 +25,13 @@ static const int EVP_CTRL_GCM_GET_TAG; static const int EVP_CTRL_GCM_SET_TAG; +typedef struct { + int type; + int alias; + const char *name; + const char *data; +} OBJ_NAME; + static const int Cryptography_HAS_GCM; static const int Cryptography_HAS_PBKDF2_HMAC; static const int Cryptography_HAS_PKEY_CTX; @@ -136,6 +143,7 @@ without worrying about what OpenSSL we're running 
against. */ EVP_MD_CTX *Cryptography_EVP_MD_CTX_new(void); void Cryptography_EVP_MD_CTX_free(EVP_MD_CTX *); +void OBJ_NAME_do_all(int, void (*) (const OBJ_NAME *, void *), void *); """ MACROS = """ @@ -156,6 +164,7 @@ EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *); int EVP_PKEY_set1_EC_KEY(EVP_PKEY *, EC_KEY *); +int EVP_MD_CTX_block_size(const EVP_MD_CTX *md); int EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *); int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *, int, int, void *); @@ -167,6 +176,7 @@ int EVP_PBE_scrypt(const char *, size_t, const unsigned char *, size_t, uint64_t, uint64_t, uint64_t, uint64_t, unsigned char *, size_t); +#define OBJ_NAME_TYPE_MD_METH ... """ CUSTOMIZATIONS = """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -26,7 +26,6 @@ static const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE; static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS; static const long Cryptography_HAS_NPN_NEGOTIATED; -static const long Cryptography_OPENSSL_NO_TLSEXT; /* Internally invented symbol to tell us if SNI is supported */ static const long Cryptography_HAS_TLSEXT_HOSTNAME; @@ -138,6 +137,7 @@ typedef ... SSL_CTX; typedef ... SSL_SESSION; + typedef ... 
SSL; static const long TLSEXT_NAMETYPE_host_name; @@ -434,7 +434,6 @@ long SSL_CTX_sess_misses(SSL_CTX *); long SSL_CTX_sess_timeouts(SSL_CTX *); long SSL_CTX_sess_cache_full(SSL_CTX *); - """ CUSTOMIZATIONS = """ @@ -689,12 +688,6 @@ static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS = 1; -#ifdef OPENSSL_NO_TLSEXT -static const long Cryptography_OPENSSL_NO_TLSEXT = 1; -#else -static const long Cryptography_OPENSSL_NO_TLSEXT = 0; -#endif - /* in OpenSSL 1.1.0 the SSL_ST values were renamed to TLS_ST and several were removed */ #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 || defined(LIBRESSL_VERSION_NUMBER) diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/x509_vfy.py @@ -140,6 +140,7 @@ int X509_STORE_set_flags(X509_STORE *, unsigned long); void X509_STORE_free(X509_STORE *); + /* X509_STORE_CTX */ X509_STORE_CTX *X509_STORE_CTX_new(void); void X509_STORE_CTX_cleanup(X509_STORE_CTX *); @@ -201,7 +202,7 @@ int sk_X509_OBJECT_num(Cryptography_STACK_OF_X509_OBJECT *); X509_OBJECT *sk_X509_OBJECT_value(Cryptography_STACK_OF_X509_OBJECT *, int); -X509_VERIFY_PARAM * X509_STORE_get0_param(X509_STORE *); +X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *); Cryptography_STACK_OF_X509_OBJECT *X509_STORE_get0_objects(X509_STORE *); X509 *X509_OBJECT_get0_X509(X509_OBJECT *); int X509_OBJECT_get_type(const X509_OBJECT *); diff --git a/lib_pypy/_hashlib/__init__.py b/lib_pypy/_hashlib/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_hashlib/__init__.py @@ -0,0 +1,172 @@ +import sys +from threading import Lock +from _pypy_openssl import ffi, lib +from _cffi_ssl._stdssl.utility import (_str_to_ffi_buffer, _bytes_with_len, + _str_from_buf) + +def new(name, string=b''): + h = Hash(name) + h.update(string) + return h + +class Hash(object): + + def __init__(self, name, copy_from=None): + self.ctx = ffi.NULL 
+ self.name = name + digest_type = self.digest_type_by_name() + self.digest_size = lib.EVP_MD_size(digest_type) + + # Allocate a lock for each HASH object. + # An optimization would be to not release the GIL on small requests, + # and use a custom lock only when needed. + self.lock = Lock() + + ctx = lib.Cryptography_EVP_MD_CTX_new() + if ctx == ffi.NULL: + raise MemoryError + ctx = ffi.gc(ctx, lib.Cryptography_EVP_MD_CTX_free) + + try: + if copy_from is not None: + # cpython uses EVP_MD_CTX_copy(...) + if not lib.EVP_MD_CTX_copy_ex(ctx, copy_from): + raise ValueError + else: + # cpython uses EVP_DigestInit + lib.EVP_DigestInit_ex(ctx, digest_type, ffi.NULL) + self.ctx = ctx + except: + # no need to gc ctx! + raise + + def digest_type_by_name(self): + c_name = _str_to_ffi_buffer(self.name) + digest_type = lib.EVP_get_digestbyname(c_name) + if not digest_type: + raise ValueError("unknown hash function") + # TODO + return digest_type + + def __repr__(self): + return "<%s HASH object at 0x%s>" % (self.name, id(self)) + + def update(self, string): + buf = ffi.from_buffer(string) + with self.lock: + # XXX try to not release the GIL for small requests + lib.EVP_DigestUpdate(self.ctx, buf, len(buf)) + + def copy(self): + """Return a copy of the hash object.""" + with self.lock: + return Hash(self.name, copy_from=self.ctx) + + def digest(self): + """Return the digest value as a string of binary data.""" + return self._digest() + + def hexdigest(self): + """Return the digest value as a string of hexadecimal digits.""" + digest = self._digest() + hexdigits = '0123456789abcdef' + result = [] + for c in digest: + result.append(hexdigits[(c >> 4) & 0xf]) + result.append(hexdigits[ c & 0xf]) + return ''.join(result) + + @property + def block_size(self): + return lib.EVP_MD_CTX_block_size(self.ctx) + + def _digest(self): + ctx = lib.Cryptography_EVP_MD_CTX_new() + if ctx == ffi.NULL: + raise MemoryError + try: + with self.lock: + if not lib.EVP_MD_CTX_copy_ex(ctx, self.ctx): + 
raise ValueError + digest_size = self.digest_size + buf = ffi.new("unsigned char[]", digest_size) + lib.EVP_DigestFinal_ex(ctx, buf, ffi.NULL) + return _bytes_with_len(buf, digest_size) + finally: + lib.Cryptography_EVP_MD_CTX_free(ctx) + +algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') + +class NameFetcher: + def __init__(self): + self.meth_names = [] + self.error = None + + +def _fetch_names(): + name_fetcher = NameFetcher() + handle = ffi.new_handle(name_fetcher) + lib.OBJ_NAME_do_all(lib.OBJ_NAME_TYPE_MD_METH, hash_name_mapper_callback, handle) + if name_fetcher.error: + raise name_fetcher.error + meth_names = name_fetcher.meth_names + name_fetcher.meth_names = None + return frozenset(meth_names) + + at ffi.callback("void(OBJ_NAME*, void*)") +def hash_name_mapper_callback(obj_name, userdata): + if not obj_name: + return + name_fetcher = ffi.from_handle(userdata) + # Ignore aliased names, they pollute the list and OpenSSL appears + # to have a its own definition of alias as the resulting list + # still contains duplicate and alternate names for several + # algorithms. 
+ if obj_name.alias != 0: + return + name = _str_from_buf(obj_name.name) + name_fetcher.meth_names.append(name) + +openssl_md_meth_names = _fetch_names() +del _fetch_names + +# shortcut functions +def make_new_hash(name, funcname): + def new_hash(string=b''): + return new(name, string) + new_hash.__name__ = funcname + return new_hash + +for _name in algorithms: + _newname = 'openssl_%s' % (_name,) + globals()[_newname] = make_new_hash(_name, _newname) + +if hasattr(lib, 'PKCS5_PBKDF2_HMAC'): + #@unwrap_spec(name=str, password='bytes', salt='bytes', iterations=int, + # w_dklen=WrappedDefault(None)) + def pbkdf2_hmac(name, password, salt, iterations, dklen=None): + if not isinstance(name, str): + raise TypeError("expected 'str' for name, but got %s" % type(name)) + c_name = _str_to_ffi_buffer(name) + digest = lib.EVP_get_digestbyname(c_name) + if digest == ffi.NULL: + raise ValueError("unsupported hash type") + if dklen is None: + dklen = lib.EVP_MD_size(digest) + if dklen < 1: + raise ValueError("key length must be greater than 0.") + if dklen >= sys.maxsize: + raise OverflowError("key length is too great.") + if iterations < 1: + raise ValueError("iteration value must be greater than 0.") + if iterations >= sys.maxsize: + raise OverflowError("iteration value is too great.") + buf = ffi.new("unsigned char[]", dklen) + c_password = ffi.from_buffer(bytes(password)) + c_salt = ffi.from_buffer(bytes(salt)) + r = lib.PKCS5_PBKDF2_HMAC(c_password, len(c_password), + ffi.cast("unsigned char*",c_salt), len(c_salt), + iterations, digest, dklen, buf) + if r == 0: + raise ValueError + return _bytes_with_len(buf, dklen) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -149,10 +149,11 @@ def connect(database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, + uri=0): factory = Connection if 
not factory else factory return factory(database, timeout, detect_types, isolation_level, - check_same_thread, factory, cached_statements) + check_same_thread, factory, cached_statements, uri) def _unicode_text_factory(x): @@ -195,14 +196,23 @@ _db = None def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="", - check_same_thread=True, factory=None, cached_statements=100): + check_same_thread=True, factory=None, cached_statements=100, uri=0): self.__initialized = True db_star = _ffi.new('sqlite3 **') if isinstance(database, unicode): database = database.encode('utf-8') - if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: - raise OperationalError("Could not open database") + if _lib.SQLITE_OPEN_URI != 0: + if uri and _lib.SQLITE_OPEN_URI == 0: + raise NotSupportedError("URIs not supported") + flags = _lib.SQLITE_OPEN_READWRITE | _lib.SQLITE_OPEN_CREATE + if uri: + flags |= _lib.SQLITE_OPEN_URI + if _lib.sqlite3_open_v2(database, db_star, flags, _ffi.NULL) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") + else: + if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK: + raise OperationalError("Could not open database") self._db = db_star[0] if timeout is not None: timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds @@ -1195,6 +1205,8 @@ def __getitem__(self, item): if isinstance(item, (int, long)): return self.values[item] + elif isinstance(item, slice): + return self.values[item] else: item = item.lower() for idx, desc in enumerate(self.description): diff --git a/lib_pypy/_sqlite3_build.py b/lib_pypy/_sqlite3_build.py --- a/lib_pypy/_sqlite3_build.py +++ b/lib_pypy/_sqlite3_build.py @@ -103,6 +103,10 @@ #define SQLITE_DROP_VTABLE ... #define SQLITE_FUNCTION ... +static const long SQLITE_OPEN_URI; +static const long SQLITE_OPEN_READWRITE; +static const long SQLITE_OPEN_CREATE; + const char *sqlite3_libversion(void); typedef ... 
sqlite3; @@ -117,6 +121,13 @@ sqlite3 **ppDb /* OUT: SQLite db handle */ ); +int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); + int sqlite3_close(sqlite3 *); int sqlite3_busy_timeout(sqlite3*, int ms); @@ -259,7 +270,21 @@ libraries=['sqlite3'] ) -_ffi.set_source("_sqlite3_cffi", "#include ", **extra_args) +SOURCE = """ +#include + +#ifndef SQLITE_OPEN_URI +static const long SQLITE_OPEN_URI = 0; +#endif +#ifndef SQLITE_OPEN_READWRITE +static const long SQLITE_OPEN_READWRITE = 0; +#endif +#ifndef SQLITE_OPEN_CREATE +static const long SQLITE_OPEN_CREATE = 0; +#endif +""" + +_ffi.set_source("_sqlite3_cffi", SOURCE, **extra_args) if __name__ == "__main__": diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ working_modules.update([ "_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd", "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", - "zlib", "bz2", "struct", "_hashlib", "_md5", "_minimal_curses", + "zlib", "bz2", "struct", "_md5", "_minimal_curses", "thread", "itertools", "pyexpat", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", @@ -118,7 +118,6 @@ "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], "pyexpat" : ["pypy.module.pyexpat.interp_pyexpat"], - "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], "_vmprof" : ["pypy.module._vmprof.interp_vmprof"], diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -156,7 +156,7 @@ assert space1.str_w(w_str) == 
"hello" class TestMixedModuleUnfreeze: - spaceconfig = dict(usemodules=('_ssl', '_socket')) + spaceconfig = dict(usemodules=('_socket',)) def test_random_stuff_can_unfreeze(self): # When a module contains an "import" statement in applevel code, the @@ -167,13 +167,13 @@ # at runtime, like setting os.environ (posix module) or initializing # the winsock library (_socket module) w_socket = self.space.builtin_modules['_socket'] - w_ssl = self.space.builtin_modules['_ssl'] + # _ssl is not builtin anymore, this test also tried to _cleanup_ on + # the wrapped ssl object + # w_ssl = self.space.builtin_modules['_ssl'] # Uncomment this line for a workaround # space.getattr(w_ssl, space.wrap('SSLError')) w_socket._cleanup_() assert w_socket.startup_called == False - w_ssl._cleanup_() # w_ssl.appleveldefs['SSLError'] imports _socket - assert w_socket.startup_called == False diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py deleted file mode 100644 --- a/pypy/module/_hashlib/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule -from pypy.module._hashlib.interp_hashlib import ( - algorithms, fetch_names, HAS_FAST_PKCS5_PBKDF2_HMAC) - - -class Module(MixedModule): - interpleveldefs = { - 'new' : 'interp_hashlib.new', - } - - appleveldefs = { - } - - for name in algorithms: - interpleveldefs['openssl_' + name] = 'interp_hashlib.new_' + name - - if HAS_FAST_PKCS5_PBKDF2_HMAC: - interpleveldefs['pbkdf2_hmac'] = 'interp_hashlib.pbkdf2_hmac' - - def startup(self, space): - w_meth_names = fetch_names(space) - space.setattr(self, space.wrap('openssl_md_meth_names'), w_meth_names) diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/interp_hashlib.py +++ /dev/null @@ -1,209 +0,0 @@ -from __future__ import with_statement - -from rpython.rlib import rgc, ropenssl -from rpython.rlib.objectmodel import we_are_translated -from 
rpython.rlib.rstring import StringBuilder -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.tool.sourcetools import func_renamer - -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, interp2app, WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.module.thread.os_lock import Lock - - -algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') - -def hash_name_mapper_callback(obj_name, userdata): - if not obj_name: - return - # Ignore aliased names, they pollute the list and OpenSSL appears - # to have a its own definition of alias as the resulting list - # still contains duplicate and alternate names for several - # algorithms. - if rffi.cast(lltype.Signed, obj_name[0].c_alias): - return - try: - space = global_name_fetcher.space - w_name = space.wrap(rffi.charp2str(obj_name[0].c_name)) - global_name_fetcher.meth_names.append(w_name) - except OperationError as e: - global_name_fetcher.w_error = e - -class NameFetcher: - def setup(self, space): - self.space = space - self.meth_names = [] - self.w_error = None - def _cleanup_(self): - self.__dict__.clear() -global_name_fetcher = NameFetcher() - -def fetch_names(space): - global_name_fetcher.setup(space) - ropenssl.init_digests() - ropenssl.OBJ_NAME_do_all(ropenssl.OBJ_NAME_TYPE_MD_METH, - hash_name_mapper_callback, None) - if global_name_fetcher.w_error: - raise global_name_fetcher.w_error - meth_names = global_name_fetcher.meth_names - global_name_fetcher.meth_names = None - return space.call_function(space.w_frozenset, space.newlist(meth_names)) - -class W_Hash(W_Root): - NULL_CTX = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ctx = NULL_CTX - - def __init__(self, space, name, copy_from=NULL_CTX): - self.name = name - digest_type = self.digest_type_by_name(space) - self.digest_size = ropenssl.EVP_MD_size(digest_type) - - # Allocate a lock for each 
HASH object. - # An optimization would be to not release the GIL on small requests, - # and use a custom lock only when needed. - self.lock = Lock(space) - - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size) - try: - if copy_from: - if not ropenssl.EVP_MD_CTX_copy(ctx, copy_from): - raise ValueError - else: - ropenssl.EVP_DigestInit(ctx, digest_type) - self.ctx = ctx - except: - ropenssl.EVP_MD_CTX_free(ctx) - raise - self.register_finalizer(space) - - def _finalize_(self): - ctx = self.ctx - if ctx: - self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ropenssl.EVP_MD_CTX_free(ctx) - - def digest_type_by_name(self, space): - digest_type = ropenssl.EVP_get_digestbyname(self.name) - if not digest_type: - raise oefmt(space.w_ValueError, "unknown hash function") - return digest_type - - def descr_repr(self, space): - addrstring = self.getaddrstring(space) - return space.wrap("<%s HASH object at 0x%s>" % ( - self.name, addrstring)) - - @unwrap_spec(string='bufferstr') - def update(self, space, string): - with rffi.scoped_nonmovingbuffer(string) as buf: - with self.lock: - # XXX try to not release the GIL for small requests - ropenssl.EVP_DigestUpdate(self.ctx, buf, len(string)) - - def copy(self, space): - "Return a copy of the hash object." - with self.lock: - return W_Hash(space, self.name, copy_from=self.ctx) - - def digest(self, space): - "Return the digest value as a string of binary data." - digest = self._digest(space) - return space.newbytes(digest) - - def hexdigest(self, space): - "Return the digest value as a string of hexadecimal digits." 
- digest = self._digest(space) - hexdigits = '0123456789abcdef' - result = StringBuilder(self.digest_size * 2) - for c in digest: - result.append(hexdigits[(ord(c) >> 4) & 0xf]) - result.append(hexdigits[ ord(c) & 0xf]) - return space.wrap(result.build()) - - def get_digest_size(self, space): - return space.wrap(self.digest_size) - - def get_block_size(self, space): - digest_type = self.digest_type_by_name(space) - block_size = ropenssl.EVP_MD_block_size(digest_type) - return space.wrap(block_size) - - def get_name(self, space): - return space.wrap(self.name) - - def _digest(self, space): - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - try: - with self.lock: - if not ropenssl.EVP_MD_CTX_copy(ctx, self.ctx): - raise ValueError - digest_size = self.digest_size - with rffi.scoped_alloc_buffer(digest_size) as buf: - ropenssl.EVP_DigestFinal(ctx, buf.raw, None) - return buf.str(digest_size) - finally: - ropenssl.EVP_MD_CTX_free(ctx) - - -W_Hash.typedef = TypeDef( - 'HASH', - __repr__=interp2app(W_Hash.descr_repr), - update=interp2app(W_Hash.update), - copy=interp2app(W_Hash.copy), - digest=interp2app(W_Hash.digest), - hexdigest=interp2app(W_Hash.hexdigest), - # - digest_size=GetSetProperty(W_Hash.get_digest_size), - block_size=GetSetProperty(W_Hash.get_block_size), - name=GetSetProperty(W_Hash.get_name), -) -W_Hash.typedef.acceptable_as_base_class = False - - at unwrap_spec(name=str, string='bufferstr') -def new(space, name, string=''): - w_hash = W_Hash(space, name) - w_hash.update(space, string) - return space.wrap(w_hash) - -# shortcut functions -def make_new_hash(name, funcname): - @func_renamer(funcname) - @unwrap_spec(string='bufferstr') - def new_hash(space, string=''): - return new(space, name, string) - return new_hash - -for _name in algorithms: - _newname = 'new_%s' % (_name,) - globals()[_newname] = make_new_hash(_name, _newname) - - -HAS_FAST_PKCS5_PBKDF2_HMAC = ropenssl.PKCS5_PBKDF2_HMAC is not None -if HAS_FAST_PKCS5_PBKDF2_HMAC: 
- @unwrap_spec(name=str, password='bytes', salt='bytes', rounds=int, - w_dklen=WrappedDefault(None)) - def pbkdf2_hmac(space, name, password, salt, rounds, w_dklen): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise oefmt(space.w_ValueError, "unknown hash function") - if space.is_w(w_dklen, space.w_None): - dklen = ropenssl.EVP_MD_size(digest) - else: - dklen = space.int_w(w_dklen) - if dklen < 1: - raise oefmt(space.w_ValueError, - "key length must be greater than 0.") - with rffi.scoped_alloc_buffer(dklen) as buf: - r = ropenssl.PKCS5_PBKDF2_HMAC( - password, len(password), salt, len(salt), rounds, digest, - dklen, buf.raw) - if not r: - raise ValueError - return space.newbytes(buf.str(dklen)) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_hashlib.py +++ /dev/null @@ -1,100 +0,0 @@ -class AppTestHashlib: - spaceconfig = { - "usemodules": ['_hashlib', 'array', 'struct', 'binascii'], - } - - def test_method_names(self): - import _hashlib - assert isinstance(_hashlib.openssl_md_meth_names, frozenset) - assert "md5" in _hashlib.openssl_md_meth_names - - def test_simple(self): - import _hashlib - assert _hashlib.new('md5').__class__.__name__ == 'HASH' - assert len(_hashlib.new('md5').hexdigest()) == 32 - - def test_attributes(self): - import hashlib - for name, (expected_size, expected_block_size) in { - 'md5': (16, 64), - 'sha1': (20, 64), - 'sha224': (28, 64), - 'sha256': (32, 64), - 'sha384': (48, 128), - 'sha512': (64, 128), - }.items(): - h = hashlib.new(name) - assert h.name == name - assert h.digest_size == expected_size - assert h.block_size == expected_block_size - # - h.update(b'abc') - h2 = h.copy() - h.update(b'def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update(b'd') - h2.update(b'ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - assert len(digest) == h.digest_size - assert 
len(hexdigest) == h.digest_size * 2 - c_digest = digest - c_hexdigest = hexdigest - - - def test_shortcut(self): - import hashlib - assert repr(hashlib.md5()).startswith("= 1.1") - out = pbkdf2_hmac('sha1', b'password', b'salt', 1) - assert type(out) is bytes - assert out == '0c60c80f961f0e71f3a9b524af6012062fe037a6'.decode('hex') - out = pbkdf2_hmac('sha1', b'password', b'salt', 2, None) - assert out == 'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'.decode('hex') - raises(TypeError, pbkdf2_hmac, 'sha1', 'password', b'salt', 1) - raises(TypeError, pbkdf2_hmac, 'sha1', b'password', 'salt', 1) diff --git a/pypy/module/_hashlib/test/test_ztranslation.py b/pypy/module/_hashlib/test/test_ztranslation.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_ztranslation.py +++ /dev/null @@ -1,4 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test_checkmodule(): - checkmodule('_hashlib') diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -534,7 +534,9 @@ """ rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() - if nbytes == 0 or nbytes > lgt: + if nbytes < 0: + raise oefmt(space.w_ValueError, "negative buffersize in recv_into") + if nbytes == 0: nbytes = lgt while True: try: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -868,6 +868,22 @@ posix.close(fileno) cli.close() + def test_recv_into_params(self): + import os + import _socket + cli = _socket.socket() + cli.connect(self.serv.getsockname()) + fileno, addr = self.serv._accept() + os.write(fileno, b"abcdef") + # + m = memoryview(bytearray(5)) + raises(ValueError, cli.recv_into, m, -1) + raises(ValueError, cli.recv_into, m, 6) + cli.recv_into(m,5) + assert m.tobytes() == b"abcde" + os.close(fileno) + 
cli.close() + class AppTestErrno: spaceconfig = {'usemodules': ['_socket', 'select']} diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -109,6 +109,7 @@ self.buf = buf self.length = buf.getlength() self.pos = 0 + self.strides = None self.result_w = [] # list of wrapped objects # See above comment on operate. @@ -126,10 +127,15 @@ self.pos = (self.pos + mask) & ~mask def finished(self): - if self.pos != self.length: + value = self.pos + if self.strides and self.strides[0] < 0: + value = -self.pos + if value != self.length: raise StructError("unpack str size too long for format") def read(self, count): + if self.strides: + count = self.strides[0] end = self.pos + count if end > self.length: raise StructError("unpack str size too short for format") @@ -151,5 +157,14 @@ string, pos = self.buf.as_str_and_offset_maybe() return string, pos+self.pos - def skip(self, size): - self.read(size) # XXX, could avoid taking the slice + def skip(self, count): + # assumption: UnpackFormatIterator only iterates over + # flat structures (continous memory) either: forward (index + # grows) or reverse + if self.strides: + assert len(self.strides) == 1 + count = self.strides[0] + end = self.pos + count + if end > self.length: + raise StructError("unpack str size too short for format") + self.pos = end diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -115,8 +115,6 @@ return ''.join(self.copy_buffer()) def copy_buffer(self): - buf = self.buf - n_bytes = buf.getlength() data = [] self._copy_rec(0, data, 0) return data @@ -130,7 +128,6 @@ self._copy_base(data,off) return - # TODO add a test that has at least 2 dims for i in range(shape): self._copy_rec(idim+1,data,off) off += strides[idim] @@ -140,18 +137,21 @@ step = shapes[0] strides = 
self.getstrides() itemsize = self.getitemsize() + bytesize = self.getlength() + copiedbytes = 0 for i in range(step): bytes = self.buf.getslice(off, off+itemsize, 1, itemsize) data.append(bytes) + copiedbytes += len(bytes) off += strides[0] # do notcopy data if the sub buffer is out of bounds - if off >= self.buf.getlength(): + if copiedbytes >= bytesize: break def getlength(self): if self.length != -1: - return self.length // self.itemsize - return self.buf.getlength() // self.itemsize + return self.length + return self.buf.getlength() def descr_tobytes(self, space): self._check_released(space) @@ -167,13 +167,20 @@ raise NotImplementedError elif dim == 1: itemsize = self.getitemsize() - return self._tolist(space, buf, buf.getlength() // itemsize, fmt) + return self._tolist(space, buf, self.getlength(), itemsize, fmt, + self.getstrides()) else: return self._tolist_rec(space, buf, 0, 0, fmt) - def _tolist(self, space, buf, count, fmt): + def _tolist(self, space, buf, bytecount, itemsize, fmt, strides=None): # TODO: this probably isn't very fast + count = bytecount // itemsize fmtiter = UnpackFormatIterator(space, buf) + # patch the length, necessary buffer might have offset + # which leads to wrong length calculation if e.g. 
the + # memoryview is reversed + fmtiter.length = bytecount + fmtiter.strides = strides fmtiter.interpret(fmt * count) return space.newlist(fmtiter.result_w) @@ -188,12 +195,13 @@ # if dim >= self.getndim(): bytecount = (stride * dimshape) - count = bytecount // itemsize - return self._tolist(space, buf, count, fmt) + return self._tolist(space, buf, bytecount, itemsize, fmt, [stride]) items = [None] * dimshape + orig_buf = buf for i in range(dimshape): - item = self._tolist_rec(space, SubBuffer(buf, start, stride), start, idim+1, fmt) + buf = SubBuffer(orig_buf, start, stride) + item = self._tolist_rec(space, buf, start, idim+1, fmt) items[i] = item start += stride @@ -212,23 +220,21 @@ while dim < length: w_obj = w_tuple.getitem(space, dim) index = space.getindex_w(w_obj, space.w_IndexError) - start = self.lookup_dimension(space, start, dim, index) + shape = self.buf.getshape() + strides = self.buf.getstrides() + start = self.lookup_dimension(space, shape, strides, start, dim, index) dim += 1 return start - def lookup_dimension(self, space, start, dim, index): - view = self.buf - shape = view.getshape() - strides = view.getstrides() + def lookup_dimension(self, space, shape, strides, start, dim, index): nitems = shape[dim] if index < 0: index += nitems if index < 0 or index >= nitems: raise oefmt(space.w_IndexError, "index out of bounds on dimension %d", dim+1) - start += strides[dim] * index # TODO suboffsets? 
- return start + return start + strides[dim] * index def _getitem_tuple_indexed(self, space, w_index): view = self.buf @@ -253,50 +259,65 @@ fmtiter.interpret(fmt) return fmtiter.result_w[0] + def _decode_index(self, space, w_index, is_slice): + shape = self.getshape() + if len(shape) == 0: + count = 1 + else: + count = shape[0] + return space.decode_index4(w_index, count) def descr_getitem(self, space, w_index): self._check_released(space) if space.isinstance_w(w_index, space.w_tuple): return self._getitem_tuple_indexed(space, w_index) - - start, stop, step, size = space.decode_index4(w_index, self.getlength()) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) # ^^^ for a non-slice index, this returns (index, 0, 0, 1) if step == 0: # index only itemsize = self.getitemsize() - if itemsize == 1: - ch = self.buf.getitem(start) - return space.newint(ord(ch)) + dim = self.getndim() + if dim == 0: + raise oefmt(space.w_TypeError, "invalid indexing of 0-dim memory") + elif dim == 1: + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) + if itemsize == 1: + ch = self.buf.getitem(idx) + return space.newint(ord(ch)) + else: + # TODO: this probably isn't very fast + buf = SubBuffer(self.buf, idx, itemsize) + fmtiter = UnpackFormatIterator(space, buf) + fmtiter.length = buf.getlength() + fmtiter.interpret(self.format) + return fmtiter.result_w[0] else: - # TODO: this probably isn't very fast - buf = SubBuffer(self.buf, start*itemsize, itemsize) - fmtiter = UnpackFormatIterator(space, buf) - fmtiter.interpret(self.format) - return fmtiter.result_w[0] - elif step == 1: + raise oefmt(space.w_NotImplementedError, "multi-dimensional sub-views are not implemented") + elif is_slice: mv = W_MemoryView.copy(self) - mv.slice(start, step, size) + mv.init_slice(start, stop, step, slicelength, 0) + mv.init_len() mv._init_flags() return 
mv + # multi index is handled at the top of this function else: - mv = W_MemoryView.copy(self) - mv.slice(start, step, size) - mv.length = mv.bytecount_from_shape() - mv._init_flags() - return mv + raise TypeError("memoryview: invalid slice key") - def slice(self, start, step, size): + def init_slice(self, start, stop, step, slicelength, dim): # modifies the buffer, shape and stride to allow step to be > 1 + self.strides = strides = self.getstrides()[:] + self.shape = shape = self.getshape()[:] + bytesize = self.getitemsize() * slicelength + self.buf = SubBuffer(self.buf, strides[dim] * start, bytesize) + shape[dim] = slicelength + strides[dim] = strides[dim] * step # TODO subbuffer - strides = self.getstrides()[:] - shape = self.getshape()[:] - itemsize = self.getitemsize() - dim = 0 - self.buf = SubBuffer(self.buf, strides[dim] * start, size*step*itemsize) - shape[dim] = size - strides[dim] = strides[dim] * step - self.strides = strides - self.shape = shape + + def init_len(self): + self.length = self.bytecount_from_shape() def bytecount_from_shape(self): dim = self.getndim() @@ -307,10 +328,9 @@ return length * self.getitemsize() @staticmethod - def copy(view, buf=None): + def copy(view): # TODO suboffsets - if buf == None: - buf = view.buf + buf = view.buf return W_MemoryView(buf, view.getformat(), view.getitemsize(), view.getndim(), view.getshape()[:], view.getstrides()[:]) @@ -321,11 +341,16 @@ if space.isinstance_w(w_index, space.w_tuple): raise oefmt(space.w_NotImplementedError, "") start, stop, step, size = space.decode_index4(w_index, self.getlength()) + is_slice = space.isinstance_w(w_index, space.w_slice) + start, stop, step, slicelength = self._decode_index(space, w_index, is_slice) itemsize = self.getitemsize() if step == 0: # index only + shape = self.getshape() + strides = self.getstrides() + idx = self.lookup_dimension(space, shape, strides, 0, 0, start) if itemsize == 1: ch = getbytevalue(space, w_obj) - self.buf.setitem(start, ch) + 
self.buf.setitem(idx, ch) else: # TODO: this probably isn't very fast fmtiter = PackFormatIterator(space, [w_obj], itemsize) @@ -335,10 +360,10 @@ raise oefmt(space.w_TypeError, "memoryview: invalid type for format '%s'", self.format) - self.buf.setslice(start * itemsize, fmtiter.result.build()) + self.buf.setslice(idx, fmtiter.result.build()) elif step == 1: value = space.buffer_w(w_obj, space.BUF_CONTIG_RO) - if value.getlength() != size * itemsize: + if value.getlength() != slicelength * itemsize: raise oefmt(space.w_ValueError, "cannot modify size of memoryview object") self.buf.setslice(start * itemsize, value.as_str()) @@ -354,11 +379,11 @@ src = space.buffer_w(w_obj, space.BUF_CONTIG_RO) dst_strides = self.getstrides() dim = 0 - dst = SubBuffer(self.buf, start * itemsize, size * itemsize) + dst = SubBuffer(self.buf, start * itemsize, slicelength * itemsize) src_stride0 = dst_strides[dim] off = 0 - src_shape0 = size + src_shape0 = slicelength src_stride0 = src.getstrides()[0] if isinstance(w_obj, W_MemoryView): src_stride0 = w_obj.getstrides()[0] @@ -373,11 +398,15 @@ def descr_len(self, space): self._check_released(space) - return space.wrap(self.getlength()) + dim = self.getndim() + if dim == 0: + return space.newint(1) + shape = self.getshape() + return space.wrap(shape[0]) def w_get_nbytes(self, space): self._check_released(space) - return space.wrap(self.buf.getlength()) + return space.wrap(self.getlength()) def w_get_format(self, space): self._check_released(space) @@ -385,11 +414,11 @@ def w_get_itemsize(self, space): self._check_released(space) - return space.wrap(self.itemsize) + return space.wrap(self.getitemsize()) def w_get_ndim(self, space): self._check_released(space) - return space.wrap(self.buf.getndim()) + return space.wrap(self.getndim()) def w_is_readonly(self, space): self._check_released(space) @@ -397,13 +426,13 @@ def w_get_shape(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return 
space.w_None return space.newtuple([space.wrap(x) for x in self.getshape()]) def w_get_strides(self, space): self._check_released(space) - if self.buf.getndim() == 0: + if self.getndim() == 0: return space.w_None return space.newtuple([space.wrap(x) for x in self.getstrides()]) @@ -615,8 +644,6 @@ return None def _cast_to_ND(self, space, shape, ndim): - buf = self.buf - self.ndim = ndim length = self.itemsize if ndim == 0: diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -1,4 +1,5 @@ import py +import pytest import struct from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app @@ -42,13 +43,13 @@ def test_extended_slice(self): data = bytearray(b'abcefg') v = memoryview(data) - w = v[0:2:2] # failing for now: NotImplementedError + w = v[0:2:2] assert len(w) == 1 assert list(w) == [97] v[::2] = b'ABC' assert data == bytearray(eval("b'AbBeCg'")) - assert v[::2] == b'ABC' - assert v[::-2] == b'geb' + assert v[::2].tobytes() == b'ABC' + assert v[::-2].tobytes() == b'geb' def test_memoryview_attrs(self): v = memoryview(b"a"*100) @@ -409,3 +410,33 @@ v = view.cast('h', shape=(3,2)) assert v.tolist() == [[2,3],[4,5],[6,7]] raises(TypeError, "view.cast('h', shape=(3,3))") + + def test_reversed(self): + bytes = b"\x01\x01\x02\x02\x03\x03" + view = memoryview(bytes) + revlist = list(reversed(view.tolist())) + assert view[::-1][0] == 3 + assert view[::-1][1] == 3 + assert view[::-1][2] == 2 + assert view[::-1][3] == 2 + assert view[::-1][4] == 1 + assert view[::-1][5] == 1 + assert view[::-1][-1] == 1 + assert view[::-1][-2] == 1 + assert list(reversed(view)) == revlist + assert list(reversed(view)) == view[::-1].tolist() + +class AppTestMemoryViewReversed(object): + spaceconfig = dict(usemodules=['array']) + def test_reversed_non_bytes(self): + import array + items = [1,2,3,9,7,5] 
+ formats = ['h'] + for fmt in formats: + bytes = array.array(fmt, items) + view = memoryview(bytes) + bview = view.cast('b') + rview = bview.cast(fmt, shape=(2,3)) + raises(NotImplementedError, list, reversed(rview)) + assert rview.tolist() == [[1,2,3],[9,7,5]] + assert rview[::-1].tolist() == [[9,7,5], [1,2,3]] diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -667,6 +667,11 @@ b.abc = "awesomer" assert b.abc == "awesomer" + def test_bad_slots(self): + raises(TypeError, type, 'A', (), {'__slots__': b'x'}) + raises(TypeError, type, 'A', (), {'__slots__': 42}) + raises(TypeError, type, 'A', (), {'__slots__': '2_x'}) + def test_base_attr(self): # check the '__base__' class A(object): @@ -936,6 +941,22 @@ else: assert False + def test_qualname(self): + A = type('A', (), {'__qualname__': 'B.C'}) + assert A.__name__ == 'A' + assert A.__qualname__ == 'B.C' + raises(TypeError, type, 'A', (), {'__qualname__': b'B'}) + assert A.__qualname__ == 'B.C' + + A.__qualname__ = 'D.E' + assert A.__name__ == 'A' + assert A.__qualname__ == 'D.E' + + C = type('C', (), {}) + C.__name__ = 'A' + assert C.__name__ == 'A' + assert C.__qualname__ == 'C' + def test_compare(self): class A(object): pass diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -166,7 +166,7 @@ overridetypedef=None, force_new_layout=False): self.space = space self.name = name - self.qualname = None + self.qualname = name.decode('utf-8') self.bases_w = bases_w self.dict_w = dict_w self.hasdict = False @@ -545,7 +545,7 @@ return result.decode('utf-8') def getqualname(self, space): - return self.qualname or self.getname(space) + return self.qualname def add_subclass(self, w_subclass): space = self.space @@ -1057,6 +1057,14 @@ w_self.weakrefable = w_self.weakrefable or 
w_base.weakrefable return hasoldstylebase +def slot_w(space, w_name): + from pypy.objspace.std.unicodeobject import _isidentifier + if not space.isinstance_w(w_name, space.w_text): + raise oefmt(space.w_TypeError, + "__slots__ items must be strings, not '%T'", w_name) + if not _isidentifier(w_name._value): + raise oefmt(space.w_TypeError, "__slots__ must be identifiers") + return w_name.identifier_w(space) def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): from pypy.objspace.std.listobject import StringSort @@ -1073,13 +1081,12 @@ wantdict = False wantweakref = False w_slots = dict_w['__slots__'] - if (space.isinstance_w(w_slots, space.w_str) or - space.isinstance_w(w_slots, space.w_unicode)): + if space.isinstance_w(w_slots, space.w_text): slot_names_w = [w_slots] else: slot_names_w = space.unpackiterable(w_slots) for w_slot_name in slot_names_w: - slot_name = space.str_w(w_slot_name) + slot_name = slot_w(space, w_slot_name) if slot_name == '__dict__': if wantdict or w_bestbase.hasdict: raise oefmt(space.w_TypeError, @@ -1124,8 +1131,6 @@ def create_slot(w_self, slot_name, index_next_extra_slot): space = w_self.space - if not valid_slot_name(slot_name): - raise oefmt(space.w_TypeError, "__slots__ must be identifiers") # create member slot_name = mangle(slot_name, w_self.name) if slot_name not in w_self.dict_w: @@ -1156,14 +1161,6 @@ w_self.space.wrap(weakref_descr)) w_self.weakrefable = True -def valid_slot_name(slot_name): - if len(slot_name) == 0 or slot_name[0].isdigit(): - return False - for c in slot_name: - if not c.isalnum() and c != '_': - return False - return True - def setup_user_defined_type(w_self, force_new_layout): if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py --- a/pypy/tool/build_cffi_imports.py +++ b/pypy/tool/build_cffi_imports.py @@ -18,6 +18,7 @@ "lzma": "_lzma_build.py", "_decimal": "_decimal_build.py", "ssl": 
"_ssl_build.py", + # hashlib does not need to be built! It uses API calls from ssl "xx": None, # for testing: 'None' should be completely ignored } From pypy.commits at gmail.com Wed Dec 7 11:07:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 08:07:44 -0800 (PST) Subject: [pypy-commit] pypy default: update project idea list (jitviewer) Message-ID: <584833d0.6737c20a.352bb.ea96@mx.google.com> Author: Richard Plangger Branch: Changeset: r88954:3a73ffbe71c7 Date: 2016-12-07 17:07 +0100 http://bitbucket.org/pypy/pypy/changeset/3a73ffbe71c7/ Log: update project idea list (jitviewer) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -71,8 +71,11 @@ Analyzing performance of applications is always tricky. We have various tools, for example a `jitviewer`_ that help us analyze performance. -The jitviewer shows the code generated by the PyPy JIT in a hierarchical way, -as shown by the screenshot below: +The old tool was partly rewritten and combined with vmprof. The service is +hosted at `vmprof.com`_. + +The following shows an old image of the jitviewer. +The code generated by the PyPy JIT in a hierarchical way: - at the bottom level, it shows the Python source code of the compiled loops @@ -84,13 +87,17 @@ .. image:: image/jitviewer.png -The jitviewer is a web application based on flask and jinja2 (and jQuery on -the client): if you have great web developing skills and want to help PyPy, +The jitviewer is a web application based on django and angularjs: +if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep -knowledge of the internals. +knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and +`vmprof-integration`_ to find open issues and documentation. -.. _jitviewer: http://bitbucket.org/pypy/jitviewer - +.. _jitviewer: http://vmprof.com +.. 
_vmprof.com: http://vmprof.com +.. _vmprof-python: https://github.com/vmprof/vmprof-python +.. _vmprof-server: https://github.com/vmprof/vmprof-server +.. _vmprof-integration: https://github.com/vmprof/vmprof-integration Optimized Unicode Representation -------------------------------- From pypy.commits at gmail.com Wed Dec 7 11:32:05 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 07 Dec 2016 08:32:05 -0800 (PST) Subject: [pypy-commit] pypy py3.5: translation fix, w_name._value would deduce that the attr _value could also be SomeString not only SomeUnicodeString Message-ID: <58483985.913fc20a.ca4e8.0dbf@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88955:aad77a92e084 Date: 2016-12-07 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/aad77a92e084/ Log: translation fix, w_name._value would deduce that the attr _value could also be SomeString not only SomeUnicodeString diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1062,7 +1062,7 @@ if not space.isinstance_w(w_name, space.w_text): raise oefmt(space.w_TypeError, "__slots__ items must be strings, not '%T'", w_name) - if not _isidentifier(w_name._value): + if not _isidentifier(space.unicode_w(w_name)): raise oefmt(space.w_TypeError, "__slots__ must be identifiers") return w_name.identifier_w(space) From pypy.commits at gmail.com Wed Dec 7 12:16:46 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 07 Dec 2016 09:16:46 -0800 (PST) Subject: [pypy-commit] cffi default: Windows fixes in the example Message-ID: <584843fe.e6b0c20a.2bab1.0af5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2827:adbbfbe13351 Date: 2016-12-07 18:16 +0100 http://bitbucket.org/cffi/cffi/changeset/adbbfbe13351/ Log: Windows fixes in the example diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -47,6 +47,20 @@ 
typedef struct { int x, y; } point_t; extern int do_stuff(point_t *); +.. code-block:: c + + /* file plugin.h, Windows-friendly version */ + typedef struct { int x, y; } point_t; + + /* When including this file from ffibuilder.set_source(), + this macro is defined to __declspec(dllexport). When + including this file directly from your C program, we + define it to __declspec(dllimport) instead. */ + #ifndef CFFI_DLLEXPORT + # define CFFI_DLLEXPORT __declspec(dllimport) + #endif + CFFI_DLLEXPORT int do_stuff(point_t *); + .. code-block:: python # file plugin_build.py @@ -54,7 +68,11 @@ ffibuilder = cffi.FFI() with open('plugin.h') as f: - ffibuilder.embedding_api(f.read()) + # read plugin.h and pass it to embedding_api(), manually + # removing the '#' directives and the CFFI_DLLEXPORT + data = ''.join([line for line in f if not line.startswith('#')]) + data = data.replace('CFFI_DLLEXPORT', '') + ffibuilder.embedding_api(data) ffibuilder.set_source("my_plugin", r''' #include "plugin.h" From pypy.commits at gmail.com Wed Dec 7 13:08:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 10:08:24 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Fix qualname computation Message-ID: <58485018.031f1c0a.54932.b40e@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88956:dd254ac1fd69 Date: 2016-12-07 18:07 +0000 http://bitbucket.org/pypy/pypy/changeset/dd254ac1fd69/ Log: Fix qualname computation diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -166,7 +166,7 @@ overridetypedef=None, force_new_layout=False): self.space = space self.name = name - self.qualname = name.decode('utf-8') + self.qualname = None self.bases_w = bases_w self.dict_w = dict_w self.hasdict = False @@ -1181,6 +1181,8 @@ w_qualname = w_self.dict_w.pop('__qualname__', None) if w_qualname is not None: w_self.qualname = w_self.space.unicode_w(w_qualname) + else: + w_self.qualname = 
w_self.getname(w_self.space) ensure_common_attributes(w_self) return layout @@ -1189,6 +1191,7 @@ w_self.hasdict = instancetypedef.hasdict w_self.weakrefable = instancetypedef.weakrefable w_self.w_doc = w_self.space.wrap(instancetypedef.doc) + w_self.qualname = w_self.getname(w_self.space) ensure_common_attributes(w_self) w_self.flag_heaptype = instancetypedef.heaptype # From pypy.commits at gmail.com Wed Dec 7 13:19:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 10:19:42 -0800 (PST) Subject: [pypy-commit] pypy py3.5: TypeError -> ValueError here too Message-ID: <584852be.0a74c20a.f20e7.2de5@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88957:6c18e63ebf39 Date: 2016-12-07 18:19 +0000 http://bitbucket.org/pypy/pypy/changeset/6c18e63ebf39/ Log: TypeError -> ValueError here too diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -378,7 +378,7 @@ """Helper for pow""" if iw < 0: if iz != 0: - raise oefmt(space.w_TypeError, + raise oefmt(space.w_ValueError, "pow() 2nd argument cannot be negative when 3rd " "argument specified") # bounce it, since it always returns float From pypy.commits at gmail.com Wed Dec 7 16:27:02 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 07 Dec 2016 13:27:02 -0800 (PST) Subject: [pypy-commit] pypy default: merge issue2446 into default, fixes missing __doc__ attribute on PyObjects Message-ID: <58487ea6.c9b3c20a.3c7fe.783e@mx.google.com> Author: Matti Picus Branch: Changeset: r88958:d2d0200b99a6 Date: 2016-12-07 20:41 +0200 http://bitbucket.org/pypy/pypy/changeset/d2d0200b99a6/ Log: merge issue2446 into default, fixes missing __doc__ attribute on PyObjects diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -142,7 +142,7 @@ assert 
fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) - def test_multiple_inheritance(self): + def test_multiple_inheritance1(self): module = self.import_module(name='foo') obj = module.UnicodeSubtype(u'xyz') obj2 = module.UnicodeSubtype2() @@ -422,7 +422,7 @@ assert space.int_w(space.getattr(w_class, w_name)) == 1 space.delitem(w_dict, w_name) - def test_multiple_inheritance(self, space, api): + def test_multiple_inheritance2(self, space, api): w_class = space.appexec([], """(): class A(object): pass @@ -1167,3 +1167,43 @@ __metaclass__ = FooType print repr(X) X() + + def test_multiple_inheritance3(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + obj = PyObject_New(PyObject, &Foo12_Type); + return obj; + ''' + )], prologue=''' + static PyTypeObject Foo1_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo1", + }; + static PyTypeObject Foo2_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo2", + }; + static PyTypeObject Foo12_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "foo.foo12", + }; + static char doc[]="The foo12 object"; + ''', more_init = ''' + Foo1_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Foo2_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Foo12_Type.tp_flags = Py_TPFLAGS_DEFAULT; + Foo12_Type.tp_base = &Foo1_Type; + Foo12_Type.tp_doc = doc; + Foo12_Type.tp_bases = PyTuple_Pack(2, &Foo1_Type, &Foo2_Type); + if (PyType_Ready(&Foo1_Type) < 0) INITERROR; + if (PyType_Ready(&Foo2_Type) < 0) INITERROR; + if (PyType_Ready(&Foo12_Type) < 0) INITERROR; + ''') + obj = module.new_obj() + assert 'foo.foo12' in str(obj) + assert type(obj).__doc__ == "The foo12 object" + assert obj.__doc__ == "The foo12 object" + + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -327,6 +327,8 @@ w_obj = W_PyCWrapperObject(space, pto, method_name, 
wrapper_func, wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = space.wrap(w_obj) + if pto.c_tp_doc: + dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) From pypy.commits at gmail.com Wed Dec 7 16:27:04 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 07 Dec 2016 13:27:04 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <58487ea8.113cc20a.638ff.7ae1@mx.google.com> Author: Matti Picus Branch: Changeset: r88959:45d8737398c9 Date: 2016-12-07 22:21 +0200 http://bitbucket.org/pypy/pypy/changeset/45d8737398c9/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -40,3 +40,8 @@ Refactor FunctionDesc.specialize() and related code (RPython annotator). .. branch: raw-calloc + +.. branch: issue2446 + +Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key +so it will be picked up by app-level objects of that type From pypy.commits at gmail.com Wed Dec 7 16:27:06 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 07 Dec 2016 13:27:06 -0800 (PST) Subject: [pypy-commit] pypy default: test, fix for issue #2245 - unecessarily creating new layouts for PyHeapTypeObjjects Message-ID: <58487eaa.c19d1c0a.b76b3.01c9@mx.google.com> Author: Matti Picus Branch: Changeset: r88960:c95db94fc6a8 Date: 2016-12-07 23:26 +0200 http://bitbucket.org/pypy/pypy/changeset/c95db94fc6a8/ Log: test, fix for issue #2245 - unecessarily creating new layouts for PyHeapTypeObjjects diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1173,37 +1173,32 @@ ("new_obj", "METH_NOARGS", ''' PyObject *obj; - obj = PyObject_New(PyObject, &Foo12_Type); + PyTypeObject *Base1, *Base2, *Base12; + Base1 = 
(PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base1->tp_name = "Base1"; + Base2->tp_name = "Base2"; + Base12->tp_name = "Base12"; + Base1->tp_basicsize = sizeof(PyHeapTypeObject); + Base2->tp_basicsize = sizeof(PyHeapTypeObject); + Base12->tp_basicsize = sizeof(PyHeapTypeObject); + Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_base = Base1; + Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); + Base12->tp_doc = "The Base12 type or object"; + if (PyType_Ready(Base1) < 0) return NULL; + if (PyType_Ready(Base2) < 0) return NULL; + if (PyType_Ready(Base12) < 0) return NULL; + obj = PyObject_New(PyObject, Base12); return obj; ''' - )], prologue=''' - static PyTypeObject Foo1_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - "foo.foo1", - }; - static PyTypeObject Foo2_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - "foo.foo2", - }; - static PyTypeObject Foo12_Type = { - PyVarObject_HEAD_INIT(NULL, 0) - "foo.foo12", - }; - static char doc[]="The foo12 object"; - ''', more_init = ''' - Foo1_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; - Foo2_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; - Foo12_Type.tp_flags = Py_TPFLAGS_DEFAULT; - Foo12_Type.tp_base = &Foo1_Type; - Foo12_Type.tp_doc = doc; - Foo12_Type.tp_bases = PyTuple_Pack(2, &Foo1_Type, &Foo2_Type); - if (PyType_Ready(&Foo1_Type) < 0) INITERROR; - if (PyType_Ready(&Foo2_Type) < 0) INITERROR; - if (PyType_Ready(&Foo12_Type) < 0) INITERROR; - ''') + )]) obj = module.new_obj() - assert 'foo.foo12' in str(obj) - assert type(obj).__doc__ == "The foo12 object" - assert obj.__doc__ == "The foo12 object" + assert 'Base12' in str(obj) + assert type(obj).__doc__ == "The Base12 type or object" + assert 
obj.__doc__ == "The Base12 type or object" diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -465,13 +465,17 @@ convert_member_defs(space, dict_w, pto.c_tp_members, self) name = rffi.charp2str(pto.c_tp_name) - new_layout = (pto.c_tp_basicsize > rffi.sizeof(PyObject.TO) or - pto.c_tp_itemsize > 0) + flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + if flag_heaptype: + minsize = rffi.sizeof(PyHeapTypeObject.TO) + else: + minsize = rffi.sizeof(PyObject.TO) + new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0) W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w, force_new_layout=new_layout) self.flag_cpytype = True - self.flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + self.flag_heaptype = flag_heaptype # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: self.flag_map_or_seq = 'S' From pypy.commits at gmail.com Wed Dec 7 19:22:58 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 16:22:58 -0800 (PST) Subject: [pypy-commit] pypy controller-refactor: Don't mix interp-level and translator-level code in the same class: move all ctrl_foo methods to obviously annotator-related objects Message-ID: <5848a7e2.41a3c20a.7de65.932e@mx.google.com> Author: Ronan Lamy Branch: controller-refactor Changeset: r88961:5aff3d2cbe9c Date: 2016-12-07 05:18 +0000 http://bitbucket.org/pypy/pypy/changeset/5aff3d2cbe9c/ Log: Don't mix interp-level and translator-level code in the same class: move all ctrl_foo methods to obviously annotator-related objects diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -10,7 +10,14 @@ def compute_result_annotation(self, *args_s, **kwds_s): controller = self.getcontroller(*args_s, **kwds_s) - return 
controller.ctrl_new_ex(self.bookkeeper, *args_s, **kwds_s) + if kwds_s: + raise TypeError("cannot handle keyword arguments in %s" % ( + self.new,)) + s_real_obj = delegate(controller.new, *args_s) + if s_real_obj == annmodel.s_ImpossibleValue: + return annmodel.s_ImpossibleValue + else: + return SomeControlledInstance(s_real_obj, controller) def getcontroller(self, *args_s, **kwds_s): return self._controller_() @@ -65,19 +72,6 @@ return controlled_instance_is_box(self, obj) is_box._annspecialcase_ = 'specialize:arg(0)' - def ctrl_new(self, *args_s, **kwds_s): - if kwds_s: - raise TypeError("cannot handle keyword arguments in %s" % ( - self.new,)) - s_real_obj = delegate(self.new, *args_s) - if s_real_obj == annmodel.s_ImpossibleValue: - return annmodel.s_ImpossibleValue - else: - return SomeControlledInstance(s_real_obj, controller=self) - - def ctrl_new_ex(self, bookkeeper, *args_s, **kwds_s): - return self.ctrl_new(*args_s, **kwds_s) - def rtype_new(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.new, hop, revealargs=[], revealresult=True) @@ -86,9 +80,6 @@ return getattr(self, 'get_' + attr)(obj) getattr._annspecialcase_ = 'specialize:arg(0, 2)' - def ctrl_getattr(self, s_obj, s_attr): - return delegate(self.getattr, s_obj, s_attr) - def rtype_getattr(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.getattr, hop) @@ -97,44 +88,26 @@ return getattr(self, 'set_' + attr)(obj, value) setattr._annspecialcase_ = 'specialize:arg(0, 2)' - def ctrl_setattr(self, s_obj, s_attr, s_value): - return delegate(self.setattr, s_obj, s_attr, s_value) - def rtype_setattr(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.setattr, hop) - def ctrl_getitem(self, s_obj, s_key): - return delegate(self.getitem, s_obj, s_key) - def rtype_getitem(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.getitem, 
hop) - def ctrl_setitem(self, s_obj, s_key, s_value): - return delegate(self.setitem, s_obj, s_key, s_value) - def rtype_setitem(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.setitem, hop) - def ctrl_delitem(self, s_obj, s_key): - return delegate(self.delitem, s_obj, s_key) - def rtype_delitem(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.delitem, hop) - def ctrl_bool(self, s_obj): - return delegate(self.bool, s_obj) - def rtype_bool(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.bool, hop) - def ctrl_call(self, s_obj, *args_s): - return delegate(self.call, s_obj, *args_s) - def rtype_call(self, hop): from rpython.rtyper.rcontrollerentry import rtypedelegate return rtypedelegate(self.call, hop) @@ -226,34 +199,34 @@ real_key = self.s_real_obj.rtyper_makekey() return self.__class__, real_key, self.controller + def getattr(self, s_attr): + assert s_attr.is_constant() + ctrl = self.controller + return delegate(ctrl.getattr, self.s_real_obj, s_attr) -class __extend__(SomeControlledInstance): + def setattr(self, s_attr, s_value): + assert s_attr.is_constant() + ctrl = self.controller + return delegate(ctrl.setattr, self.s_real_obj, s_attr, s_value) - def getattr(s_cin, s_attr): - assert s_attr.is_constant() - return s_cin.controller.ctrl_getattr(s_cin.s_real_obj, s_attr) + def bool(self): + ctrl = self.controller + return delegate(ctrl.bool, self.s_real_obj) - def setattr(s_cin, s_attr, s_value): - assert s_attr.is_constant() - s_cin.controller.ctrl_setattr(s_cin.s_real_obj, s_attr, s_value) - - def bool(s_cin): - return s_cin.controller.ctrl_is_true(s_cin.s_real_obj) - - def simple_call(s_cin, *args_s): - return s_cin.controller.ctrl_call(s_cin.s_real_obj, *args_s) + def simple_call(self, *args_s): + return delegate(self.controller.call, self.s_real_obj, *args_s) class __extend__(pairtype(SomeControlledInstance, 
annmodel.SomeObject)): def getitem((s_cin, s_key)): - return s_cin.controller.ctrl_getitem(s_cin.s_real_obj, s_key) + return delegate(s_cin.controller.getitem, s_cin.s_real_obj, s_key) def setitem((s_cin, s_key), s_value): - s_cin.controller.ctrl_setitem(s_cin.s_real_obj, s_key, s_value) + delegate(s_cin.controller.setitem, s_cin.s_real_obj, s_key, s_value) def delitem((s_cin, s_key)): - s_cin.controller.ctrl_delitem(s_cin.s_real_obj, s_key) + delegate(s_cin.controller.delitem, s_cin.s_real_obj, s_key) class __extend__(pairtype(SomeControlledInstance, SomeControlledInstance)): From pypy.commits at gmail.com Wed Dec 7 19:23:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 16:23:00 -0800 (PST) Subject: [pypy-commit] pypy controller-refactor: kill Controller.rtype_* methods Message-ID: <5848a7e4.d5091c0a.fdf1e.2f72@mx.google.com> Author: Ronan Lamy Branch: controller-refactor Changeset: r88962:82b3d304854c Date: 2016-12-07 11:48 +0000 http://bitbucket.org/pypy/pypy/changeset/82b3d304854c/ Log: kill Controller.rtype_* methods diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -23,12 +23,14 @@ return self._controller_() def specialize_call(self, hop, **kwds_i): + from rpython.rtyper.rcontrollerentry import rtypedelegate if hop.s_result == annmodel.s_ImpossibleValue: raise TyperError("object creation always raises: %s" % ( hop.spaceop,)) + assert not kwds_i controller = hop.s_result.controller - return controller.rtype_new(hop, **kwds_i) - + return rtypedelegate(controller.new, hop, revealargs=[], + revealresult=True) def controlled_instance_box(controller, obj): @@ -72,46 +74,14 @@ return controlled_instance_is_box(self, obj) is_box._annspecialcase_ = 'specialize:arg(0)' - def rtype_new(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.new, hop, revealargs=[], revealresult=True) - def 
getattr(self, obj, attr): return getattr(self, 'get_' + attr)(obj) getattr._annspecialcase_ = 'specialize:arg(0, 2)' - def rtype_getattr(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.getattr, hop) - def setattr(self, obj, attr, value): return getattr(self, 'set_' + attr)(obj, value) setattr._annspecialcase_ = 'specialize:arg(0, 2)' - def rtype_setattr(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.setattr, hop) - - def rtype_getitem(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.getitem, hop) - - def rtype_setitem(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.setitem, hop) - - def rtype_delitem(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.delitem, hop) - - def rtype_bool(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.bool, hop) - - def rtype_call(self, hop): - from rpython.rtyper.rcontrollerentry import rtypedelegate - return rtypedelegate(self.call, hop) - def delegate(boundmethod, *args_s): bk = getbookkeeper() diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py --- a/rpython/rtyper/rcontrollerentry.py +++ b/rpython/rtyper/rcontrollerentry.py @@ -24,28 +24,28 @@ return self.s_real_obj, self.r_real_obj def rtype_getattr(self, hop): - return self.controller.rtype_getattr(hop) + return rtypedelegate(self.controller.getattr, hop) def rtype_setattr(self, hop): - return self.controller.rtype_setattr(hop) + return rtypedelegate(self.controller.setattr, hop) def rtype_bool(self, hop): - return self.controller.rtype_bool(hop) + return rtypedelegate(self.controller.bool, hop) def rtype_simple_call(self, hop): - return self.controller.rtype_call(hop) + return rtypedelegate(self.controller.call, hop) class 
__extend__(pairtype(ControlledInstanceRepr, Repr)): def rtype_getitem((r_controlled, r_key), hop): - return r_controlled.controller.rtype_getitem(hop) + return rtypedelegate(r_controlled.controller.getitem, hop) def rtype_setitem((r_controlled, r_key), hop): - return r_controlled.controller.rtype_setitem(hop) + return rtypedelegate(r_controlled.controller.setitem, hop) def rtype_delitem((r_controlled, r_key), hop): - return r_controlled.controller.rtype_delitem(hop) + return rtypedelegate(r_controlled.controller.delitem, hop) def rtypedelegate(callable, hop, revealargs=[0], revealresult=False): From pypy.commits at gmail.com Wed Dec 7 19:23:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 07 Dec 2016 16:23:02 -0800 (PST) Subject: [pypy-commit] pypy controller-refactor: Merge rcontrollerentry into controllerentry Message-ID: <5848a7e6.d32f1c0a.700bb.2f3d@mx.google.com> Author: Ronan Lamy Branch: controller-refactor Changeset: r88963:68adf38ffa01 Date: 2016-12-08 00:21 +0000 http://bitbucket.org/pypy/pypy/changeset/68adf38ffa01/ Log: Merge rcontrollerentry into controllerentry diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -1,6 +1,9 @@ +from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.tool.pairtype import pairtype from rpython.annotator.bookkeeper import getbookkeeper +from rpython.rtyper.rmodel import Repr from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.annlowlevel import cachedtype from rpython.rtyper.error import TyperError @@ -23,7 +26,6 @@ return self._controller_() def specialize_call(self, hop, **kwds_i): - from rpython.rtyper.rcontrollerentry import rtypedelegate if hop.s_result == annmodel.s_ImpossibleValue: raise TyperError("object creation always raises: %s" % ( hop.spaceop,)) @@ -101,7 +103,6 @@ return 
SomeControlledInstance(s_real_obj, controller=controller) def specialize_call(self, hop): - from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr if not isinstance(hop.r_result, ControlledInstanceRepr): raise TyperError("box() should return ControlledInstanceRepr,\n" "got %r" % (hop.r_result,)) @@ -119,7 +120,6 @@ return s_obj.s_real_obj def specialize_call(self, hop): - from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr if not isinstance(hop.args_r[1], ControlledInstanceRepr): raise TyperError("unbox() should take a ControlledInstanceRepr,\n" "got %r" % (hop.args_r[1],)) @@ -162,7 +162,6 @@ return SomeControlledInstance(self.s_real_obj, self.controller) def rtyper_makerepr(self, rtyper): - from rpython.rtyper.rcontrollerentry import ControlledInstanceRepr return ControlledInstanceRepr(rtyper, self.s_real_obj, self.controller) def rtyper_makekey(self): @@ -207,3 +206,75 @@ return SomeControlledInstance(annmodel.unionof(s_cin1.s_real_obj, s_cin2.s_real_obj), s_cin1.controller) + +class ControlledInstanceRepr(Repr): + + def __init__(self, rtyper, s_real_obj, controller): + self.rtyper = rtyper + self.s_real_obj = s_real_obj + self.r_real_obj = rtyper.getrepr(s_real_obj) + self.controller = controller + self.lowleveltype = self.r_real_obj.lowleveltype + + def convert_const(self, value): + real_value = self.controller.convert(value) + return self.r_real_obj.convert_const(real_value) + + def reveal(self, r): + if r is not self: + raise TyperError("expected %r, got %r" % (self, r)) + return self.s_real_obj, self.r_real_obj + + def rtype_getattr(self, hop): + return rtypedelegate(self.controller.getattr, hop) + + def rtype_setattr(self, hop): + return rtypedelegate(self.controller.setattr, hop) + + def rtype_bool(self, hop): + return rtypedelegate(self.controller.bool, hop) + + def rtype_simple_call(self, hop): + return rtypedelegate(self.controller.call, hop) + + +class __extend__(pairtype(ControlledInstanceRepr, Repr)): + + def 
rtype_getitem((r_controlled, r_key), hop): + return rtypedelegate(r_controlled.controller.getitem, hop) + + def rtype_setitem((r_controlled, r_key), hop): + return rtypedelegate(r_controlled.controller.setitem, hop) + + def rtype_delitem((r_controlled, r_key), hop): + return rtypedelegate(r_controlled.controller.delitem, hop) + + +def rtypedelegate(callable, hop, revealargs=[0], revealresult=False): + bk = hop.rtyper.annotator.bookkeeper + c_meth = Constant(callable) + s_meth = bk.immutablevalue(callable) + hop2 = hop.copy() + for index in revealargs: + r_controlled = hop2.args_r[index] + if not isinstance(r_controlled, ControlledInstanceRepr): + raise TyperError("args_r[%d] = %r, expected ControlledInstanceRepr" + % (index, r_controlled)) + s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj + hop2.args_s[index], hop2.args_r[index] = s_new, r_new + v = hop2.args_v[index] + if isinstance(v, Constant): + real_value = r_controlled.controller.convert(v.value) + hop2.args_v[index] = Constant(real_value) + if revealresult: + r_controlled = hop2.r_result + if not isinstance(r_controlled, ControlledInstanceRepr): + raise TyperError("r_result = %r, expected ControlledInstanceRepr" + % (r_controlled,)) + s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj + hop2.s_result, hop2.r_result = s_new, r_new + hop2.v_s_insertfirstarg(c_meth, s_meth) + spaceop = op.simple_call(*hop2.args_v) + spaceop.result = hop2.spaceop.result + hop2.spaceop = spaceop + return hop2.dispatch() diff --git a/rpython/rtyper/rcontrollerentry.py b/rpython/rtyper/rcontrollerentry.py deleted file mode 100644 --- a/rpython/rtyper/rcontrollerentry.py +++ /dev/null @@ -1,78 +0,0 @@ -from rpython.flowspace.model import Constant -from rpython.flowspace.operation import op -from rpython.rtyper.error import TyperError -from rpython.rtyper.rmodel import Repr -from rpython.tool.pairtype import pairtype - - -class ControlledInstanceRepr(Repr): - - def __init__(self, rtyper, s_real_obj, 
controller): - self.rtyper = rtyper - self.s_real_obj = s_real_obj - self.r_real_obj = rtyper.getrepr(s_real_obj) - self.controller = controller - self.lowleveltype = self.r_real_obj.lowleveltype - - def convert_const(self, value): - real_value = self.controller.convert(value) - return self.r_real_obj.convert_const(real_value) - - def reveal(self, r): - if r is not self: - raise TyperError("expected %r, got %r" % (self, r)) - return self.s_real_obj, self.r_real_obj - - def rtype_getattr(self, hop): - return rtypedelegate(self.controller.getattr, hop) - - def rtype_setattr(self, hop): - return rtypedelegate(self.controller.setattr, hop) - - def rtype_bool(self, hop): - return rtypedelegate(self.controller.bool, hop) - - def rtype_simple_call(self, hop): - return rtypedelegate(self.controller.call, hop) - - -class __extend__(pairtype(ControlledInstanceRepr, Repr)): - - def rtype_getitem((r_controlled, r_key), hop): - return rtypedelegate(r_controlled.controller.getitem, hop) - - def rtype_setitem((r_controlled, r_key), hop): - return rtypedelegate(r_controlled.controller.setitem, hop) - - def rtype_delitem((r_controlled, r_key), hop): - return rtypedelegate(r_controlled.controller.delitem, hop) - - -def rtypedelegate(callable, hop, revealargs=[0], revealresult=False): - bk = hop.rtyper.annotator.bookkeeper - c_meth = Constant(callable) - s_meth = bk.immutablevalue(callable) - hop2 = hop.copy() - for index in revealargs: - r_controlled = hop2.args_r[index] - if not isinstance(r_controlled, ControlledInstanceRepr): - raise TyperError("args_r[%d] = %r, expected ControlledInstanceRepr" - % (index, r_controlled)) - s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj - hop2.args_s[index], hop2.args_r[index] = s_new, r_new - v = hop2.args_v[index] - if isinstance(v, Constant): - real_value = r_controlled.controller.convert(v.value) - hop2.args_v[index] = Constant(real_value) - if revealresult: - r_controlled = hop2.r_result - if not isinstance(r_controlled, 
ControlledInstanceRepr): - raise TyperError("r_result = %r, expected ControlledInstanceRepr" - % (r_controlled,)) - s_new, r_new = r_controlled.s_real_obj, r_controlled.r_real_obj - hop2.s_result, hop2.r_result = s_new, r_new - hop2.v_s_insertfirstarg(c_meth, s_meth) - spaceop = op.simple_call(*hop2.args_v) - spaceop.result = hop2.spaceop.result - hop2.spaceop = spaceop - return hop2.dispatch() From pypy.commits at gmail.com Thu Dec 8 04:37:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 01:37:53 -0800 (PST) Subject: [pypy-commit] pypy py3.5: rename parameter name to hash_name on the pbkdf2_hmac method Message-ID: <584929f1.212dc20a.b3dfa.313f@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88964:2c2213836631 Date: 2016-12-08 10:36 +0100 http://bitbucket.org/pypy/pypy/changeset/2c2213836631/ Log: rename parameter name to hash_name on the pbkdf2_hmac method diff --git a/lib_pypy/_hashlib/__init__.py b/lib_pypy/_hashlib/__init__.py --- a/lib_pypy/_hashlib/__init__.py +++ b/lib_pypy/_hashlib/__init__.py @@ -142,12 +142,10 @@ globals()[_newname] = make_new_hash(_name, _newname) if hasattr(lib, 'PKCS5_PBKDF2_HMAC'): - #@unwrap_spec(name=str, password='bytes', salt='bytes', iterations=int, - # w_dklen=WrappedDefault(None)) - def pbkdf2_hmac(name, password, salt, iterations, dklen=None): - if not isinstance(name, str): - raise TypeError("expected 'str' for name, but got %s" % type(name)) - c_name = _str_to_ffi_buffer(name) + def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None): + if not isinstance(hash_name, str): + raise TypeError("expected 'str' for name, but got %s" % type(hash_name)) + c_name = _str_to_ffi_buffer(hash_name) digest = lib.EVP_get_digestbyname(c_name) if digest == ffi.NULL: raise ValueError("unsupported hash type") From pypy.commits at gmail.com Thu Dec 8 05:23:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 08 Dec 2016 02:23:04 -0800 (PST) Subject: [pypy-commit] cffi default: Document 
issue #295 Message-ID: <58493488.c220c20a.5beea.3d7b@mx.google.com> Author: Armin Rigo Branch: Changeset: r2828:dd328541d211 Date: 2016-12-08 11:22 +0100 http://bitbucket.org/cffi/cffi/changeset/dd328541d211/ Log: Document issue #295 diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,19 @@ ====================== +v1.9.2 +====== + +* Issue #295: use calloc() directly instead of + PyObject_Malloc()+memset() to handle ffi.new() with a default + allocator. Speeds up ``ffi.new(large-array)`` where most of the time + you never touch most of the array. (But avoid doing that too often: + on 32-bit PyPy it will quickly exhaust the address space. This case + is best handled by explicit calls to calloc() and free().) + +* some OS/X build fixes ("only with Xcode but without CLT"). + + v1.9 ==== From pypy.commits at gmail.com Thu Dec 8 06:30:14 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 03:30:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5: use arg_w('y*', ...) instead of bufferstr_w for socket.sendto Message-ID: <58494446.05371c0a.d2cc3.e982@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88965:78ca51c16326 Date: 2016-12-08 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/78ca51c16326/ Log: use arg_w('y*', ...) 
instead of bufferstr_w for socket.sendto diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1502,9 +1502,9 @@ def _getarg_error(self, expected, w_obj): if self.is_none(w_obj): - e = oefmt(self.w_TypeError, "must be %s, not None", expected) + e = oefmt(self.w_TypeError, "a %s is required, not None", expected) else: - e = oefmt(self.w_TypeError, "must be %s, not %T", expected, w_obj) + e = oefmt(self.w_TypeError, "a %s is requried, not %T", expected, w_obj) raise e @specialize.arg(1) @@ -1543,7 +1543,7 @@ try: return w_obj.buffer_w(self, self.BUF_SIMPLE) except BufferInterfaceNotFound: - self._getarg_error("bytes or buffer", w_obj) + self._getarg_error("bytes-like object", w_obj) else: assert False diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -449,13 +449,14 @@ except SocketError as e: raise converted_error(space, e) - @unwrap_spec(data='bufferstr') - def sendto_w(self, space, data, w_param2, w_param3=None): + @unwrap_spec(data='buffer') + def sendto_w(self, space, w_data, w_param2, w_param3=None): """sendto(data[, flags], address) -> count Like send(data, flags) but allows specifying the destination address. For IP sockets, the address is a pair (hostaddr, port). 
""" + data = space.arg_w('y*', w_data) if w_param3 is None: # 2 args version flags = 0 From pypy.commits at gmail.com Thu Dec 8 06:47:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 03:47:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5: remove unwrap_spec of previously modified method, new test to check that socket bind takes a byte like object Message-ID: <58494843.0bba1c0a.347da.eda1@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88966:c7429885c9fc Date: 2016-12-08 12:46 +0100 http://bitbucket.org/pypy/pypy/changeset/c7429885c9fc/ Log: remove unwrap_spec of previously modified method, new test to check that socket bind takes a byte like object diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -89,6 +89,8 @@ def addr_from_object(family, fd, space, w_address): if family == rsocket.AF_INET: w_host, w_port = space.unpackiterable(w_address, 2) + if space.isinstance_w(w_host, space.w_unicode): + pass host = space.str_w(w_host) port = space.int_w(w_port) port = make_ushort_port(space, port) @@ -110,9 +112,11 @@ return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: # Not using space.fsencode_w since Linux allows embedded NULs. 
+ import pdb; pdb.set_trace() if space.isinstance_w(w_address, space.w_unicode): w_address = space.fsencode(w_address) - return rsocket.UNIXAddress(space.bytes_w(w_address)) + bytelike = space.arg_w('y*', w_address) + return rsocket.UNIXAddress(bytelike) if rsocket.HAS_AF_NETLINK and family == rsocket.AF_NETLINK: w_pid, w_groups = space.unpackiterable(w_address, 2) return rsocket.NETLINKAddress(space.uint_w(w_pid), space.uint_w(w_groups)) @@ -449,7 +453,6 @@ except SocketError as e: raise converted_error(space, e) - @unwrap_spec(data='buffer') def sendto_w(self, space, w_data, w_param2, w_param3=None): """sendto(data[, flags], address) -> count diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -884,6 +884,11 @@ os.close(fileno) cli.close() + def test_bytearray_name(self): + import _socket as socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(bytearray(b"\x00python\x00test\x00")) + assert s.getsockname() == b"\x00python\x00test\x00" class AppTestErrno: spaceconfig = {'usemodules': ['_socket', 'select']} From pypy.commits at gmail.com Thu Dec 8 09:06:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:06:19 -0800 (PST) Subject: [pypy-commit] pypy default: rpython/rlib/rsocket.py modification to allow bytearray to be passed as argument Message-ID: <584968db.42061c0a.b34d6.2783@mx.google.com> Author: Richard Plangger Branch: Changeset: r88968:dfe709b1b9c3 Date: 2016-12-08 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/dfe709b1b9c3/ Log: rpython/rlib/rsocket.py modification to allow bytearray to be passed as argument diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -398,7 +398,7 @@ baseofs = offsetof(_c.sockaddr_un, 'c_sun_path') self.setdata(sun, baseofs + len(path)) rffi.setintfield(sun, 
'c_sun_family', AF_UNIX) - if _c.linux and path.startswith('\x00'): + if _c.linux and path[0] == '\x00': # Linux abstract namespace extension if len(path) > sizeof(_c.sockaddr_un.c_sun_path): raise RSocketError("AF_UNIX path too long") From pypy.commits at gmail.com Thu Dec 8 09:06:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:06:17 -0800 (PST) Subject: [pypy-commit] pypy py3.5: remove pdb and call right method Message-ID: <584968d9.542e1c0a.59c97.2288@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88967:db81d034b514 Date: 2016-12-08 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/db81d034b514/ Log: remove pdb and call right method diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -112,10 +112,9 @@ return rsocket.INET6Address(host, port, flowinfo, scope_id) if rsocket.HAS_AF_UNIX and family == rsocket.AF_UNIX: # Not using space.fsencode_w since Linux allows embedded NULs. - import pdb; pdb.set_trace() if space.isinstance_w(w_address, space.w_unicode): w_address = space.fsencode(w_address) - bytelike = space.arg_w('y*', w_address) + bytelike = space.getarg_w('y*', w_address) return rsocket.UNIXAddress(bytelike) if rsocket.HAS_AF_NETLINK and family == rsocket.AF_NETLINK: w_pid, w_groups = space.unpackiterable(w_address, 2) @@ -459,7 +458,7 @@ Like send(data, flags) but allows specifying the destination address. For IP sockets, the address is a pair (hostaddr, port). 
""" - data = space.arg_w('y*', w_data) + data = space.getarg_w('y*', w_data) if w_param3 is None: # 2 args version flags = 0 diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -886,7 +886,7 @@ def test_bytearray_name(self): import _socket as socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(bytearray(b"\x00python\x00test\x00")) assert s.getsockname() == b"\x00python\x00test\x00" From pypy.commits at gmail.com Thu Dec 8 09:06:21 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:06:21 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: add rffi.str2charp_gc to implement StringBuffer.get_raw_address Message-ID: <584968dd.46bb1c0a.96c8b.16e9@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88969:003194aeab2e Date: 2016-12-08 15:05 +0100 http://bitbucket.org/pypy/pypy/changeset/003194aeab2e/ Log: add rffi.str2charp_gc to implement StringBuffer.get_raw_address diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -76,12 +76,13 @@ return [1] class StringBuffer(Buffer): - __slots__ = ['value'] + __slots__ = ['value', '_charp'] _immutable_ = True def __init__(self, value): self.value = value self.readonly = True + self._charp = 0 def getlength(self): return len(self.value) @@ -105,6 +106,13 @@ return self.value[start:stop] return Buffer.getslice(self, start, stop, step, size) + def get_raw_address(self): + from rpython.rtyper.lltypesystem import rffi + if self._charp == 0: + self._charp = rffi.str2charp_gc(self.value) + return self._charp + + class SubBuffer(Buffer): __slots__ = ['buffer', 'offset', 'size'] diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ 
b/rpython/rlib/test/test_buffer.py @@ -1,4 +1,4 @@ -from rpython.rlib.buffer import * +from rpython.rlib.buffer import StringBuffer, SubBuffer, Buffer from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.model import SomeInteger @@ -64,3 +64,11 @@ for i in range(9999, 9, -1): buf = SubBuffer(buf, 1, i) assert buf.getlength() == 10 + +def test_string_buffer_as_buffer(): + buf = StringBuffer(b'hello world') + addr = buf.get_raw_address() + assert addr[0] == b'h' + assert addr[4] == b'o' + assert addr[6] == b'w' + assert addr[len(b'hello world')] == b'\x00' diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -805,6 +805,19 @@ else: lltype.free(cp, flavor='raw', track_allocation=False) + # str -> char* + def str2charp_gc(s): + """ str -> char* but collected by the gc + """ + array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='gc', immortal=True) + i = len(s) + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, 0, i) + array[i] = lastchar + return array + str2charp_gc._annenforceargs_ = [strtype, bool] + + # str -> already-existing char[maxsize] def str2chararray(s, array, maxsize): length = min(len(s), maxsize) @@ -984,20 +997,20 @@ return result charpsize2str._annenforceargs_ = [None, int] - return (str2charp, free_charp, charp2str, + return (str2charp, free_charp, str2charp_gc, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, charp2strn, charpsize2str, str2chararray, str2rawmem, ) -(str2charp, free_charp, charp2str, +(str2charp, free_charp, str2charp_gc, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, charp2strn, charpsize2str, str2chararray, str2rawmem, ) = make_string_mappings(str) -(unicode2wcharp, free_wcharp, 
wcharp2unicode, +(unicode2wcharp, free_wcharp, _, wcharp2unicode, get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, __not_usable, alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here, wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem, From pypy.commits at gmail.com Thu Dec 8 09:07:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:07:50 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: provide the length as parameter to sendto in rlib/rsocket.py Message-ID: <58496936.46bb1c0a.a874.03d8@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88970:5b624c957ac2 Date: 2016-12-08 15:07 +0100 http://bitbucket.org/pypy/pypy/changeset/5b624c957ac2/ Log: provide the length as parameter to sendto in rlib/rsocket.py diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -424,7 +424,7 @@ w_addr = w_param3 try: addr = self.addr_from_object(space, w_addr) - count = self.sock.sendto(data, flags, addr) + count = self.sock.sendto(data, len(data), flags, addr) except SocketError as e: raise converted_error(space, e) return space.wrap(count) diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -997,12 +997,12 @@ if signal_checker is not None: signal_checker() - def sendto(self, data, flags, address): + def sendto(self, data, length, flags, address): """Like send(data, flags) but allows specifying the destination address. 
(Note that 'flags' is mandatory here.)""" self.wait_for_data(True) addr = address.lock() - res = _c.sendto(self.fd, data, len(data), flags, + res = _c.sendto(self.fd, data, length, flags, addr, address.addrlen) address.unlock() if res < 0: From pypy.commits at gmail.com Thu Dec 8 09:09:25 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Dec 2016 06:09:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Add bytearray.__rmod__ Message-ID: <58496995.46bb1c0a.a874.0466@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88971:fcf3ec196b93 Date: 2016-12-08 14:08 +0000 http://bitbucket.org/pypy/pypy/changeset/fcf3ec196b93/ Log: Add bytearray.__rmod__ diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -488,6 +488,11 @@ def descr_mod(self, space, w_values): return mod_format(space, self, w_values, fmt_type=FORMAT_BYTEARRAY) + def descr_rmod(self, space, w_value): + if not isinstance(w_value, W_BytearrayObject): + return space.w_NotImplemented + return mod_format(space, w_value, self, fmt_type=FORMAT_BYTEARRAY) + @staticmethod def _iter_getitem_result(self, space, index): assert isinstance(self, W_BytearrayObject) @@ -671,7 +676,10 @@ """x.__mul__(n) <==> x*n""" def __mod__(): - """x.__mod__(y) <==> x % y""" + """Return self%value.""" + + def __rmod__(): + """Return value%self.""" def __ne__(): """x.__ne__(y) <==> x!=y""" @@ -1176,6 +1184,8 @@ doc=BytearrayDocstrings.__delitem__.__doc__), __mod__ = interp2app(W_BytearrayObject.descr_mod, doc=BytearrayDocstrings.__mod__.__doc__), + __rmod__ = interp2app(W_BytearrayObject.descr_rmod, + doc=BytearrayDocstrings.__rmod__.__doc__), append = interp2app(W_BytearrayObject.descr_append, doc=BytearrayDocstrings.append.__doc__), diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ 
b/pypy/objspace/std/test/test_bytearrayobject.py @@ -583,6 +583,10 @@ assert bytearray(b'%04X') % 10 == b'000A' assert bytearray(b'%c') % 48 == b'0' assert bytearray(b'%c') % b'a' == b'a' + assert bytearray(b'%c') % bytearray(b'a') == b'a' + + raises(TypeError, bytearray(b'a').__mod__, 5) + assert bytearray(b'a').__rmod__(5) == NotImplemented """ def test_format_b(self): From pypy.commits at gmail.com Thu Dec 8 09:14:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:14:03 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: remove _annenforceargs_ entry from str2charp_gc Message-ID: <58496aab.ce841c0a.24576.256c@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88972:15c7297598dd Date: 2016-12-08 15:13 +0100 http://bitbucket.org/pypy/pypy/changeset/15c7297598dd/ Log: remove _annenforceargs_ entry from str2charp_gc diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -815,7 +815,7 @@ copy_string_to_raw(ll_s, array, 0, i) array[i] = lastchar return array - str2charp_gc._annenforceargs_ = [strtype, bool] + str2charp_gc._annenforceargs_ = [strtype] # str -> already-existing char[maxsize] From pypy.commits at gmail.com Thu Dec 8 09:23:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:23:01 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: modify test to specify length of sendto's first argument Message-ID: <58496cc5.973f1c0a.949ca.2c57@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88973:b6249f4e3a0f Date: 2016-12-08 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/b6249f4e3a0f/ Log: modify test to specify length of sendto's first argument diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -320,7 +320,7 @@ 
s2.bind(INETAddress('127.0.0.1', INADDR_ANY)) addr2 = s2.getsockname() - s1.sendto('?', 0, addr2) + s1.sendto('?', 1, 0, addr2) buf = s2.recv(100) assert buf == '?' s2.connect(addr) From pypy.commits at gmail.com Thu Dec 8 09:54:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 06:54:59 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: expose a raw_ptr for a resizable list for StringBuffer Message-ID: <58497443.6a5cc20a.f6424.a8cc@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88974:371ebe3ffc26 Date: 2016-12-08 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/371ebe3ffc26/ Log: expose a raw_ptr for a resizable list for StringBuffer diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -2,6 +2,8 @@ Buffer protocol support. """ from rpython.rlib import jit +from rpython.rlib.rgc import (resizable_list_supporting_raw_ptr, + nonmoving_raw_ptr_for_resizable_list) class Buffer(object): @@ -76,13 +78,14 @@ return [1] class StringBuffer(Buffer): - __slots__ = ['value', '_charp'] + __slots__ = ['value', 'charlist'] _immutable_ = True def __init__(self, value): self.value = value self.readonly = True - self._charp = 0 + # currently the + self.charlist = None def getlength(self): return len(self.value) @@ -107,10 +110,10 @@ return Buffer.getslice(self, start, stop, step, size) def get_raw_address(self): - from rpython.rtyper.lltypesystem import rffi - if self._charp == 0: - self._charp = rffi.str2charp_gc(self.value) - return self._charp + if not self.charlist: + data = [c for c in self.value] + self.charlist = resizable_list_supporting_raw_ptr(data) + return nonmoving_raw_ptr_for_resizable_list(self.charlist) diff --git a/rpython/rlib/test/test_buffer.py b/rpython/rlib/test/test_buffer.py --- a/rpython/rlib/test/test_buffer.py +++ b/rpython/rlib/test/test_buffer.py @@ -71,4 +71,3 @@ assert addr[0] == b'h' assert addr[4] == b'o' assert 
addr[6] == b'w' - assert addr[len(b'hello world')] == b'\x00' diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -805,19 +805,6 @@ else: lltype.free(cp, flavor='raw', track_allocation=False) - # str -> char* - def str2charp_gc(s): - """ str -> char* but collected by the gc - """ - array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='gc', immortal=True) - i = len(s) - ll_s = llstrtype(s) - copy_string_to_raw(ll_s, array, 0, i) - array[i] = lastchar - return array - str2charp_gc._annenforceargs_ = [strtype] - - # str -> already-existing char[maxsize] def str2chararray(s, array, maxsize): length = min(len(s), maxsize) @@ -997,20 +984,20 @@ return result charpsize2str._annenforceargs_ = [None, int] - return (str2charp, free_charp, str2charp_gc, charp2str, + return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, charp2strn, charpsize2str, str2chararray, str2rawmem, ) -(str2charp, free_charp, str2charp_gc, charp2str, +(str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, get_nonmovingbuffer_final_null, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, charp2strn, charpsize2str, str2chararray, str2rawmem, ) = make_string_mappings(str) -(unicode2wcharp, free_wcharp, _, wcharp2unicode, +(unicode2wcharp, free_wcharp, wcharp2unicode, get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, __not_usable, alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here, wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem, From pypy.commits at gmail.com Thu Dec 8 10:02:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 08 Dec 2016 07:02:27 -0800 (PST) Subject: [pypy-commit] pypy py3.5: CPython issue #25766: .__bytes__() now works in str subclasses Message-ID: 
<58497603.4438c20a.2b668.a51e@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88975:877aef4785e1 Date: 2016-12-08 15:01 +0000 http://bitbucket.org/pypy/pypy/changeset/877aef4785e1/ Log: CPython issue #25766: .__bytes__() now works in str subclasses diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -700,28 +700,34 @@ return chr(value) def newbytesdata_w(space, w_source, encoding, errors): + # None value + if w_source is None: + if encoding is not None or errors is not None: + raise oefmt(space.w_TypeError, + "encoding or errors without sequence argument") + else: + return b"" # Unicode with encoding - if w_source is not None and space.isinstance_w(w_source, space.w_unicode): - if encoding is None: + if encoding is not None: + if not space.isinstance_w(w_source, space.w_unicode): raise oefmt(space.w_TypeError, - "string argument without an encoding") + "encoding without string argument (got '%T' instead)", + w_source) from pypy.objspace.std.unicodeobject import encode_object w_source = encode_object(space, w_source, encoding, errors) # and continue with the encoded string - elif encoding is not None or errors is not None: - if w_source is None: + elif errors is not None: + if not space.isinstance_w(w_source, space.w_unicode): raise oefmt(space.w_TypeError, - "encoding or errors without string argument") - raise oefmt(space.w_TypeError, - "encoding or errors without string argument (got '%T' instead)", - w_source) - # None value - if w_source is None: - return b"" + "errors without string argument (got '%T' instead)", + w_source) + else: + raise oefmt(space.w_TypeError, + "string argument without an encoding") # Fast-path for bytes if space.isinstance_w(w_source, space.w_str): return space.bytes_w(w_source) - # Some other object with a __bytes__ special method + # Some other object with a __bytes__ special method (could be str subclass) 
w_bytes_method = space.lookup(w_source, "__bytes__") if w_bytes_method is not None: w_bytes = space.get_and_call_function(w_bytes_method, w_source) @@ -729,6 +735,9 @@ raise oefmt(space.w_TypeError, "__bytes__ returned non-bytes (type '%T')", w_bytes) return space.bytes_w(w_bytes) + if space.isinstance_w(w_source, space.w_unicode): + raise oefmt(space.w_TypeError, "string argument without an encoding") + # Is it an integer? # Note that we're calling space.getindex_w() instead of space.int_w(). try: diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -832,6 +832,11 @@ return 3 assert bytes(WithIndex()) == b'a' + class Str(str): + def __bytes__(self): + return b'a' + assert bytes(Str('abc')) == b'a' + def test_getnewargs(self): assert b"foo".__getnewargs__() == (b"foo",) From pypy.commits at gmail.com Thu Dec 8 10:05:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 07:05:46 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: get_raw_address added to two Buffer sub classes Message-ID: <584976ca.0a4cc20a.cf0fa.a068@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88976:41e417a2b7ba Date: 2016-12-08 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/41e417a2b7ba/ Log: get_raw_address added to two Buffer sub classes diff --git a/pypy/module/__pypy__/bytebuffer.py b/pypy/module/__pypy__/bytebuffer.py --- a/pypy/module/__pypy__/bytebuffer.py +++ b/pypy/module/__pypy__/bytebuffer.py @@ -4,6 +4,7 @@ from rpython.rlib.buffer import Buffer from pypy.interpreter.gateway import unwrap_spec +from rpython.rlib.rgc import nonmoving_raw_ptr_for_resizable_list class ByteBuffer(Buffer): @@ -22,6 +23,8 @@ def setitem(self, index, char): self.data[index] = char + def get_raw_address(self): + return nonmoving_raw_ptr_for_resizable_list(self.data) @unwrap_spec(length=int) def 
bytebuffer(space, length): diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -4,6 +4,7 @@ from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from rpython.rlib.rgc import nonmoving_raw_ptr_for_resizable_list from rpython.rlib.buffer import Buffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask @@ -120,6 +121,9 @@ def setitem(self, index, char): self.buf[self.start + index] = char + def get_raw_address(self): + return nonmoving_raw_ptr_for_resizable_list(self.buf) + class BufferedMixin: _mixin_ = True From pypy.commits at gmail.com Thu Dec 8 10:10:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 08 Dec 2016 07:10:20 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: extend comment I did not finish Message-ID: <584977dc.aaa3c20a.711ce.b132@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88977:38b92b25e0b9 Date: 2016-12-08 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/38b92b25e0b9/ Log: extend comment I did not finish diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -84,7 +84,8 @@ def __init__(self, value): self.value = value self.readonly = True - # currently the + # the not initialized list of chars, copied from value + # as soon as get_raw_address is called self.charlist = None def getlength(self): From pypy.commits at gmail.com Thu Dec 8 12:02:22 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 08 Dec 2016 09:02:22 -0800 (PST) Subject: [pypy-commit] pypy default: win32 translation fix: with macro=True, there is one level of function Message-ID: <5849921e.e576c20a.c0ecf.da4e@mx.google.com> Author: Armin Rigo Branch: 
Changeset: r88978:1b7bd9ff3dc0 Date: 2016-12-08 18:01 +0100 http://bitbucket.org/pypy/pypy/changeset/1b7bd9ff3dc0/ Log: win32 translation fix: with macro=True, there is one level of function that is written actively inside a .c file, even if they are not called at all diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1778,22 +1778,23 @@ finally: lltype.free(l_utsbuf, flavor='raw') -# These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) -c_major = external('major', [rffi.INT], rffi.INT, macro=True) -c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) +if sys.platform != 'win32': + # These are actually macros on some/most systems + c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) + c_major = external('major', [rffi.INT], rffi.INT, macro=True) + c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) - at replace_os_function('makedev') -def makedev(maj, min): - return c_makedev(maj, min) + @replace_os_function('makedev') + def makedev(maj, min): + return c_makedev(maj, min) - at replace_os_function('major') -def major(dev): - return c_major(dev) + @replace_os_function('major') + def major(dev): + return c_major(dev) - at replace_os_function('minor') -def minor(dev): - return c_minor(dev) + @replace_os_function('minor') + def minor(dev): + return c_minor(dev) #___________________________________________________________________ From pypy.commits at gmail.com Thu Dec 8 12:05:18 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 08 Dec 2016 09:05:18 -0800 (PST) Subject: [pypy-commit] pypy default: skip makedev test on Windows Message-ID: <584992ce.a558c20a.f1afe.b12b@mx.google.com> Author: Armin Rigo Branch: Changeset: r88979:0d6dbff73259 Date: 2016-12-08 18:04 +0100 http://bitbucket.org/pypy/pypy/changeset/0d6dbff73259/ Log: skip makedev test on Windows diff --git 
a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,7 @@ def test_isatty(self): assert rposix.isatty(-1) is False + @py.test.mark.skipif("not hasattr(rposix, 'makedev')") def test_makedev(self): dev = rposix.makedev(24, 7) assert rposix.major(dev) == 24 From pypy.commits at gmail.com Thu Dec 8 16:54:48 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 08 Dec 2016 13:54:48 -0800 (PST) Subject: [pypy-commit] pypy default: Oops, this comment is only visible in py.test tracebacks, but not with Message-ID: <5849d6a8.41a3c20a.7de65.3eb8@mx.google.com> Author: Armin Rigo Branch: Changeset: r88980:48a3d2466462 Date: 2016-12-08 22:54 +0100 http://bitbucket.org/pypy/pypy/changeset/48a3d2466462/ Log: Oops, this comment is only visible in py.test tracebacks, but not with bin/rpython. Move it to the assertion error message. diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -142,10 +142,14 @@ assert len(lst) == len(args_v), ( "not supported so far: 'greens' variables contain Void") # a crash here means that you have to reorder the variable named in - # the JitDriver. Indeed, greens and reds must both be sorted: first - # all INTs, followed by all REFs, followed by all FLOATs. + # the JitDriver. lst2 = sort_vars(lst) - assert lst == lst2 + assert lst == lst2, ("You have to reorder the variables named in " + "the JitDriver (both the 'greens' and 'reds' independently). 
" + "They must be sorted like this: first all the integer-like, " + "then all the pointer-like, and finally the floats.\n" + "Got: %r\n" + "Expected: %r" % (lst, lst2)) return lst # return (_sort(greens_v, True), _sort(reds_v, False)) From pypy.commits at gmail.com Fri Dec 9 04:00:06 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 01:00:06 -0800 (PST) Subject: [pypy-commit] pypy py3.5: revert call to getarg_w (does not translate) Message-ID: <584a7296.c8111c0a.572bf.888f@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r88981:116106fe6bde Date: 2016-12-09 09:57 +0100 http://bitbucket.org/pypy/pypy/changeset/116106fe6bde/ Log: revert call to getarg_w (does not translate) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -89,8 +89,6 @@ def addr_from_object(family, fd, space, w_address): if family == rsocket.AF_INET: w_host, w_port = space.unpackiterable(w_address, 2) - if space.isinstance_w(w_host, space.w_unicode): - pass host = space.str_w(w_host) port = space.int_w(w_port) port = make_ushort_port(space, port) @@ -114,7 +112,7 @@ # Not using space.fsencode_w since Linux allows embedded NULs. if space.isinstance_w(w_address, space.w_unicode): w_address = space.fsencode(w_address) - bytelike = space.getarg_w('y*', w_address) + bytelike = space.bytes_w(w_address) # getarg_w('y*', w_address) return rsocket.UNIXAddress(bytelike) if rsocket.HAS_AF_NETLINK and family == rsocket.AF_NETLINK: w_pid, w_groups = space.unpackiterable(w_address, 2) @@ -458,7 +456,7 @@ Like send(data, flags) but allows specifying the destination address. For IP sockets, the address is a pair (hostaddr, port). 
""" - data = space.getarg_w('y*', w_data) + data = space.bufferstr_w(w_data) if w_param3 is None: # 2 args version flags = 0 From pypy.commits at gmail.com Fri Dec 9 04:15:21 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Dec 2016 01:15:21 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Add my abstract Message-ID: <584a7629.471ec20a.7e60c.f70b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5763:6aa8fac3ce95 Date: 2016-12-09 10:15 +0100 http://bitbucket.org/pypy/extradoc/changeset/6aa8fac3ce95/ Log: Add my abstract diff --git a/talk/swisspython2017/abstract.txt b/talk/swisspython2017/abstract.txt new file mode 100644 --- /dev/null +++ b/talk/swisspython2017/abstract.txt @@ -0,0 +1,28 @@ +Abstract +-------- + +RevDB is an experimental "reverse debugger" for Python, similar to +UndoDB-GDB or LL for C. You run your program once, in "record" mode, +producing a log file; once you get buggy behavior, you start the +reverse-debugger on the log file. It gives an (improved) pdb-like +experience, but it is replaying your program exactly as it ran---all +input/outputs are replayed from the log file instead of being redone. + +The main point is that you can then go backward as well as forward in +time: from a situation that looks really buggy you can go back and +discover how it came to be. You also get "watchpoints", which are very +useful to find when things change. Watchpoints work both forward and +backward. + +I will show on small examples how you can use it, and also give an idea +about how it works. It is based on PyPy, not CPython, so you need to +ensure your program works on PyPy in the first place (but chances are +that it does). + + +Short bio +--------- + +Armin Rigo is based in Leysin, VD, Switzerland. He is working as a +freelancer on PyPy (various aspects, currently focusing on Python 3.5 +support) and related projects like CFFI and RevDB. 
From pypy.commits at gmail.com Fri Dec 9 06:04:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 03:04:44 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: a new approach to get the raw address to a string Message-ID: <584a8fcc.0f341c0a.17ca9.ba4a@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88982:15b144ea1d4d Date: 2016-12-09 12:04 +0100 http://bitbucket.org/pypy/pypy/changeset/15b144ea1d4d/ Log: a new approach to get the raw address to a string diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -78,15 +78,12 @@ return [1] class StringBuffer(Buffer): - __slots__ = ['value', 'charlist'] + __slots__ = ['value', '__weakref__'] _immutable_ = True def __init__(self, value): self.value = value self.readonly = True - # the not initialized list of chars, copied from value - # as soon as get_raw_address is called - self.charlist = None def getlength(self): return len(self.value) @@ -111,12 +108,8 @@ return Buffer.getslice(self, start, stop, step, size) def get_raw_address(self): - if not self.charlist: - data = [c for c in self.value] - self.charlist = resizable_list_supporting_raw_ptr(data) - return nonmoving_raw_ptr_for_resizable_list(self.charlist) - - + from rpython.rtyper.lltypesystem import rffi + return rffi.get_raw_address_of_string(self, self.value) class SubBuffer(Buffer): __slots__ = ['buffer', 'offset', 'size'] diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -754,6 +754,53 @@ SIGNED = lltype.Signed SIGNEDP = lltype.Ptr(lltype.Array(SIGNED, hints={'nolength': True})) + +class RawBytes(object): + # literal copy of _cffi_backend/func.py + def __init__(self, string): + self.ptr = str2charp(string, track_allocation=False) + def __del__(self): + free_charp(self.ptr, track_allocation=False) + +from rpython.rlib 
import rweakref +from rpython.rlib.buffer import Buffer +_STR_WDICT = rweakref.RWeakKeyDictionary(Buffer, RawBytes) + + at jit.dont_look_inside +def get_raw_address_of_string(key, string): + """Returns a 'char *' that is valid as long as the key object is alive. + Two calls to this function are guaranteed to return the same pointer. + + The extra parameter key is necessary to create a weak reference. + The buffer of the returned pointer (if object is young) lives as long + as key is alive. If key goes out of scope, the buffer will eventually + be freed. `string` cannot go out of scope until the RawBytes object + referencing it goes out of scope. + """ + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import STR + from rpython.rtyper.lltypesystem import llmemory + from rpython.rlib import rgc + + global _STR_WDICT + rawbytes = _STR_WDICT.get(key) + if rawbytes is None: + if we_are_translated() and not rgc.can_move(string): + lldata = llstr(string) + data_start = (llmemory.cast_ptr_to_adr(lldata) + + offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + data_start = cast(CCHARP, data_start) + data_start[len(string)] = '\x00' # write the final extra null + return data_start + rawbytes = RawBytes(string) + _STR_WDICT.set(key, rawbytes) + return rawbytes.ptr + + + + + # various type mapping # conversions between str and char* @@ -876,6 +923,7 @@ get_nonmovingbuffer._always_inline_ = 'try' # get rid of the returned tuple get_nonmovingbuffer._annenforceargs_ = [strtype] + @jit.dont_look_inside def get_nonmovingbuffer_final_null(data): tup = get_nonmovingbuffer(data) From pypy.commits at gmail.com Fri Dec 9 06:24:22 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Dec 2016 03:24:22 -0800 (PST) Subject: [pypy-commit] cffi default: Document 'FILE *' Message-ID: <584a9466.4f831c0a.2ee7e.bc38@mx.google.com> Author: Armin Rigo Branch: Changeset: r2829:c23ad11dc403 Date: 2016-12-09 12:24 +0100
http://bitbucket.org/cffi/cffi/changeset/c23ad11dc403/ Log: Document 'FILE *' diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -183,9 +183,7 @@ * _Bool and bool (equivalent). If not directly supported by the C compiler, this is declared with the size of ``unsigned char``. -* FILE. You can declare C functions taking a ``FILE *`` argument and - call them with a Python file object. If needed, you can also do - ``c_f = ffi.cast("FILE *", fileobj)`` and then pass around ``c_f``. +* FILE. `See here.`__ * all `common Windows types`_ are defined if you run on Windows (``DWORD``, ``LPARAM``, etc.). Exception: @@ -196,6 +194,7 @@ stdint.h, like ``intmax_t``, as long as they map to integers of 1, 2, 4 or 8 bytes. Larger integers are not supported. +.. __: ref.html#file .. _`common Windows types`: http://msdn.microsoft.com/en-us/library/windows/desktop/aa383751%28v=vs.85%29.aspx The declarations can also contain "``...``" at various places; these are diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -709,3 +709,35 @@ *New in version 1.7.* In previous versions, it only worked on pointers; for primitives it always returned True. + +.. _file: + +Support for FILE +++++++++++++++++ + +You can declare C functions taking a ``FILE *`` argument and +call them with a Python file object. If needed, you can also do ``c_f += ffi.cast("FILE *", fileobj)`` and then pass around ``c_f``. + +Note, however, that CFFI does this by a best-effort approach. If you +need finer control over buffering, flushing, and timely closing of the +``FILE *``, then you should not use this special support for ``FILE *``. +Instead, you can handle regular ``FILE *`` cdata objects that you +explicitly make using fdopen(), like this: + +.. 
code-block:: python + + ffi.cdef(''' + FILE *fdopen(int, const char *); // from the C + int fclose(FILE *); + ''') + + myfile.flush() # make sure the file is flushed + newfd = os.dup(myfile.fileno()) # make a copy of the file descriptor + fp = lib.fdopen(newfd, "w") # make a cdata 'FILE *' around newfd + lib.write_stuff_to_file(fp) # invoke the external function + lib.fclose(fp) # when you're done, close fp (and newfd) + +The special support for ``FILE *`` is anyway implemented in a similar manner +on CPython 3.x and on PyPy, because these Python implementations' files are +not natively based on ``FILE *``. Doing it explicitly offers more control. From pypy.commits at gmail.com Fri Dec 9 06:27:58 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 09 Dec 2016 03:27:58 -0800 (PST) Subject: [pypy-commit] cffi default: add warning Message-ID: <584a953e.8675c20a.108e7.2d4c@mx.google.com> Author: Armin Rigo Branch: Changeset: r2830:5462aebb2c5f Date: 2016-12-09 12:27 +0100 http://bitbucket.org/cffi/cffi/changeset/5462aebb2c5f/ Log: add warning diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -200,7 +200,7 @@ memory and must not be used any more. *New in version 1.8:* the python_buffer can be a byte string (but still -not a buffer/memoryview on a string). +not a buffer/memoryview on a string). Never modify a byte string! ffi.memmove() From pypy.commits at gmail.com Fri Dec 9 07:22:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 04:22:15 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: update test, from_buffer(memoryview) is now allowed! Message-ID: <584aa1f7.43e61c0a.62791.d8d4@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88983:a3362d15b0e7 Date: 2016-12-09 13:21 +0100 http://bitbucket.org/pypy/pypy/changeset/a3362d15b0e7/ Log: update test, from_buffer(memoryview) is now allowed!
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3417,22 +3417,27 @@ assert p1 == from_buffer(BCharA, b"foo") import gc; gc.collect() assert p1 == from_buffer(BCharA, b"foo") - py.test.raises(TypeError, from_buffer, BCharA, u+"foo") try: from __builtin__ import buffer except ImportError: - pass + # python3 does not allow from to get buffer from unicode! + raises(TypeError, from_buffer, BCharA, u+"foo") else: - # from_buffer(buffer(b"foo")) does not work, because it's not - # implemented on pypy; only from_buffer(b"foo") works. - py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo")) - py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo")) + p4 = from_buffer(BCharA, u+"foo") + contents = from_buffer(BCharA, buffer(b"foo")) + for i in range(len(contents)): + assert contents[i] == p1[i] + contents = from_buffer(BCharA, buffer(u+"foo")) + for i in range(len(contents)): + assert contents[i] == p4[i] try: from __builtin__ import memoryview except ImportError: pass else: - py.test.raises(TypeError, from_buffer, BCharA, memoryview(b"foo")) + contents = from_buffer(BCharA, memoryview(b"foo")) + for i in range(len(contents)): + assert contents[i] == p1[i] def test_from_buffer_bytearray(): a = bytearray(b"xyz") diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -795,12 +795,9 @@ return data_start rawbytes = RawBytes(string) _STR_WDICT.set(key, rawbytes) + return rawbytes.ptr - - - - # various type mapping # conversions between str and char* From pypy.commits at gmail.com Fri Dec 9 07:28:15 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Dec 2016 04:28:15 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: draft version, wip Message-ID: 
<584aa35f.973f1c0a.949ca.d65e@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r88984:68ef60b6a404 Date: 2016-12-05 22:52 +0200 http://bitbucket.org/pypy/pypy/changeset/68ef60b6a404/ Log: draft version, wip diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -261,13 +261,17 @@ return 0 w_key = space.listview(w_keys)[pos] w_value = space.getitem(w_dict, w_key) - if isinstance(w_value, GetSetProperty): - # XXX doesn't quite work, need to convert GetSetProperty - # to PyGetSetDef, with c_name, c_get, c_set, c_doc, c_closure - w_value = W_GetSetPropertyEx(w_value, w_dict.dstorage._x) if pkey: pkey[0] = as_pyobj(space, w_key) if pvalue: + if 0 and isinstance(w_value, GetSetProperty): + # XXX implement this method for all W_Dict storage strategies + w_type = w_dict.get_storage().get_original_type_object_if_classdict() + # XXX doesn't quite work, need to convert GetSetProperty + # to PyGetSetDef, with c_name, c_get, c_set, c_doc, c_closure + # Do this by calling a make_typedescr(GetSetProperty)? 
+ py_getsetdef = as_pyobj(space, w_value) + w_value = W_GetSetPropertyEx(py_getsetdef, w_type) pvalue[0] = as_pyobj(space, w_value) return 1 From pypy.commits at gmail.com Fri Dec 9 07:28:20 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Dec 2016 04:28:20 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: merge default into branch Message-ID: <584aa364.54b31c0a.61b83.c48f@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r88985:7c55b879f853 Date: 2016-12-08 20:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7c55b879f853/ Log: merge default into branch diff too long, truncating to 2000 out of 5162 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import shlex import imp from distutils.errors import DistutilsPlatformError @@ -62,13 +61,32 @@ def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} + g['CC'] = "gcc -pthread" + g['CXX'] = "g++ -pthread" + g['OPT'] = "-DNDEBUG -O2" + g['CFLAGS'] = "-DNDEBUG -O2" + g['CCSHARED'] = "-fPIC" + g['LDSHARED'] = "gcc -pthread -shared" + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] + g['AR'] = "ar" + g['ARFLAGS'] = "rc" g['EXE'] = "" - g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') - g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" g['VERSION'] = get_python_version() + if sys.platform[:6] == "darwin": + import platform + if platform.machine() == 'i386': + if platform.architecture()[0] == '32bit': + arch = 'i386' + else: + arch = 'x86_64' + else: + # just a guess + arch = platform.machine() + g['LDSHARED'] += ' -undefined dynamic_lookup' + g['CC'] += ' -arch %s' % (arch,) + global _config_vars _config_vars = g @@ -104,6 
+122,12 @@ _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_config_vars) + if args: vals = [] for name in args: @@ -119,30 +143,80 @@ """ return get_config_vars().get(name) + def customize_compiler(compiler): - """Dummy method to let some easy_install packages that have - optional C speedup components. + """Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile (CPython) + or hard-coded in _init_posix() (PyPy). """ - def customize(executable, flags): - command = compiler.executables[executable] + flags - setattr(compiler, executable, command) + if compiler.compiler_type == "unix": + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + # Use get_config_var() to ensure _config_vars is initialized. 
+ if not get_config_var('CUSTOMIZED_OSX_COMPILER'): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - if compiler.compiler_type == "unix": - # compiler_so can be c++ which has no -Wimplicit - #compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) - compiler.compiler_so.extend(['-O2', '-fPIC']) - compiler.shared_lib_extension = get_config_var('SO') - if "CPPFLAGS" in os.environ: - cppflags = shlex.split(os.environ["CPPFLAGS"]) - for executable in ('compiler', 'compiler_so', 'linker_so'): - customize(executable, cppflags) - if "CFLAGS" in os.environ: - cflags = shlex.split(os.environ["CFLAGS"]) - for executable in ('compiler', 'compiler_so', 'linker_so'): - customize(executable, cflags) - if "LDFLAGS" in os.environ: - ldflags = shlex.split(os.environ["LDFLAGS"]) - customize('linker_so', ldflags) + (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', + 'CCSHARED', 'LDSHARED', 'SO', 'AR', + 'ARFLAGS') + + if 'CC' in os.environ: + newcc = os.environ['CC'] + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + ldshared = newcc + ldshared[len(cc):] + cc = newcc + if 'CXX' in os.environ: + cxx = os.environ['CXX'] + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + if 'AR' in os.environ: + ar = os.environ['AR'] + if 
'ARFLAGS' in os.environ: + archiver = ar + ' ' + os.environ['ARFLAGS'] + else: + archiver = ar + ' ' + ar_flags + + cc_cmd = cc + ' ' + cflags + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + compiler_cxx=cxx, + linker_so=ldshared, + linker_exe=cc, + archiver=archiver) + + compiler.shared_lib_extension = so_ext from sysconfig_cpython import ( diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,4 @@ +import os from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,6 +8,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait3(status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -16,6 +20,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait4(pid, status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -71,8 +71,11 @@ Analyzing performance of applications is always tricky. We have various tools, for example a `jitviewer`_ that help us analyze performance. -The jitviewer shows the code generated by the PyPy JIT in a hierarchical way, -as shown by the screenshot below: +The old tool was partly rewritten and combined with vmprof. The service is +hosted at `vmprof.com`_. + +The following shows an old image of the jitviewer. +The code generated by the PyPy JIT in a hierarchical way: - at the bottom level, it shows the Python source code of the compiled loops @@ -84,13 +87,17 @@ .. 
image:: image/jitviewer.png -The jitviewer is a web application based on flask and jinja2 (and jQuery on -the client): if you have great web developing skills and want to help PyPy, +The jitviewer is a web application based on django and angularjs: +if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep -knowledge of the internals. +knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and +`vmprof-integration`_ to find open issues and documentation. -.. _jitviewer: http://bitbucket.org/pypy/jitviewer - +.. _jitviewer: http://vmprof.com +.. _vmprof.com: http://vmprof.com +.. _vmprof-python: https://github.com/vmprof/vmprof-python +.. _vmprof-server: https://github.com/vmprof/vmprof-server +.. _vmprof-integration: https://github.com/vmprof/vmprof-integration Optimized Unicode Representation -------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,15 @@ .. this is a revision shortly after release-pypy2.7-v5.6 .. startrev: 7e9787939641 + +Since a while now, PyPy preserves the order of dictionaries and sets. +However, the set literal syntax ``{x, y, z}`` would by mistake build a +set with the opposite order: ``set([z, y, x])``. This has been fixed. +Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be +``set([5.0])``, but in 2.7.trunk it is ``set([5])``. PyPy's behavior +changed in exactly the same way because of this fix. + + .. branch: rpython-error-to-systemerror Any uncaught RPython exception (from a PyPy bug) is turned into an @@ -20,3 +29,19 @@ .. branch: clean-exported-state Clean-ups in the jit optimizeopt + +.. branch: conditional_call_value_4 + +Add jit.conditional_call_elidable(), a way to tell the JIT "conditionally +call this function" returning a result. + +..
branch: desc-specialize + +Refactor FunctionDesc.specialize() and related code (RPython annotator). + +.. branch: raw-calloc + +.. branch: issue2446 + +Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key +so it will be picked up by app-level objects of that type diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -298,6 +298,12 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False + if config.objspace.usemodules.cpyext: + if config.translation.gc != 'incminimark': + raise Exception("The 'cpyext' module requires the 'incminimark'" + " GC. You need either 'targetpypystandalone.py" + " --withoutmod-cpyext' or '--gc=incminimark'") + config.translating = True import translate diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -183,6 +183,14 @@ assert self._finalize_.im_func is not W_Root._finalize_.im_func space.finalizer_queue.register_finalizer(self) + def may_unregister_rpython_finalizer(self, space): + """Optimization hint only: if there is no user-defined __del__() + method, pass the hint ``don't call any finalizer'' to rgc. 
+ """ + if not self.getclass(space).hasuserdel: + from rpython.rlib import rgc + rgc.may_ignore_finalizer(self) + # hooks that the mapdict implementations needs: def _get_mapdict_map(self): return None diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -439,6 +439,7 @@ space.wrap(msg)) return OperationError(exc, w_error) + at specialize.arg(3) def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError', w_exception_class=None): assert isinstance(e, OSError) @@ -466,8 +467,8 @@ w_error = space.call_function(exc, space.wrap(errno), space.wrap(msg)) return OperationError(exc, w_error) -wrap_oserror2._annspecialcase_ = 'specialize:arg(3)' + at specialize.arg(3) def wrap_oserror(space, e, filename=None, exception_name='w_OSError', w_exception_class=None): if filename is not None: @@ -478,7 +479,6 @@ return wrap_oserror2(space, e, None, exception_name=exception_name, w_exception_class=w_exception_class) -wrap_oserror._annspecialcase_ = 'specialize:arg(3)' def exception_from_saved_errno(space, w_type): from rpython.rlib.rposix import get_saved_errno diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -547,6 +547,8 @@ @jit.dont_look_inside def _run_finalizers(self): + # called by perform() when we have to "perform" this action, + # and also directly at the end of gc.collect). 
while True: w_obj = self.space.finalizer_queue.next_dead() if w_obj is None: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pyopcode import LoopBlock from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY -from rpython.rlib import jit +from rpython.rlib import jit, rgc class GeneratorIterator(W_Root): @@ -103,11 +103,11 @@ w_result = frame.execute_frame(w_arg, operr) except OperationError: # errors finish a frame - self.frame = None + self.frame_is_finished() raise # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: - self.frame = None + self.frame_is_finished() raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed @@ -209,7 +209,7 @@ finally: frame.f_backref = jit.vref_None self.running = False - self.frame = None + self.frame_is_finished() return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() @@ -228,6 +228,10 @@ break block = block.previous + def frame_is_finished(self): + self.frame = None + rgc.may_ignore_finalizer(self) + def get_printable_location_genentry(bytecode): return '%s ' % (bytecode.get_repr(),) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1295,9 +1295,10 @@ @jit.unroll_safe def BUILD_SET(self, itemcount, next_instr): w_set = self.space.newset() - for i in range(itemcount): - w_item = self.popvalue() + for i in range(itemcount-1, -1, -1): + w_item = self.peekvalue(i) self.space.call_method(w_set, 'add', w_item) + self.popvalues(itemcount) self.pushvalue(w_set) def STORE_MAP(self, oparg, next_instr): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ 
b/pypy/interpreter/test/test_compiler.py @@ -729,6 +729,10 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_host_is_pypy = cls.space.wrap( + '__pypy__' in sys.builtin_module_names) + def test_bom_with_future(self): s = '\xef\xbb\xbffrom __future__ import division\nx = 1/2' ns = {} @@ -771,6 +775,18 @@ assert math.copysign(1., c[0]) == -1.0 assert math.copysign(1., c[1]) == -1.0 + def test_dict_and_set_literal_order(self): + x = 1 + l1 = list({1:'a', 3:'b', 2:'c', 4:'d'}) + l2 = list({1, 3, 2, 4}) + l3 = list({x:'a', 3:'b', 2:'c', 4:'d'}) + l4 = list({x, 3, 2, 4}) + if not self.host_is_pypy: + # the full test relies on the host Python providing ordered dicts + assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 4} + else: + assert l1 == l2 == l3 == l4 == [1, 3, 2, 4] + ##class TestPythonAstCompiler(BaseTestCompiler): ## def setup_method(self, method): diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -4,9 +4,11 @@ def test_Ellipsis(self): assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' + assert Ellipsis.__class__.__name__ == 'ellipsis' def test_NotImplemented(self): def f(): return NotImplemented assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' + assert NotImplemented.__class__.__name__ == 'NotImplementedType' diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -770,12 +770,12 @@ ) assert not Cell.typedef.acceptable_as_base_class # no __new__ -Ellipsis.typedef = TypeDef("Ellipsis", +Ellipsis.typedef = TypeDef("ellipsis", __repr__ = interp2app(Ellipsis.descr__repr__), ) assert not Ellipsis.typedef.acceptable_as_base_class # no __new__ -NotImplemented.typedef = TypeDef("NotImplemented", +NotImplemented.typedef = TypeDef("NotImplementedType", __repr__ = 
interp2app(NotImplemented.descr__repr__), ) assert not NotImplemented.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -397,7 +397,7 @@ space = self.space if space.is_none(w_destructor): if isinstance(self, W_CDataGCP): - self.w_destructor = None + self.detach_destructor() return space.w_None raise oefmt(space.w_TypeError, "Can remove destructor only on a object " @@ -604,6 +604,10 @@ self.w_destructor = None self.space.call_function(w_destructor, self.w_original_cdata) + def detach_destructor(self): + self.w_destructor = None + self.may_unregister_rpython_finalizer(self.space) + W_CData.typedef = TypeDef( '_cffi_backend.CData', diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py --- a/pypy/module/_cffi_backend/cdlopen.py +++ b/pypy/module/_cffi_backend/cdlopen.py @@ -55,6 +55,7 @@ if not libhandle: raise oefmt(self.ffi.w_FFIError, "library '%s' is already closed", self.libname) + self.may_unregister_rpython_finalizer(self.ffi.space) # Clear the dict to force further accesses to do cdlopen_fetch() # again, and fail because the library was closed. 
Note that the diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -401,7 +401,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -1,4 +1,5 @@ import sys +from rpython.rlib.objectmodel import specialize from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, make_weakref_descr @@ -6,7 +7,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.debug import check_nonneg -from rpython.rlib.objectmodel import specialize # A `dequeobject` is composed of a doubly-linked list of `block` nodes. 
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -172,6 +172,7 @@ self.newlines = self.stream.getnewlines() self.stream = None self.fd = -1 + self.may_unregister_rpython_finalizer(self.space) openstreams = getopenstreams(self.space) try: del openstreams[stream] diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -233,6 +233,7 @@ except SocketError: # cpython doesn't return any errors on close pass + self.may_unregister_rpython_finalizer(space) def connect_w(self, space, w_addr): """connect(address) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -217,7 +217,7 @@ return self.space.w_None return w_obj - def descr__eq__(self, space, w_ref2): + def compare(self, space, w_ref2, invert): if not isinstance(w_ref2, W_Weakref): return space.w_NotImplemented ref1 = self @@ -225,11 +225,18 @@ w_obj1 = ref1.dereference() w_obj2 = ref2.dereference() if w_obj1 is None or w_obj2 is None: - return space.is_(ref1, ref2) - return space.eq(w_obj1, w_obj2) + w_res = space.is_(ref1, ref2) + else: + w_res = space.eq(w_obj1, w_obj2) + if invert: + w_res = space.not_(w_res) + return w_res + + def descr__eq__(self, space, w_ref2): + return self.compare(space, w_ref2, invert=False) def descr__ne__(self, space, w_ref2): - return space.not_(space.eq(self, w_ref2)) + return self.compare(space, w_ref2, invert=True) def getlifeline(space, w_obj): lifeline = w_obj.getweakref() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -150,6 +150,14 @@ assert not (ref1 == []) assert 
ref1 != [] + def test_ne(self): + import _weakref + class X(object): + pass + ref1 = _weakref.ref(X()) + assert ref1.__eq__(X()) is NotImplemented + assert ref1.__ne__(X()) is NotImplemented + def test_getweakrefs(self): import _weakref, gc class A(object): diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -124,13 +124,13 @@ assert isinstance(w_obj, FakeFloat) return w_obj.val + @specialize.arg(1) def interp_w(self, RequiredClass, w_obj, can_be_None=False): if can_be_None and w_obj is None: return None if not isinstance(w_obj, RequiredClass): raise TypeError return w_obj - interp_w._annspecialcase_ = 'specialize:arg(1)' def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -36,8 +36,6 @@ from rpython.rlib.objectmodel import specialize from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions -# CPython 2.4 compatibility -from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount @@ -985,7 +983,7 @@ py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook - _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], + _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) def reinit_tls(space): _reinit_tls() @@ -1614,9 +1612,8 @@ miniglobals = {'__name__': __name__, # for module name propagation } exec source.compile() in miniglobals - call_external_function = miniglobals['cpy_call_external'] + call_external_function = specialize.ll()(miniglobals['cpy_call_external']) call_external_function._dont_inline_ = 
True - call_external_function._annspecialcase_ = 'specialize:ll' call_external_function._gctransformer_hint_close_stack_ = True # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -62,12 +62,14 @@ position must be positive, indexing from the end of the list is not supported. If pos is out of bounds, return NULL and set an IndexError exception.""" + from pypy.module.cpyext.sequence import CPyListStrategy if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) if index < 0 or index >= w_list.length(): raise oefmt(space.w_IndexError, "list index out of range") - w_list.ensure_object_strategy() # make sure we can return a borrowed obj - # XXX ^^^ how does this interact with CPyListStrategy? + cpy_strategy = space.fromcache(CPyListStrategy) + if w_list.strategy is not cpy_strategy: + w_list.ensure_object_strategy() # make sure we can return a borrowed obj w_res = w_list.getitem(index) return w_res # borrowed ref diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -15,6 +15,7 @@ from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rawrefcount +from rpython.rlib.debug import fatalerror #________________________________________________________ @@ -192,6 +193,8 @@ rawrefcount.create_link_pypy(w_obj, py_obj) +w_marker_deallocating = W_Root() + def from_ref(space, ref): """ Finds the interpreter object corresponding to the given reference. 
If the @@ -202,7 +205,23 @@ return None w_obj = rawrefcount.to_obj(W_Root, ref) if w_obj is not None: - return w_obj + if w_obj is not w_marker_deallocating: + return w_obj + fatalerror( + "*** Invalid usage of a dying CPython object ***\n" + "\n" + "cpyext, the emulation layer, detected that while it is calling\n" + "an object's tp_dealloc, the C code calls back a function that\n" + "tries to recreate the PyPy version of the object. Usually it\n" + "means that tp_dealloc calls some general PyXxx() API. It is\n" + "a dangerous and potentially buggy thing to do: even in CPython\n" + "the PyXxx() function could, in theory, cause a reference to the\n" + "object to be taken and stored somewhere, for an amount of time\n" + "exceeding tp_dealloc itself. Afterwards, the object will be\n" + "freed, making that reference point to garbage.\n" + ">>> PyPy could contain some workaround to still work if\n" + "you are lucky, but it is not done so far; better fix the bug in\n" + "the CPython extension.") # This reference is not yet a real interpreter object. # Realize it. 
@@ -233,7 +252,8 @@ INTERPLEVEL_API['as_pyobj'] = as_pyobj def pyobj_has_w_obj(pyobj): - return rawrefcount.to_obj(W_Root, pyobj) is not None + w_obj = rawrefcount.to_obj(W_Root, pyobj) + return w_obj is not None and w_obj is not w_marker_deallocating INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) @@ -335,6 +355,7 @@ pto = obj.c_ob_type #print >>sys.stderr, "Calling dealloc slot", pto.c_tp_dealloc, "of", obj, \ # "'s type which is", rffi.charp2str(pto.c_tp_name) + rawrefcount.mark_deallocating(w_marker_deallocating, obj) generic_cpy_call(space, pto.c_tp_dealloc, obj) @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -142,7 +142,7 @@ assert fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) - def test_multiple_inheritance(self): + def test_multiple_inheritance1(self): module = self.import_module(name='foo') obj = module.UnicodeSubtype(u'xyz') obj2 = module.UnicodeSubtype2() @@ -422,7 +422,7 @@ assert space.int_w(space.getattr(w_class, w_name)) == 1 space.delitem(w_dict, w_name) - def test_multiple_inheritance(self, space, api): + def test_multiple_inheritance2(self, space, api): w_class = space.appexec([], """(): class A(object): pass @@ -1167,3 +1167,38 @@ __metaclass__ = FooType print repr(X) X() + + def test_multiple_inheritance3(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + PyTypeObject *Base1, *Base2, *Base12; + Base1 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base1->tp_name = "Base1"; + Base2->tp_name = "Base2"; + Base12->tp_name = "Base12"; + Base1->tp_basicsize = sizeof(PyHeapTypeObject); + 
Base2->tp_basicsize = sizeof(PyHeapTypeObject); + Base12->tp_basicsize = sizeof(PyHeapTypeObject); + Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_base = Base1; + Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); + Base12->tp_doc = "The Base12 type or object"; + if (PyType_Ready(Base1) < 0) return NULL; + if (PyType_Ready(Base2) < 0) return NULL; + if (PyType_Ready(Base12) < 0) return NULL; + obj = PyObject_New(PyObject, Base12); + return obj; + ''' + )]) + obj = module.new_obj() + assert 'Base12' in str(obj) + assert type(obj).__doc__ == "The Base12 type or object" + assert obj.__doc__ == "The Base12 type or object" + + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -327,6 +327,8 @@ w_obj = W_PyCWrapperObject(space, pto, method_name, wrapper_func, wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = space.wrap(w_obj) + if pto.c_tp_doc: + dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) @@ -463,13 +465,17 @@ convert_member_defs(space, dict_w, pto.c_tp_members, self) name = rffi.charp2str(pto.c_tp_name) - new_layout = (pto.c_tp_basicsize > rffi.sizeof(PyObject.TO) or - pto.c_tp_itemsize > 0) + flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + if flag_heaptype: + minsize = rffi.sizeof(PyHeapTypeObject.TO) + else: + minsize = rffi.sizeof(PyObject.TO) + new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0) W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w, force_new_layout=new_layout) self.flag_cpytype = True - self.flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + self.flag_heaptype = flag_heaptype # if a sequence or a mapping, then set the 
flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: self.flag_map_or_seq = 'S' diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -14,7 +14,28 @@ cache.clear() cache = space.fromcache(MapAttrCache) cache.clear() + rgc.collect() + + # if we are running in gc.disable() mode but gc.collect() is called, + # we should still call the finalizers now. We do this as an attempt + # to get closer to CPython's behavior: in Py3.5 some tests + # specifically rely on that. This is similar to how, in CPython, an + # explicit gc.collect() will invoke finalizers from cycles and fully + # ignore the gc.disable() mode. + temp_reenable = not space.user_del_action.enabled_at_app_level + if temp_reenable: + enable_finalizers(space) + try: + # fetch the pending finalizers from the queue, where they are + # likely to have been added by rgc.collect() above, and actually + # run them now. This forces them to run before this function + # returns, and also always in the enable_finalizers() mode. 
+ space.user_del_action._run_finalizers() + finally: + if temp_reenable: + disable_finalizers(space) + return space.wrap(0) def enable(space): diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -70,6 +70,19 @@ gc.enable() assert gc.isenabled() + def test_gc_collect_overrides_gc_disable(self): + import gc + deleted = [] + class X(object): + def __del__(self): + deleted.append(1) + assert gc.isenabled() + gc.disable() + X() + gc.collect() + assert deleted == [1] + gc.enable() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -374,17 +374,7 @@ def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_vectorized(1, 1) - - def define_sum(): - return """ - a = |30| - sum(a) - """ - def test_sum(self): - result = self.run("sum") - assert result == sum(range(30)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_int(): return """ @@ -408,7 +398,7 @@ def test_sum_multi(self): result = self.run("sum_multi") assert result == sum(range(30)) + sum(range(60)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_float_to_int16(): return """ @@ -490,7 +480,7 @@ assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(2, 1) + self.check_vectorized(2, 0) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -501,7 +491,7 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(3, 1) + self.check_vectorized(3, 0) def define_prod(): return """ @@ -518,12 +508,10 @@ def test_prod(self): result = self.run("prod") assert 
int(result) == 576 - self.check_vectorized(1, 1) def test_prod_zero(self): result = self.run("prod_zero") assert int(result) == 0 - self.check_vectorized(1, 1) def define_max(): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -307,7 +307,8 @@ def __init__(self, space): self.stat_float_times = True -def stat_float_times(space, w_value=None): + at unwrap_spec(newval=int) +def stat_float_times(space, newval=-1): """stat_float_times([newval]) -> oldval Determine whether os.[lf]stat represents time stamps as float objects. @@ -317,10 +318,10 @@ """ state = space.fromcache(StatState) - if w_value is None: + if newval == -1: return space.wrap(state.stat_float_times) else: - state.stat_float_times = space.bool_w(w_value) + state.stat_float_times = (newval != 0) @unwrap_spec(fd=c_int) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -159,11 +159,14 @@ st = posix.stat(path) assert isinstance(st.st_mtime, float) assert st[7] == int(st.st_atime) + assert posix.stat_float_times(-1) is True posix.stat_float_times(False) st = posix.stat(path) assert isinstance(st.st_mtime, (int, long)) assert st[7] == st.st_atime + assert posix.stat_float_times(-1) is False + finally: posix.stat_float_times(current) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -3,11 +3,11 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import rgc, jit +from rpython.rlib.objectmodel import specialize from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform from 
rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform -from rpython.rlib.objectmodel import specialize import sys import weakref diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -67,7 +67,8 @@ p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) guard_nonnull(p10, descr=...) - i12 = call_i(ConstClass(_ll_strhash__rpy_stringPtr), p10, descr=) + i99 = strhash(p10) + i12 = cond_call_value_i(i99, ConstClass(_ll_strhash__rpy_stringPtr), p10, descr=) p13 = new(descr=...) p15 = new_array_clear(16, descr=) {{{ @@ -86,6 +87,7 @@ call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) + i98 = strhash(p10) i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) 
i27 = int_lt(i23, 0) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -75,8 +75,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), - ('sum','float', 2581, 2581, 1), - ('prod','float', 1, 3178, 1), ('prod','int', 1, 3178, 1), ('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -387,7 +387,8 @@ def test_long_comparison(self): def main(n): while n: - 12345L > 123L # ID: long_op + x = 12345L + x > 123L # ID: long_op n -= 1 log = self.run(main, [300]) diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py --- a/pypy/module/select/interp_epoll.py +++ b/pypy/module/select/interp_epoll.py @@ -79,6 +79,7 @@ class W_Epoll(W_Root): def __init__(self, space, epfd): + self.space = space self.epfd = epfd self.register_finalizer(space) @@ -113,6 +114,7 @@ if not self.get_closed(): socketclose(self.epfd) self.epfd = -1 + self.may_unregister_rpython_finalizer(self.space) def epoll_ctl(self, space, ctl, w_fd, eventmask, ignore_ebadf=False): fd = space.c_filedescriptor_w(w_fd) diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py --- a/pypy/module/select/interp_kqueue.py +++ b/pypy/module/select/interp_kqueue.py @@ -108,6 +108,7 @@ class W_Kqueue(W_Root): def __init__(self, space, kqfd): + self.space = space self.kqfd = kqfd self.register_finalizer(space) @@ -132,6 +133,7 @@ kqfd = self.kqfd self.kqfd = -1 socketclose_no_errno(kqfd) + self.may_unregister_rpython_finalizer(self.space) def check_closed(self, space): if self.get_closed(): diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -494,3 +494,15 @@ def test_negative_array_size(self): ffi = FFI() py.test.raises(ValueError, ffi.cast, "int[-5]", 0) + + def test_cannot_instantiate_manually(self): + ffi = FFI() + ct = type(ffi.typeof("void *")) + py.test.raises(TypeError, ct) + py.test.raises(TypeError, ct, ffi.NULL) + for cd in [type(ffi.cast("void *", 0)), + type(ffi.new("char[]", 3)), + type(ffi.gc(ffi.NULL, lambda x: None))]: + py.test.raises(TypeError, cd) + py.test.raises(TypeError, cd, ffi.NULL) + py.test.raises(TypeError, cd, ffi.typeof("void *")) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -361,7 +361,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -34,3 +34,7 @@ assert os.WEXITSTATUS(status) == exit_status assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) + +def test_errors(): + py.test.raises(OSError, _pypy_wait.wait3, -999) + py.test.raises(OSError, _pypy_wait.wait4, -999, -999) diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -202,6 +202,7 @@ if mode == rzlib.Z_FINISH: # release the data structures now rzlib.deflateEnd(self.stream) self.stream = 
rzlib.null_stream + self.may_unregister_rpython_finalizer(space) finally: self.unlock() except rzlib.RZlibError as e: diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -853,7 +853,7 @@ if not e.match(space, space.w_TypeError): raise raise oefmt(space.w_TypeError, - "%(specialname)s returned non-%(targetname)s " + "%(specialname)s returned non-string " "(type '%%T')", w_result) else: # re-wrap the result as a real string diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -66,15 +66,16 @@ class W_MyType(W_MyObject): name = "foobar" flag_map_or_seq = '?' + hasuserdel = False def __init__(self): self.mro_w = [w_some_obj(), w_some_obj()] self.dict_w = {'__str__': w_some_obj()} + self.hasuserdel = True def get_module(self): return w_some_obj() - def getname(self, space): return self.name @@ -202,6 +203,7 @@ def newunicode(self, x): return w_some_obj() + @specialize.argtype(1) def wrap(self, x): if not we_are_translated(): if isinstance(x, gateway.interp2app): @@ -215,7 +217,6 @@ return w_some_obj() self._wrap_not_rpython(x) return w_some_obj() - wrap._annspecialcase_ = "specialize:argtype(1)" def _wrap_not_rpython(self, x): "NOT_RPYTHON" @@ -305,10 +306,10 @@ is_root(w_complex) return 1.1, 2.2 + @specialize.arg(1) def allocate_instance(self, cls, w_subtype): is_root(w_subtype) return instantiate(cls) - allocate_instance._annspecialcase_ = "specialize:arg(1)" def decode_index(self, w_index_or_slice, seqlength): is_root(w_index_or_slice) diff --git a/pypy/objspace/fake/test/test_checkmodule.py b/pypy/objspace/fake/test/test_checkmodule.py --- a/pypy/objspace/fake/test/test_checkmodule.py +++ b/pypy/objspace/fake/test/test_checkmodule.py @@ -9,9 +9,9 @@ def make_checker(): check = [] + @specialize.memo() def see(): check.append(True) - see._annspecialcase_ = 
'specialize:memo' return see, check def test_wrap_interp2app(): diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -2,12 +2,12 @@ import sys from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import INT_MAX from rpython.rlib.rfloat import DTSF_ALT, formatd, isnan, isinf from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.unroll import unrolling_iterable from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, oefmt diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -315,7 +315,8 @@ assert operate(A()) == "world" * n assert type(operate(A())) is str answer = 42 - raises(TypeError, operate, A()) + excinfo = raises(TypeError, operate, A()) + assert "returned non-string (type 'int')" in str(excinfo.value) def test_missing_getattribute(self): class X(object): diff --git a/pypy/tool/cpyext/extbuild.py b/pypy/tool/cpyext/extbuild.py --- a/pypy/tool/cpyext/extbuild.py +++ b/pypy/tool/cpyext/extbuild.py @@ -204,6 +204,10 @@ pass from distutils.ccompiler import new_compiler from distutils import sysconfig + + # XXX for Darwin running old versions of CPython 2.7.x + sysconfig.get_config_vars() + compiler = new_compiler(force=1) sysconfig.customize_compiler(compiler) # XXX objects = [] diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -2,6 +2,7 @@ import types from collections import defaultdict +from contextlib import contextmanager from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair @@ -83,22 +84,17 @@ 
annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) - flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy) + with self.using_policy(policy): + flowgraph, inputs_s = self.get_call_parameters(function, args_s) if main_entry_point: self.translator.entry_point_graph = flowgraph return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now) - def get_call_parameters(self, function, args_s, policy): - desc = self.bookkeeper.getdesc(function) - prevpolicy = self.policy - self.policy = policy - self.bookkeeper.enter(None) - try: + def get_call_parameters(self, function, args_s): + with self.bookkeeper.at_position(None): + desc = self.bookkeeper.getdesc(function) return desc.get_call_parameters(args_s) - finally: - self.bookkeeper.leave() - self.policy = prevpolicy def annotate_helper(self, function, args_s, policy=None): if policy is None: @@ -107,21 +103,29 @@ # XXX hack annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) - graph, inputcells = self.get_call_parameters(function, args_s, policy) - self.build_graph_types(graph, inputcells, complete_now=False) - self.complete_helpers(policy) + with self.using_policy(policy): + graph, inputcells = self.get_call_parameters(function, args_s) + self.build_graph_types(graph, inputcells, complete_now=False) + self.complete_helpers() return graph - def complete_helpers(self, policy): - saved = self.policy, self.added_blocks - self.policy = policy + def complete_helpers(self): + saved = self.added_blocks + self.added_blocks = {} try: - self.added_blocks = {} self.complete() # invoke annotation simplifications for the new blocks self.simplify(block_subset=self.added_blocks) finally: - self.policy, self.added_blocks = saved + self.added_blocks = saved + + @contextmanager + def using_policy(self, policy): + """A context manager that temporarily replaces the annotator policy""" + old_policy = self.policy + 
self.policy = policy + yield + self.policy = old_policy def build_graph_types(self, flowgraph, inputcells, complete_now=True): checkgraph(flowgraph) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -9,6 +9,7 @@ from collections import OrderedDict from rpython.flowspace.model import Constant +from rpython.flowspace.bytecode import cpython_code_signature from rpython.annotator.model import ( SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, @@ -21,6 +22,7 @@ from rpython.annotator import description from rpython.annotator.signature import annotationoftype from rpython.annotator.argument import simple_args +from rpython.annotator.specialize import memo from rpython.rlib.objectmodel import r_dict, r_ordereddict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper import extregistry @@ -358,7 +360,7 @@ return self.descs[obj_key] except KeyError: if isinstance(pyobj, types.FunctionType): - result = description.FunctionDesc(self, pyobj) + result = self.newfuncdesc(pyobj) elif isinstance(pyobj, (type, types.ClassType)): if pyobj is object: raise Exception("ClassDesc for object not supported") @@ -403,6 +405,23 @@ self.descs[obj_key] = result return result + def newfuncdesc(self, pyfunc): + name = pyfunc.__name__ + if hasattr(pyfunc, '_generator_next_method_of_'): + from rpython.flowspace.argument import Signature + signature = Signature(['entry']) # haaaaaack + defaults = () + else: + signature = cpython_code_signature(pyfunc.func_code) + defaults = pyfunc.func_defaults + # get the specializer based on the tag of the 'pyobj' + # (if any), according to the current policy + tag = getattr(pyfunc, '_annspecialcase_', None) + specializer = self.annotator.policy.get_specializer(tag) + if specializer is memo: + return description.MemoDesc(self, pyfunc, 
name, signature, defaults, specializer) + return description.FunctionDesc(self, pyfunc, name, signature, defaults, specializer) + def getfrozen(self, pyobj): return description.FrozenDesc(self, pyobj) diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ b/rpython/annotator/classdesc.py @@ -608,7 +608,7 @@ if mixin: # make a new copy of the FunctionDesc for this class, # but don't specialize further for all subclasses - funcdesc = FunctionDesc(self.bookkeeper, value) + funcdesc = self.bookkeeper.newfuncdesc(value) self.classdict[name] = funcdesc return # NB. if value is, say, AssertionError.__init__, then we diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -3,11 +3,10 @@ from rpython.annotator.signature import ( enforce_signature_args, enforce_signature_return, finish_type) from rpython.flowspace.model import FunctionGraph -from rpython.flowspace.bytecode import cpython_code_signature from rpython.annotator.argument import rawshape, ArgErr, simple_args from rpython.tool.sourcetools import valid_identifier from rpython.tool.pairtype import extendabletype -from rpython.annotator.model import AnnotatorError, s_ImpossibleValue +from rpython.annotator.model import AnnotatorError, s_ImpossibleValue, unionof class CallFamily(object): """A family of Desc objects that could be called from common call sites. 
@@ -117,7 +116,6 @@ self.s_value = s_ImpossibleValue # union of possible values def update(self, other): - from rpython.annotator.model import unionof self.descs.update(other.descs) self.read_locations.update(other.read_locations) self.s_value = unionof(self.s_value, other.s_value) @@ -192,24 +190,12 @@ class FunctionDesc(Desc): knowntype = types.FunctionType - def __init__(self, bookkeeper, pyobj=None, - name=None, signature=None, defaults=None, + def __init__(self, bookkeeper, pyobj, name, signature, defaults, specializer=None): super(FunctionDesc, self).__init__(bookkeeper, pyobj) - if name is None: - name = pyobj.func_name - if signature is None: - if hasattr(pyobj, '_generator_next_method_of_'): - from rpython.flowspace.argument import Signature - signature = Signature(['entry']) # haaaaaack - defaults = () - else: - signature = cpython_code_signature(pyobj.func_code) - if defaults is None: - defaults = pyobj.func_defaults self.name = name self.signature = signature - self.defaults = defaults or () + self.defaults = defaults if defaults is not None else () # 'specializer' is a function with the following signature: # specializer(funcdesc, args_s) => graph # or => s_result (overridden/memo cases) @@ -288,12 +274,43 @@ getattr(self.bookkeeper, "position_key", None) is not None): _, block, i = self.bookkeeper.position_key op = block.operations[i] - if self.specializer is None: - # get the specializer based on the tag of the 'pyobj' - # (if any), according to the current policy - tag = getattr(self.pyobj, '_annspecialcase_', None) - policy = self.bookkeeper.annotator.policy - self.specializer = policy.get_specializer(tag) + self.normalize_args(inputcells) + if getattr(self.pyobj, '_annspecialcase_', '').endswith("call_location"): + return self.specializer(self, inputcells, op) + else: + return self.specializer(self, inputcells) + + def pycall(self, whence, args, s_previous_result, op=None): + inputcells = self.parse_arguments(args) + graph = 
self.specialize(inputcells, op) + assert isinstance(graph, FunctionGraph) + # if that graph has a different signature, we need to re-parse + # the arguments. + # recreate the args object because inputcells may have been changed + new_args = args.unmatch_signature(self.signature, inputcells) + inputcells = self.parse_arguments(new_args, graph) + annotator = self.bookkeeper.annotator + result = annotator.recursivecall(graph, whence, inputcells) + signature = getattr(self.pyobj, '_signature_', None) + if signature: + sigresult = enforce_signature_return(self, signature[1], result) + if sigresult is not None: + annotator.addpendingblock( + graph, graph.returnblock, [sigresult]) + result = sigresult + # Some specializations may break the invariant of returning + # annotations that are always more general than the previous time. + # We restore it here: + result = unionof(result, s_previous_result) + return result + + def normalize_args(self, inputs_s): + """ + Canonicalize argument annotations into the exact parameter + annotations of a specific specialized graph. + + Note: this method has no return value but mutates its argument instead. 
+ """ enforceargs = getattr(self.pyobj, '_annenforceargs_', None) signature = getattr(self.pyobj, '_signature_', None) if enforceargs and signature: @@ -304,39 +321,9 @@ from rpython.annotator.signature import Sig enforceargs = Sig(*enforceargs) self.pyobj._annenforceargs_ = enforceargs - enforceargs(self, inputcells) # can modify inputcells in-place + enforceargs(self, inputs_s) # can modify inputs_s in-place if signature: - enforce_signature_args(self, signature[0], inputcells) # mutates inputcells - if getattr(self.pyobj, '_annspecialcase_', '').endswith("call_location"): - return self.specializer(self, inputcells, op) - else: - return self.specializer(self, inputcells) - - def pycall(self, whence, args, s_previous_result, op=None): - inputcells = self.parse_arguments(args) - result = self.specialize(inputcells, op) - if isinstance(result, FunctionGraph): - graph = result # common case - annotator = self.bookkeeper.annotator - # if that graph has a different signature, we need to re-parse - # the arguments. - # recreate the args object because inputcells may have been changed - new_args = args.unmatch_signature(self.signature, inputcells) - inputcells = self.parse_arguments(new_args, graph) - result = annotator.recursivecall(graph, whence, inputcells) - signature = getattr(self.pyobj, '_signature_', None) - if signature: - sigresult = enforce_signature_return(self, signature[1], result) - if sigresult is not None: - annotator.addpendingblock( - graph, graph.returnblock, [sigresult]) - result = sigresult - # Some specializations may break the invariant of returning - # annotations that are always more general than the previous time. 
- # We restore it here: - from rpython.annotator.model import unionof - result = unionof(result, s_previous_result) - return result + enforce_signature_args(self, signature[0], inputs_s) # mutates inputs_s def get_graph(self, args, op): inputs_s = self.parse_arguments(args) @@ -405,6 +392,16 @@ return s_sigs +class MemoDesc(FunctionDesc): + def pycall(self, whence, args, s_previous_result, op=None): + inputcells = self.parse_arguments(args) + s_result = self.specialize(inputcells, op) + if isinstance(s_result, FunctionGraph): + s_result = s_result.getreturnvar().annotation + s_result = unionof(s_result, s_previous_result) + return s_result + + class MethodDesc(Desc): knowntype = types.MethodType diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -3,11 +3,13 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.tool.algo.unionfind import UnionFind -from rpython.flowspace.model import Block, Link, Variable, SpaceOperation +from rpython.flowspace.model import Block, Link, Variable from rpython.flowspace.model import checkgraph from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.flowspace.argument import Signature +from rpython.annotator.model import SomePBC, SomeImpossibleValue, SomeBool +from rpython.annotator.model import unionof def flatten_star_args(funcdesc, args_s): argnames, vararg, kwarg = funcdesc.signature @@ -127,7 +129,6 @@ def finish(self): if self.do_not_process: return - from rpython.annotator.model import unionof assert self.graph is None, "MemoTable already finished" # list of which argument positions can take more than one value example_args, example_value = self.table.iteritems().next() @@ -246,34 +247,36 @@ args_s.append(unionof(*values_s)) annotator.addpendinggraph(self.graph, args_s) +def all_values(s): + """Return the exhaustive list of possible values matching annotation 
`s`. -def memo(funcdesc, arglist_s): - from rpython.annotator.model import SomePBC, SomeImpossibleValue, SomeBool - from rpython.annotator.model import unionof + Raises `AnnotatorError` if no such (reasonably small) finite list exists. + """ + if s.is_constant(): + return [s.const] + elif isinstance(s, SomePBC): + values = [] + assert not s.can_be_None, "memo call: cannot mix None and PBCs" + for desc in s.descriptions: + if desc.pyobj is None: + raise annmodel.AnnotatorError( + "memo call with a class or PBC that has no " + "corresponding Python object (%r)" % (desc,)) + values.append(desc.pyobj) + return values + elif isinstance(s, SomeImpossibleValue): + return [] + elif isinstance(s, SomeBool): + return [False, True] + else: + raise annmodel.AnnotatorError("memo call: argument must be a class " + "or a frozen PBC, got %r" % (s,)) + +def memo(funcdesc, args_s): # call the function now, and collect possible results - argvalues = [] - for s in arglist_s: - if s.is_constant(): - values = [s.const] - elif isinstance(s, SomePBC): - values = [] - assert not s.can_be_None, "memo call: cannot mix None and PBCs" - for desc in s.descriptions: - if desc.pyobj is None: - raise annmodel.AnnotatorError( - "memo call with a class or PBC that has no " - "corresponding Python object (%r)" % (desc,)) - values.append(desc.pyobj) - elif isinstance(s, SomeImpossibleValue): - return s # we will probably get more possible args later - elif isinstance(s, SomeBool): - values = [False, True] - else: - raise annmodel.AnnotatorError("memo call: argument must be a class " - "or a frozen PBC, got %r" % (s,)) - argvalues.append(values) + # the list of all possible tuples of arguments to give to the memo function - possiblevalues = cartesian_product(argvalues) + possiblevalues = cartesian_product([all_values(s_arg) for s_arg in args_s]) # a MemoTable factory -- one MemoTable per family of arguments that can # be called together, merged via a UnionFind. 
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -268,12 +268,15 @@ """ mc = InstrBuilder(self.cpu.cpuinfo.arch_version) # - self._push_all_regs_to_jitframe(mc, [], self.cpu.supports_floats, callee_only) + # We don't save/restore r4; instead the return value (if any) + # will be stored there. + self._push_all_regs_to_jitframe(mc, [r.r4], self.cpu.supports_floats, callee_only) ## args are in their respective positions mc.PUSH([r.ip.value, r.lr.value]) mc.BLX(r.r4.value) + mc.MOV_rr(r.r4.value, r.r0.value) self._reload_frame_if_necessary(mc) - self._pop_all_regs_from_jitframe(mc, [], supports_floats, + self._pop_all_regs_from_jitframe(mc, [r.r4], supports_floats, callee_only) # return mc.POP([r.ip.value, r.pc.value]) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -357,7 +357,13 @@ return fcond def emit_op_cond_call(self, op, arglocs, regalloc, fcond): - [call_loc] = arglocs + call_loc = arglocs[0] + if len(arglocs) == 2: + res_loc = arglocs[1] # cond_call_value + else: + res_loc = None # cond_call + # useless to list res_loc in the gcmap, because if the call is + # done it means res_loc was initially NULL gcmap = regalloc.get_gcmap([call_loc]) assert call_loc is r.r4 @@ -378,8 +384,13 @@ floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] self.mc.BL(cond_call_adr) + # if this is a COND_CALL_VALUE, we need to move the result in place + # from its current location (which is, unusually, in r4: see + # cond_call_slowpath) + if res_loc is not None and res_loc is not r.r4: + self.mc.MOV_rr(res_loc.value, r.r4.value) + # self.pop_gcmap(self.mc) - # never any result value cond = c.get_opposite_of(self.guard_success_cc) self.guard_success_cc = c.cond_none pmc = 
OverwritingBuilder(self.mc, jmp_adr, WORD) @@ -389,6 +400,9 @@ self.previous_cond_call_jcond = jmp_adr, cond return fcond + emit_op_cond_call_value_i = emit_op_cond_call + emit_op_cond_call_value_r = emit_op_cond_call + def emit_op_jump(self, op, arglocs, regalloc, fcond): target_token = op.getdescr() assert isinstance(target_token, TargetToken) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1004,7 +1004,6 @@ def prepare_op_cond_call(self, op, fcond): # XXX don't force the arguments to be loaded in specific # locations before knowing if we can take the fast path - # XXX add cond_call_value support assert 2 <= op.numargs() <= 4 + 2 tmpreg = self.get_scratch_reg(INT, selected_reg=r.r4) v = op.getarg(1) @@ -1017,8 +1016,33 @@ arg = op.getarg(i) self.make_sure_var_in_reg(arg, args_so_far, selected_reg=reg) args_so_far.append(arg) - self.load_condition_into_cc(op.getarg(0)) - return [tmpreg] + + if op.type == 'v': + # a plain COND_CALL. Calls the function when args[0] is + # true. Often used just after a comparison operation. + self.load_condition_into_cc(op.getarg(0)) + return [tmpreg] + else: + # COND_CALL_VALUE_I/R. Calls the function when args[0] + # is equal to 0 or NULL. Returns the result from the + # function call if done, or args[0] if it was not 0/NULL. + # Implemented by forcing the result to live in the same + # register as args[0], and overwriting it if we really do + # the call. + + # Load the register for the result. Possibly reuse 'args[0]'. + # But the old value of args[0], if it survives, is first + # spilled away. We can't overwrite any of op.args[2:] here. + args = op.getarglist() + resloc = self.rm.force_result_in_reg(op, args[0], + forbidden_vars=args[2:]) + # Test the register for the result. 
+ self.assembler.mc.CMP_ri(resloc.value, 0) + self.assembler.guard_success_cc = c.EQ + return [tmpreg, resloc] + + prepare_op_cond_call_value_i = prepare_op_cond_call + prepare_op_cond_call_value_r = prepare_op_cond_call def prepare_op_force_token(self, op, fcond): # XXX for now we return a regular reg diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -15,11 +15,12 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import hlstr, hlunicode from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong, intmask -from rpython.rlib.objectmodel import Symbolic +from rpython.rlib.objectmodel import Symbolic, compute_hash class LLAsmInfo(object): def __init__(self, lltrace): @@ -326,7 +327,6 @@ supports_longlong = r_uint is not r_ulonglong supports_singlefloats = True supports_guard_gc_type = True - supports_cond_call_value = True translate_support_code = False is_llgraph = True vector_ext = VectorExt() @@ -789,6 +789,10 @@ assert 0 <= dststart <= dststart + length <= len(dst.chars) rstr.copy_string_contents(src, dst, srcstart, dststart, length) + def bh_strhash(self, s): + lls = s._obj.container + return compute_hash(hlstr(lls._as_ptr())) + def bh_newunicode(self, length): return lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(rstr.UNICODE, length, @@ -811,6 +815,10 @@ assert 0 <= dststart <= dststart + length <= len(dst.chars) rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) + def bh_unicodehash(self, s): + lls = s._obj.container + return compute_hash(hlunicode(lls._as_ptr())) + def bh_new(self, sizedescr): return lltype.cast_opaque_ptr(llmemory.GCREF, 
lltype.malloc(sizedescr.S, zero=True)) @@ -1120,7 +1128,7 @@ value = sum(value) elif info.accum_operation == '*': def prod(acc, x): return acc * x - value = reduce(prod, value, 1) + value = reduce(prod, value, 1.0) else: raise NotImplementedError("accum operator in fail guard") values[i] = value diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -3,8 +3,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator +from rpython.rtyper.annlowlevel import hlstr, hlunicode from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rlib.objectmodel import we_are_translated, specialize, compute_hash from rpython.jit.metainterp import history, compile from rpython.jit.metainterp.optimize import SpeculativeError from rpython.jit.codewriter import heaptracker, longlong @@ -663,6 +664,14 @@ u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) return len(u.chars) + def bh_strhash(self, string): + s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) + return compute_hash(hlstr(s)) + + def bh_unicodehash(self, string): + u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string) + return compute_hash(hlunicode(u)) + def bh_strgetitem(self, string, index): s = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), string) return ord(s.chars[index]) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -9,9 +9,9 @@ from rpython.jit.metainterp.typesystem import rd_eq, rd_hash from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import (WORD, - 
get_array_token) + get_field_token, get_array_token) from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr,\ - FLAG_POINTER, CallDescr + FLAG_POINTER from rpython.jit.metainterp.history import JitCellToken from rpython.jit.backend.llsupport.descr import (unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) @@ -262,6 +262,18 @@ self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) + elif opnum == rop.STRHASH: + offset, size = get_field_token(rstr.STR, + 'hash', self.cpu.translate_support_code) + assert size == WORD + self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), + WORD, 1, offset, sign=True) + elif opnum == rop.UNICODEHASH: + offset, size = get_field_token(rstr.UNICODE, + 'hash', self.cpu.translate_support_code) + assert size == WORD + self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), + WORD, 1, offset, sign=True) elif opnum == rop.STRGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) @@ -347,9 +359,7 @@ self.consider_setfield_gc(op) elif op.getopnum() == rop.SETARRAYITEM_GC: self.consider_setarrayitem_gc(op) - # ---------- calls ----------- - if OpHelpers.is_plain_call(op.getopnum()): - self.expand_call_shortcut(op) + # ---------- call assembler ----------- if OpHelpers.is_call_assembler(op.getopnum()): self.handle_call_assembler(op) continue @@ -595,33 +605,6 @@ self.emit_gc_store_or_indexed(None, ptr, ConstInt(0), value, size, 1, ofs) - def expand_call_shortcut(self, op): - if not self.cpu.supports_cond_call_value: - return - descr = op.getdescr() - if descr is None: - return - assert isinstance(descr, CallDescr) - effectinfo = descr.get_extra_info() - if effectinfo is None or effectinfo.call_shortcut is None: - return - if op.type == 'r': - cond_call_opnum = rop.COND_CALL_VALUE_R - elif op.type == 'i': - cond_call_opnum = rop.COND_CALL_VALUE_I - else: - return - cs = effectinfo.call_shortcut 
- ptr_box = op.getarg(1 + cs.argnum) - if cs.fielddescr is not None: - value_box = self.emit_getfield(ptr_box, descr=cs.fielddescr, - raw=(ptr_box.type == 'i')) - else: - value_box = ptr_box - self.replace_op_with(op, ResOperation(cond_call_opnum, - [value_box] + op.getarglist(), - descr=descr)) - def handle_call_assembler(self, op): descrs = self.gc_ll_descr.getframedescrs(self.cpu) loop_token = op.getdescr() diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1,8 +1,7 @@ import py from rpython.jit.backend.llsupport.descr import get_size_descr,\ get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescr, get_interiorfield_descr, get_call_descr -from rpython.jit.codewriter.effectinfo import EffectInfo, CallShortcut + SizeDescr, get_interiorfield_descr from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ GcLLDescr_framework from rpython.jit.backend.llsupport import jitframe @@ -81,21 +80,6 @@ lltype.malloc(T, zero=True)) self.myT = myT # - call_shortcut = CallShortcut(0, tzdescr) - effectinfo = EffectInfo(None, None, None, None, None, None, - EffectInfo.EF_RANDOM_EFFECTS, - call_shortcut=call_shortcut) - call_shortcut_descr = get_call_descr(self.gc_ll_descr, - [lltype.Ptr(T)], lltype.Signed, - effectinfo) - call_shortcut_2 = CallShortcut(0, None) - effectinfo_2 = EffectInfo(None, None, None, None, None, None, - EffectInfo.EF_RANDOM_EFFECTS, - call_shortcut=call_shortcut_2) - call_shortcut_descr_2 = get_call_descr(self.gc_ll_descr, - [lltype.Signed], lltype.Signed, - effectinfo_2) - # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) adescr.tid = 4321 @@ -216,7 +200,6 @@ load_constant_offset = True load_supported_factors = (1,2,4,8) - supports_cond_call_value = True translate_support_code = None @@ -1239,6 +1222,10 @@ 'i3 = 
gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], + [True, (1,), 'i3 = strhash(p0)' '->' + 'i3 = gc_load_i(p0,' + '%(strhashdescr.offset)s,' + '-%(strhashdescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(unicodelendescr.offset)s,' @@ -1247,7 +1234,10 @@ 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], - + [True, (1,), 'i3 = unicodehash(p0)' '->' + 'i3 = gc_load_i(p0,' + '%(unicodehashdescr.offset)s,' + '-%(unicodehashdescr.field_size)s)'], ## getitem str/unicode [True, (2,4), 'i3 = unicodegetitem(p0,i1)' '->' 'i3 = gc_load_indexed_i(p0,i1,' From pypy.commits at gmail.com Fri Dec 9 07:28:22 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 09 Dec 2016 04:28:22 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: progress - not yet assigning the {g, s}etter. Then generalize to all w_dict value usage Message-ID: <584aa366.4dd41c0a.c435f.c89b@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r88986:d8c3c1c7812f Date: 2016-12-09 14:26 +0200 http://bitbucket.org/pypy/pypy/changeset/d8c3c1c7812f/ Log: progress - not yet assigning the {g,s}etter. 
Then generalize to all w_dict value usage diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -11,7 +11,7 @@ make_typedescr, track_reference, create_ref, from_ref, Py_DecRef, Py_IncRef) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.cpyext.typeobject import W_GetSetPropertyEx +from pypy.module.cpyext.typeobject import W_GetSetPropertyEx, make_GetSet PyDictObjectStruct = lltype.ForwardReference() PyDictObject = lltype.Ptr(PyDictObjectStruct) @@ -264,13 +264,15 @@ if pkey: pkey[0] = as_pyobj(space, w_key) if pvalue: - if 0 and isinstance(w_value, GetSetProperty): - # XXX implement this method for all W_Dict storage strategies - w_type = w_dict.get_storage().get_original_type_object_if_classdict() - # XXX doesn't quite work, need to convert GetSetProperty - # to PyGetSetDef, with c_name, c_get, c_set, c_doc, c_closure - # Do this by calling a make_typedescr(GetSetProperty)? - py_getsetdef = as_pyobj(space, w_value) + if isinstance(w_value, GetSetProperty): + strategy = w_dict.get_strategy() + # for translation + assert isinstance(strategy, ClassDictStrategy) + w_type = strategy.unerase(w_dict.get_storage()) + assert space.isinstance_w(w_type, space.w_type) + #XXX Could this by calling a make_typedescr(GetSetProperty), + # but how to feed in w_type? 
+ py_getsetdef = make_GetSet(space, w_value) w_value = W_GetSetPropertyEx(py_getsetdef, w_type) pvalue[0] = as_pyobj(space, w_value) return 1 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -32,7 +32,7 @@ from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( - PyGetSetDef, PyMemberDef, newfunc, + PyGetSetDef, PyMemberDef, newfunc, getter, setter, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.objspace.std.typeobject import W_TypeObject, find_best_base @@ -61,6 +61,7 @@ self.w_type = w_type doc = set = get = None if doc: + # XXX dead code? doc = rffi.charp2str(getset.c_doc) if getset.c_get: get = GettersAndSetters.getter.im_func @@ -73,6 +74,21 @@ def PyDescr_NewGetSet(space, getset, w_type): return space.wrap(W_GetSetPropertyEx(getset, w_type)) +def make_GetSet(space, getsetprop): + py_getsetdef = lltype.malloc(PyGetSetDef, flavor='raw') + doc = getsetprop.doc + if doc: + py_getsetdef.c_doc = rffi.str2charp(doc) + else: + py_getsetdef.c_doc = rffi.cast(rffi.CCHARP, 0) + py_getsetdef.c_name = rffi.str2charp(getsetprop.getname(space)) + # XXX FIXME - actually assign these !!! 
+ py_getsetdef.c_get = rffi.cast(getter, 0) + py_getsetdef.c_set = rffi.cast(setter, 0) + py_getsetdef.c_closure = rffi.cast(rffi.VOIDP, 0) + return py_getsetdef + + class W_MemberDescr(GetSetProperty): name = 'member_descriptor' def __init__(self, member, w_type): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -356,6 +356,9 @@ if strategy is not object_strategy: strategy.switch_to_object_strategy(self) + def get_storage(self): # for getting the w_type from a ClassDictStrategy + return self.dstorage + class W_DictObject(W_DictMultiObject): """ a regular dict object """ From pypy.commits at gmail.com Fri Dec 9 08:14:49 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 05:14:49 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: fixed some more tests Message-ID: <584aae49.876ec20a.35ac6.44a0@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88987:d9a749dfce80 Date: 2016-12-09 14:14 +0100 http://bitbucket.org/pypy/pypy/changeset/d9a749dfce80/ Log: fixed some more tests diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -83,7 +83,7 @@ def __init__(self, value): self.value = value - self.readonly = True + self.readonly = 1 def getlength(self): return len(self.value) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -755,49 +755,6 @@ SIGNEDP = lltype.Ptr(lltype.Array(SIGNED, hints={'nolength': True})) -class RawBytes(object): - # literal copy of _cffi_backend/func.py - def __init__(self, string): - self.ptr = str2charp(string, track_allocation=False) - def __del__(self): - free_charp(self.ptr, track_allocation=False) - -from rpython.rlib import rweakref -from rpython.rlib.buffer import Buffer 
-_STR_WDICT = rweakref.RWeakKeyDictionary(Buffer, RawBytes) - - at jit.dont_look_inside -def get_raw_address_of_string(key, string): - """Returns a 'char *' that is valid as long as the key object is alive. - Two calls to to this function are guaranteed to return the same pointer. - - The extra parameter key is necessary to create a weak reference. - The buffer of the returned pointer (if object is young) lives as long - as key is alive. If key goes out of scope, the buffer will eventually - be freed. `string` cannot go out of scope until the RawBytes object - referencing it goes out of scope. - """ - from rpython.rtyper.annlowlevel import llstr - from rpython.rtyper.lltypesystem.rstr import STR - from rpython.rtyper.lltypesystem import llmemory - from rpython.rlib import rgc - - global _STR_WDICT - rawbytes = _STR_WDICT.get(key) - if rawbytes is None: - if we_are_translated() and not rgc.can_move(string): - lldata = llstr(string) - data_start = (llmemory.cast_ptr_to_adr(lldata) + - offsetof(STR, 'chars') + - llmemory.itemoffsetof(STR.chars, 0)) - data_start = cast(CCHARP, data_start) - data_start[len(string)] = '\x00' # write the final extra null - return data_start - rawbytes = RawBytes(string) - _STR_WDICT.set(key, rawbytes) - - return rawbytes.ptr - # various type mapping # conversions between str and char* @@ -1355,3 +1312,47 @@ releasegil=False, calling_conv='c', ) + +class RawBytes(object): + # literal copy of _cffi_backend/func.py + def __init__(self, string): + self.ptr = str2charp(string, track_allocation=False) + def __del__(self): + free_charp(self.ptr, track_allocation=False) + +from rpython.rlib import rweakref +from rpython.rlib.buffer import Buffer +_STR_WDICT = rweakref.RWeakKeyDictionary(Buffer, RawBytes) + + at jit.dont_look_inside +def get_raw_address_of_string(key, string): + """Returns a 'char *' that is valid as long as the key object is alive. + Two calls to to this function are guaranteed to return the same pointer. 
+ + The extra parameter key is necessary to create a weak reference. + The buffer of the returned pointer (if object is young) lives as long + as key is alive. If key goes out of scope, the buffer will eventually + be freed. `string` cannot go out of scope until the RawBytes object + referencing it goes out of scope. + """ + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import STR + from rpython.rtyper.lltypesystem import llmemory + from rpython.rlib import rgc + + global _STR_WDICT + rawbytes = _STR_WDICT.get(key) + if rawbytes is None: + if we_are_translated() and not rgc.can_move(string): + lldata = llstr(string) + data_start = (llmemory.cast_ptr_to_adr(lldata) + + offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + data_start = cast(CCHARP, data_start) + data_start[len(string)] = '\x00' # write the final extra null + return data_start + rawbytes = RawBytes(string) + _STR_WDICT.set(key, rawbytes) + + return rawbytes.ptr + From pypy.commits at gmail.com Fri Dec 9 09:52:34 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 06:52:34 -0800 (PST) Subject: [pypy-commit] cffi strbuf-as-buffer: pypy now can get a raw address from a StringBuffer, modify test Message-ID: <584ac532.e6b0c20a.2bab1.6e24@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r2831:a00f88374679 Date: 2016-12-09 15:52 +0100 http://bitbucket.org/cffi/cffi/changeset/a00f88374679/ Log: pypy now can get a raw address from a StringBuffer, modify test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3428,22 +3428,28 @@ assert p1 == from_buffer(BCharA, b"foo") import gc; gc.collect() assert p1 == from_buffer(BCharA, b"foo") - py.test.raises(TypeError, from_buffer, BCharA, u+"foo") try: from __builtin__ import buffer except ImportError: - pass + # python3 does not allow a buffer from unicode! 
+ raises(TypeError, from_buffer, BCharA, u+"foo") else: - # from_buffer(buffer(b"foo")) does not work, because it's not - # implemented on pypy; only from_buffer(b"foo") works. - py.test.raises(TypeError, from_buffer, BCharA, buffer(b"foo")) - py.test.raises(TypeError, from_buffer, BCharA, buffer(u+"foo")) + contents = from_buffer(BCharA, buffer(b"foo")) + for i in range(len(contents)): + assert contents[i] == p1[i] + p4 = from_buffer(BCharA, u+"foo") + contents = from_buffer(BCharA, buffer(u+"foo")) + for i in range(len(contents)): + assert contents[i] == p4[i] try: from __builtin__ import memoryview except ImportError: pass else: - py.test.raises(TypeError, from_buffer, BCharA, memoryview(b"foo")) + contents = from_buffer(BCharA, memoryview(b"foo")) + for i in range(len(contents)): + assert contents[i] == p1[i] + def test_from_buffer_bytearray(): a = bytearray(b"xyz") From pypy.commits at gmail.com Fri Dec 9 10:23:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 07:23:16 -0800 (PST) Subject: [pypy-commit] cffi strbuf-as-buffer: allow from_buffer for buffer and memoryview even when they point to bytes/unicode Message-ID: <584acc64.e5ebc20a.27b99.0877@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r2832:f92733f03d32 Date: 2016-12-09 16:22 +0100 http://bitbucket.org/cffi/cffi/changeset/f92733f03d32/ Log: allow from_buffer for buffer and memoryview even when they point to bytes/unicode diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6110,24 +6110,13 @@ #if PY_MAJOR_VERSION < 3 if (PyBuffer_Check(x)) { - /* XXX fish fish fish in an inofficial way */ - typedef struct { - PyObject_HEAD - PyObject *b_base; - } _my_PyBufferObject; - - _my_PyBufferObject *b = (_my_PyBufferObject *)x; - x = b->b_base; - if (x == NULL) - return 0; + return 0; } else #endif #if PY_MAJOR_VERSION > 2 || PY_MINOR_VERSION > 6 if (PyMemoryView_Check(x)) { - x = PyMemoryView_GET_BASE(x); - 
if (x == NULL) - return 0; + return 0; } else #endif diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3426,18 +3426,16 @@ BCharA = new_array_type(BCharP, None) p1 = from_buffer(BCharA, b"foo") assert p1 == from_buffer(BCharA, b"foo") - import gc; gc.collect() - assert p1 == from_buffer(BCharA, b"foo") try: from __builtin__ import buffer except ImportError: - # python3 does not allow a buffer from unicode! - raises(TypeError, from_buffer, BCharA, u+"foo") + # python3 does not allow from to get buffer from unicode! + py.test.raises(TypeError, from_buffer, BCharA, u+"foo") else: contents = from_buffer(BCharA, buffer(b"foo")) for i in range(len(contents)): assert contents[i] == p1[i] - p4 = from_buffer(BCharA, u+"foo") + p4 = from_buffer(BCharA, b"f\x00\x00\x00o\x00\x00\x00o\x00\x00\x00") contents = from_buffer(BCharA, buffer(u+"foo")) for i in range(len(contents)): assert contents[i] == p4[i] From pypy.commits at gmail.com Fri Dec 9 10:24:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 07:24:44 -0800 (PST) Subject: [pypy-commit] cffi strbuf-as-buffer: do not allow unicode to directly be passed to from_buffer Message-ID: <584accbc.c9b3c20a.3c7fe.8d9f@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r2833:d2c09e27ecde Date: 2016-12-09 16:24 +0100 http://bitbucket.org/cffi/cffi/changeset/d2c09e27ecde/ Log: do not allow unicode to directly be passed to from_buffer diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3426,11 +3426,11 @@ BCharA = new_array_type(BCharP, None) p1 = from_buffer(BCharA, b"foo") assert p1 == from_buffer(BCharA, b"foo") + py.test.raises(TypeError, from_buffer, BCharA, u+"foo") try: from __builtin__ import buffer except ImportError: - # python3 does not allow from to get buffer from unicode! 
- py.test.raises(TypeError, from_buffer, BCharA, u+"foo") + pass else: contents = from_buffer(BCharA, buffer(b"foo")) for i in range(len(contents)): From pypy.commits at gmail.com Fri Dec 9 11:04:44 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 09 Dec 2016 08:04:44 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: copy over test_c.py to _backend_test_c.py (of the strbuf-as-buffer branch) Message-ID: <584ad61c.0a4cc20a.cf0fa.8dae@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r88988:c78e1e9c9ed9 Date: 2016-12-09 17:04 +0100 http://bitbucket.org/pypy/pypy/changeset/c78e1e9c9ed9/ Log: copy over test_c.py to _backend_test_c.py (of the strbuf-as-buffer branch) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -135,7 +135,16 @@ # return _from_buffer(space, w_ctype, w_x) +def invalid_input_buffer_type(space, w_x): + if space.isinstance_w(w_x, space.w_unicode): + return True + return False + def _from_buffer(space, w_ctype, w_x): + if invalid_input_buffer_type(space, w_x): + raise oefmt(space.w_TypeError, + "from_buffer() cannot return the address of the " + "raw string within a str or unicode object") buf = _fetch_as_read_buffer(space, w_x) if space.isinstance_w(w_x, space.w_str): _cdata = get_raw_address_of_string(space, w_x) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3415,18 +3415,16 @@ BCharA = new_array_type(BCharP, None) p1 = from_buffer(BCharA, b"foo") assert p1 == from_buffer(BCharA, b"foo") - import gc; gc.collect() - assert p1 == from_buffer(BCharA, b"foo") + py.test.raises(TypeError, from_buffer, BCharA, u+"foo") try: from __builtin__ import buffer except ImportError: - # python3 does not allow from to get buffer 
from unicode! - raises(TypeError, from_buffer, BCharA, u+"foo") + pass else: - p4 = from_buffer(BCharA, u+"foo") contents = from_buffer(BCharA, buffer(b"foo")) for i in range(len(contents)): assert contents[i] == p1[i] + p4 = from_buffer(BCharA, b"f\x00\x00\x00o\x00\x00\x00o\x00\x00\x00") contents = from_buffer(BCharA, buffer(u+"foo")) for i in range(len(contents)): assert contents[i] == p4[i] @@ -3439,6 +3437,7 @@ for i in range(len(contents)): assert contents[i] == p1[i] + def test_from_buffer_bytearray(): a = bytearray(b"xyz") BChar = new_primitive_type("char") diff --git a/pypy/objspace/std/test/test_bufferobject.py b/pypy/objspace/std/test/test_bufferobject.py --- a/pypy/objspace/std/test/test_bufferobject.py +++ b/pypy/objspace/std/test/test_bufferobject.py @@ -199,7 +199,9 @@ raises(TypeError, "buf[MyInt(0):MyInt(5)]") def test_pypy_raw_address_base(self): - raises(ValueError, buffer("foobar")._pypy_raw_address) - raises(ValueError, buffer(u"foobar")._pypy_raw_address) - a = buffer(bytearray("foobar"))._pypy_raw_address() + a = buffer("foobar")._pypy_raw_address() assert a != 0 + b = buffer(u"foobar")._pypy_raw_address() + assert b != 0 + c = buffer(bytearray("foobar"))._pypy_raw_address() + assert c != 0 diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -56,6 +56,7 @@ assert u"abc" != memoryview("abc") def test_pypy_raw_address_base(self): - raises(ValueError, memoryview("foobar")._pypy_raw_address) - a = memoryview(bytearray("foobar"))._pypy_raw_address() + a = memoryview("foobar")._pypy_raw_address() assert a != 0 + b = memoryview(bytearray("foobar"))._pypy_raw_address() + assert b != 0 From pypy.commits at gmail.com Fri Dec 9 14:23:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 09 Dec 2016 11:23:17 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Update C implementations of 
PyObject_GetBuffer/PyBuffer_Release to match CPython 3.5 Message-ID: <584b04a5.06891c0a.e1974.7d6f@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88989:5a3b35c71677 Date: 2016-12-09 19:22 +0000 http://bitbucket.org/pypy/pypy/changeset/5a3b35c71677/ Log: Update C implementations of PyObject_GetBuffer/PyBuffer_Release to match CPython 3.5 diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c --- a/pypy/module/cpyext/src/abstract.c +++ b/pypy/module/cpyext/src/abstract.c @@ -101,23 +101,29 @@ int PyObject_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (!PyObject_CheckBuffer(obj)) { + PyBufferProcs *pb = obj->ob_type->tp_as_buffer; + + if (pb == NULL || pb->bf_getbuffer == NULL) { PyErr_Format(PyExc_TypeError, - "'%100s' does not support the buffer interface", + "a bytes-like object is required, not '%.100s'", Py_TYPE(obj)->tp_name); return -1; } - return (*(obj->ob_type->tp_as_buffer->bf_getbuffer))(obj, view, flags); + return (*pb->bf_getbuffer)(obj, view, flags); } void PyBuffer_Release(Py_buffer *view) { PyObject *obj = view->obj; - if (obj && Py_TYPE(obj)->tp_as_buffer && Py_TYPE(obj)->tp_as_buffer->bf_releasebuffer) - Py_TYPE(obj)->tp_as_buffer->bf_releasebuffer(obj, view); - Py_XDECREF(obj); + PyBufferProcs *pb; + if (obj == NULL) + return; + pb = Py_TYPE(obj)->tp_as_buffer; + if (pb && pb->bf_releasebuffer) + pb->bf_releasebuffer(obj, view); view->obj = NULL; + Py_DECREF(obj); } /* Operations on callable objects */ From pypy.commits at gmail.com Fri Dec 9 23:14:56 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 09 Dec 2016 20:14:56 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Implement PyObject_GetBuffer in RPython Message-ID: <584b8140.913fc20a.ca4e8.915c@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88990:3b7a9aafcab8 Date: 2016-12-10 03:06 +0000 http://bitbucket.org/pypy/pypy/changeset/3b7a9aafcab8/ Log: Implement PyObject_GetBuffer in RPython diff --git 
a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -519,7 +519,7 @@ 'PyObject_CallMethod', 'PyObject_CallFunctionObjArgs', 'PyObject_CallMethodObjArgs', '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', - 'PyObject_GetBuffer', 'PyBuffer_Release', + 'PyBuffer_Release', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,17 +1,40 @@ -from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, make_ref, incref, from_ref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen +from pypy.interpreter.error import oefmt from pypy.objspace.std.memoryobject import W_MemoryView +from pypy.module.cpyext.api import ( + cpython_api, Py_buffer, CANNOT_FAIL, Py_MAX_FMT, Py_MAX_NDIMS, + build_type_checkers, Py_ssize_tP, generic_cpy_call) +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.import_ import PyImport_Import +from pypy.module.cpyext.buffer import CBuffer -from pypy.interpreter.error import oefmt -from pypy.module.cpyext.pyobject import PyObject, from_ref -from pypy.module.cpyext.buffer import CBuffer -from pypy.objspace.std.memoryobject import W_MemoryView PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") + + at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + rffi.INT_real, error=-1) +def PyObject_GetBuffer(space, exporter, view, flags): + """Send a request to exporter to fill in view as specified by flags. 
If the + exporter cannot provide a buffer of the exact type, it MUST raise + PyExc_BufferError, set view->obj to NULL and return -1. + + On success, fill in view, set view->obj to a new reference to exporter and + return 0. In the case of chained buffer providers that redirect requests + to a single object, view->obj MAY refer to this object instead of exporter. + + Successful calls to PyObject_GetBuffer() must be paired with calls to + PyBuffer_Release(), similar to malloc() and free(). Thus, after the + consumer is done with the buffer, PyBuffer_Release() must be called exactly + once. + """ + pb = exporter.c_ob_type.c_tp_as_buffer + if not pb or not pb.c_bf_getbuffer: + w_exporter = from_ref(space, exporter) + raise oefmt(space.w_TypeError, + "a bytes-like object is required, not '%T'", w_exporter) + return generic_cpy_call(space, pb.c_bf_getbuffer, exporter, view, flags) + def fill_Py_buffer(space, buf, view): # c_buf, c_obj have been filled in ndim = buf.getndim() @@ -148,4 +171,3 @@ rffi.setintfield(view, 'c_readonly', 1) isstr = True return view - diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c --- a/pypy/module/cpyext/src/abstract.c +++ b/pypy/module/cpyext/src/abstract.c @@ -98,20 +98,6 @@ /* Buffer C-API for Python 3.0 */ -int -PyObject_GetBuffer(PyObject *obj, Py_buffer *view, int flags) -{ - PyBufferProcs *pb = obj->ob_type->tp_as_buffer; - - if (pb == NULL || pb->bf_getbuffer == NULL) { - PyErr_Format(PyExc_TypeError, - "a bytes-like object is required, not '%.100s'", - Py_TYPE(obj)->tp_name); - return -1; - } - return (*pb->bf_getbuffer)(obj, view, flags); -} - void PyBuffer_Release(Py_buffer *view) { From pypy.commits at gmail.com Fri Dec 9 23:14:58 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 09 Dec 2016 20:14:58 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Fix behaviour of PyObject_GetBuffer(, ...) 
Message-ID: <584b8142.d39a1c0a.18fa1.7f90@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r88991:1c85b8127b59 Date: 2016-12-10 03:58 +0000 http://bitbucket.org/pypy/pypy/changeset/1c85b8127b59/ Log: Fix behaviour of PyObject_GetBuffer(, ...) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -358,7 +358,10 @@ ptr = pybuf.c_buf size = pybuf.c_len ndim = widen(pybuf.c_ndim) - shape = [pybuf.c_shape[i] for i in range(ndim)] + if pybuf.c_shape: + shape = [pybuf.c_shape[i] for i in range(ndim)] + else: + shape = None if pybuf.c_strides: strides = [pybuf.c_strides[i] for i in range(ndim)] else: diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -3,13 +3,14 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.bytesobject import new_empty_str, PyBytesObject -from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call -from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref +from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call, Py_buffer +from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref, as_pyobj from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr import py import sys + class AppTestBytesObject(AppTestCpythonExtensionBase): def test_bytesobject(self): module = self.import_extension('foo', [ @@ -290,3 +291,10 @@ w_obj = space.wrap(u"test") assert api.PyBytes_FromObject(w_obj) is None api.PyErr_Clear() + + def test_suboffsets(self, space, api): + w_bytes = space.newbytes('1234') + view = lltype.malloc(Py_buffer, flavor='raw', zero=True) + flags = rffi.cast(rffi.INT_real, 0) + 
api.PyObject_GetBuffer(w_bytes, view, flags) + assert not view.c_suboffsets diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -505,10 +505,10 @@ header=None, error=-1) def bytes_getbuffer(space, w_str, view, flags): from pypy.module.cpyext.bytesobject import PyBytes_AsString - view.c_obj = make_ref(space, w_str) - view.c_buf = rffi.cast(rffi.VOIDP, PyBytes_AsString(space, view.c_obj)) - view.c_len = space.len_w(w_str) - return 0 + from pypy.module.cpyext.object import PyBuffer_FillInfo + c_buf = rffi.cast(rffi.VOIDP, PyBytes_AsString(space, w_str)) + return PyBuffer_FillInfo(space, view, w_str, c_buf, + space.len_w(w_str), 1, flags) def setup_bytes_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) From pypy.commits at gmail.com Sat Dec 10 09:15:33 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 06:15:33 -0800 (PST) Subject: [pypy-commit] pypy default: Test for unicodehelper.{decode_utf8, encode_utf8} Message-ID: <584c0e05.624fc20a.7231f.1fc1@mx.google.com> Author: Armin Rigo Branch: Changeset: r88992:81769ca3299e Date: 2016-12-10 15:09 +0100 http://bitbucket.org/pypy/pypy/changeset/81769ca3299e/ Log: Test for unicodehelper.{decode_utf8,encode_utf8} diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -0,0 +1,26 @@ +from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class FakeSpace: + pass + +def test_encode_utf8(): + space = FakeSpace() + assert encode_utf8(space, u"abc") == "abc" + assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" + assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" + assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file 
storage, which collapse the two surrogates into one + c = u"\udc00" + assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + +def test_decode_utf8(): + space = FakeSpace() + assert decode_utf8(space, "abc") == u"abc" + assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" + assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -61,6 +61,8 @@ # Note that this function never raises UnicodeEncodeError, # since surrogate pairs are allowed. # This is not the case with Python3. + # Also, note that the two characters \d800\dc00 are considered as + # a paired surrogate, and turn into a single 4-byte utf8 char. return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=raise_unicode_exception_encode, From pypy.commits at gmail.com Sat Dec 10 09:15:36 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 06:15:36 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <584c0e08.448e1c0a.43198.a3ce@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88993:28385e50deee Date: 2016-12-10 15:10 +0100 http://bitbucket.org/pypy/pypy/changeset/28385e50deee/ Log: hg merge default diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -71,8 +71,11 @@ Analyzing performance of applications is always tricky. We have various tools, for example a `jitviewer`_ that help us analyze performance. 
-The jitviewer shows the code generated by the PyPy JIT in a hierarchical way, -as shown by the screenshot below: +The old tool was partly rewritten and combined with vmprof. The service is +hosted at `vmprof.com`_. + +The following shows an old image of the jitviewer. +The code generated by the PyPy JIT in a hierarchical way: - at the bottom level, it shows the Python source code of the compiled loops @@ -84,13 +87,17 @@ .. image:: image/jitviewer.png -The jitviewer is a web application based on flask and jinja2 (and jQuery on -the client): if you have great web developing skills and want to help PyPy, +The jitviewer is a web application based on django and angularjs: +if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep -knowledge of the internals. +knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and +`vmprof-integration`_ to find open issues and documentation. -.. _jitviewer: http://bitbucket.org/pypy/jitviewer - +.. _jitviewer: http://vmprof.com +.. _vmprof.com: http://vmprof.com +.. _vmprof-python: https://github.com/vmprof/vmprof-python +.. _vmprof-server: https://github.com/vmprof/vmprof-server +.. _vmprof-integration: https://github.com/vmprof/vmprof-integration Optimized Unicode Representation -------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -47,3 +47,10 @@ .. branch: desc-specialize Refactor FunctionDesc.specialize() and related code (RPython annotator). + +.. branch: raw-calloc + +.. 
branch: issue2446 + +Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key +so it will be picked up by app-level objects of that type diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -0,0 +1,26 @@ +from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class FakeSpace: + pass + +def test_encode_utf8(): + space = FakeSpace() + assert encode_utf8(space, u"abc") == "abc" + assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" + assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" + assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + +def test_decode_utf8(): + space = FakeSpace() + assert decode_utf8(space, "abc") == u"abc" + assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" + assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -142,6 +142,8 @@ def encode_utf8(space, uni, allow_surrogates=False): # Note that Python3 tends to forbid lone surrogates + # Also, note that the two characters \d800\dc00 are considered as + # a paired surrogate, and turn into a single 4-byte utf8 char. 
return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=encode_error_handler(space), diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -401,7 +401,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -145,7 +145,7 @@ assert fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) - def test_multiple_inheritance(self): + def test_multiple_inheritance1(self): module = self.import_module(name='foo') obj = module.UnicodeSubtype(u'xyz') obj2 = module.UnicodeSubtype2() @@ -397,7 +397,7 @@ assert space.int_w(space.getattr(w_class, w_name)) == 1 space.delitem(w_dict, w_name) - def test_multiple_inheritance(self, space, api): + def test_multiple_inheritance2(self, space, api): w_class = space.appexec([], """(): class A(object): pass @@ -1132,3 +1132,38 @@ print(repr(X)) X() + + def test_multiple_inheritance3(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + PyTypeObject *Base1, *Base2, *Base12; + Base1 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base1->tp_name = "Base1"; + Base2->tp_name = "Base2"; + Base12->tp_name = "Base12"; + Base1->tp_basicsize = sizeof(PyHeapTypeObject); + Base2->tp_basicsize = sizeof(PyHeapTypeObject); + Base12->tp_basicsize = sizeof(PyHeapTypeObject); + Base1->tp_flags = 
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_base = Base1; + Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); + Base12->tp_doc = "The Base12 type or object"; + if (PyType_Ready(Base1) < 0) return NULL; + if (PyType_Ready(Base2) < 0) return NULL; + if (PyType_Ready(Base12) < 0) return NULL; + obj = PyObject_New(PyObject, Base12); + return obj; + ''' + )]) + obj = module.new_obj() + assert 'Base12' in str(obj) + assert type(obj).__doc__ == "The Base12 type or object" + assert obj.__doc__ == "The Base12 type or object" + + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -326,6 +326,8 @@ w_obj = W_PyCWrapperObject(space, pto, method_name, wrapper_func, wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = space.wrap(w_obj) + if pto.c_tp_doc: + dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) @@ -461,13 +463,17 @@ convert_member_defs(space, dict_w, pto.c_tp_members, self) name = rffi.charp2str(pto.c_tp_name) - new_layout = (pto.c_tp_basicsize > rffi.sizeof(PyObject.TO) or - pto.c_tp_itemsize > 0) + flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + if flag_heaptype: + minsize = rffi.sizeof(PyHeapTypeObject.TO) + else: + minsize = rffi.sizeof(PyObject.TO) + new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0) W_TypeObject.__init__(self, space, name, bases_w or [space.w_object], dict_w, force_new_layout=new_layout) self.flag_cpytype = True - self.flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + self.flag_heaptype = flag_heaptype # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: self.flag_map_or_seq = 'S' diff --git 
a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -494,3 +494,15 @@ def test_negative_array_size(self): ffi = FFI() py.test.raises(ValueError, ffi.cast, "int[-5]", 0) + + def test_cannot_instantiate_manually(self): + ffi = FFI() + ct = type(ffi.typeof("void *")) + py.test.raises(TypeError, ct) + py.test.raises(TypeError, ct, ffi.NULL) + for cd in [type(ffi.cast("void *", 0)), + type(ffi.new("char[]", 3)), + type(ffi.gc(ffi.NULL, lambda x: None))]: + py.test.raises(TypeError, cd) + py.test.raises(TypeError, cd, ffi.NULL) + py.test.raises(TypeError, cd, ffi.typeof("void *")) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -361,7 +361,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -593,6 +593,8 @@ log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): + # NB. 
the operation 'raw_malloc' is not supported; this is for + # the operation 'malloc'/'malloc_varsize' with {flavor: 'gc'} d = op.args[1].value.copy() d.pop('flavor') add_memory_pressure = d.pop('add_memory_pressure', False) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -142,10 +142,14 @@ assert len(lst) == len(args_v), ( "not supported so far: 'greens' variables contain Void") # a crash here means that you have to reorder the variable named in - # the JitDriver. Indeed, greens and reds must both be sorted: first - # all INTs, followed by all REFs, followed by all FLOATs. + # the JitDriver. lst2 = sort_vars(lst) - assert lst == lst2 + assert lst == lst2, ("You have to reorder the variables named in " + "the JitDriver (both the 'greens' and 'reds' independently). " + "They must be sorted like this: first all the integer-like, " + "then all the pointer-like, and finally the floats.\n" + "Got: %r\n" + "Expected: %r" % (lst, lst2)) return lst # return (_sort(greens_v, True), _sort(reds_v, False)) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -427,6 +427,13 @@ return result mh._ll_malloc_fixedsize = _ll_malloc_fixedsize + def _ll_malloc_fixedsize_zero(size): + result = mh.allocate(size, zero=True) + if not result: + raise MemoryError() + return result + mh._ll_malloc_fixedsize_zero = _ll_malloc_fixedsize_zero + def _ll_compute_size(length, size, itemsize): try: varsize = ovfcheck(itemsize * length) @@ -453,10 +460,9 @@ def _ll_malloc_varsize_no_length_zero(length, size, itemsize): tot_size = _ll_compute_size(length, size, itemsize) - result = mh.allocate(tot_size) + result = mh.allocate(tot_size, zero=True) if not result: raise MemoryError() - llmemory.raw_memclear(result, tot_size) return result 
mh.ll_malloc_varsize_no_length_zero = _ll_malloc_varsize_no_length_zero @@ -470,17 +476,16 @@ mh = mallocHelpers() mh.allocate = llmemory.raw_malloc ll_raw_malloc_fixedsize = mh._ll_malloc_fixedsize + ll_raw_malloc_fixedsize_zero = mh._ll_malloc_fixedsize_zero ll_raw_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length ll_raw_malloc_varsize = mh.ll_malloc_varsize ll_raw_malloc_varsize_no_length_zero = mh.ll_malloc_varsize_no_length_zero - stack_mh = mallocHelpers() - stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size) - ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize - if self.translator: self.raw_malloc_fixedsize_ptr = self.inittime_helper( ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address) + self.raw_malloc_fixedsize_zero_ptr = self.inittime_helper( + ll_raw_malloc_fixedsize_zero, [lltype.Signed], llmemory.Address) self.raw_malloc_varsize_no_length_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False) self.raw_malloc_varsize_ptr = self.inittime_helper( @@ -488,9 +493,6 @@ self.raw_malloc_varsize_no_length_zero_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length_zero, [lltype.Signed]*3, llmemory.Address, inline=False) - self.stack_malloc_fixedsize_ptr = self.inittime_helper( - ll_stack_malloc_fixedsize, [lltype.Signed], llmemory.Address) - def gct_malloc(self, hop, add_flags=None): TYPE = hop.spaceop.result.concretetype.TO assert not TYPE._is_varsize() @@ -503,21 +505,16 @@ hop.cast_result(v_raw) def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], + if flags.get('zero'): + ll_func = self.raw_malloc_fixedsize_zero_ptr + else: + ll_func = self.raw_malloc_fixedsize_ptr + v_raw = hop.genop("direct_call", [ll_func, c_size], resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) if flags.get('track_allocation', True): 
hop.genop("track_alloc_start", [v_raw]) return v_raw - def gct_fv_stack_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.stack_malloc_fixedsize_ptr, c_size], - resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) - return v_raw - def gct_malloc_varsize(self, hop, add_flags=None): flags = hop.spaceop.args[1].value if add_flags: diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1778,22 +1778,23 @@ finally: lltype.free(l_utsbuf, flavor='raw') -# These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) -c_major = external('major', [rffi.INT], rffi.INT, macro=True) -c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) +if sys.platform != 'win32': + # These are actually macros on some/most systems + c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) + c_major = external('major', [rffi.INT], rffi.INT, macro=True) + c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) - at replace_os_function('makedev') -def makedev(maj, min): - return c_makedev(maj, min) + @replace_os_function('makedev') + def makedev(maj, min): + return c_makedev(maj, min) - at replace_os_function('major') -def major(dev): - return c_major(dev) + @replace_os_function('major') + def major(dev): + return c_major(dev) - at replace_os_function('minor') -def minor(dev): - return c_minor(dev) + @replace_os_function('minor') + def minor(dev): + return c_minor(dev) #___________________________________________________________________ diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -398,7 +398,7 @@ baseofs = offsetof(_c.sockaddr_un, 'c_sun_path') self.setdata(sun, baseofs + len(path)) rffi.setintfield(sun, 'c_sun_family', AF_UNIX) - if _c.linux and path.startswith('\x00'): + if 
_c.linux and path[0] == '\x00': # Linux abstract namespace extension if len(path) > sizeof(_c.sockaddr_un.c_sun_path): raise RSocketError("AF_UNIX path too long") diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,7 @@ def test_isatty(self): assert rposix.isatty(-1) is False + @py.test.mark.skipif("not hasattr(rposix, 'makedev')") def test_makedev(self): dev = rposix.makedev(24, 7) assert rposix.major(dev) == 24 diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -997,11 +997,14 @@ # __________________________________________________________ # operations on addresses - def op_raw_malloc(self, size): + def op_raw_malloc(self, size, zero): + assert lltype.typeOf(size) == lltype.Signed + return llmemory.raw_malloc(size, zero=zero) + + def op_boehm_malloc(self, size): assert lltype.typeOf(size) == lltype.Signed return llmemory.raw_malloc(size) - - op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc + op_boehm_malloc_atomic = op_boehm_malloc def op_boehm_register_finalizer(self, p, finalizer): pass @@ -1069,9 +1072,6 @@ assert offset.TYPE == ARGTYPE getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value - def op_stack_malloc(self, size): # mmh - raise NotImplementedError("backend only") - def op_track_alloc_start(self, addr): # we don't do tracking at this level checkadr(addr) diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -506,13 +506,17 @@ llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, sandboxsafe=True, _nowrapper=True) +llimpl_calloc = rffi.llexternal('calloc', [lltype.Signed, lltype.Signed], + llmemory.Address, + sandboxsafe=True, _nowrapper=True) llimpl_free = rffi.llexternal('free', 
[llmemory.Address], lltype.Void, sandboxsafe=True, _nowrapper=True) def llimpl_arena_malloc(nbytes, zero): - addr = llimpl_malloc(nbytes) - if bool(addr): - llimpl_arena_reset(addr, nbytes, zero) + if zero: + addr = llimpl_calloc(nbytes, 1) + else: + addr = llimpl_malloc(nbytes) return addr llimpl_arena_malloc._always_inline_ = True register_external(arena_malloc, [int, int], llmemory.Address, diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -7,6 +7,7 @@ import weakref from rpython.annotator.bookkeeper import analyzer_for from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool +from rpython.annotator.model import SomeBool from rpython.rlib.objectmodel import Symbolic, specialize from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lltype import SomePtr @@ -936,14 +937,15 @@ # ____________________________________________________________ -def raw_malloc(size): +def raw_malloc(size, zero=False): if not isinstance(size, AddressOffset): raise NotImplementedError(size) - return size._raw_malloc([], zero=False) + return size._raw_malloc([], zero=zero) @analyzer_for(raw_malloc) -def ann_raw_malloc(s_size): +def ann_raw_malloc(s_size, s_zero=None): assert isinstance(s_size, SomeInteger) # XXX add noneg...? 
+ assert s_zero is None or isinstance(s_zero, SomeBool) return SomeAddress() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,7 +396,6 @@ 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), - 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), 'adr_add': LLOp(canfold=True), diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -574,10 +574,14 @@ # memory addresses @typer_for(llmemory.raw_malloc) -def rtype_raw_malloc(hop): - v_size, = hop.inputargs(lltype.Signed) +def rtype_raw_malloc(hop, i_zero=None): + v_size = hop.inputarg(lltype.Signed, arg=0) + v_zero, = parse_kwds(hop, (i_zero, None)) + if v_zero is None: + v_zero = hop.inputconst(lltype.Bool, False) hop.exception_cannot_occur() - return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address) + return hop.genop('raw_malloc', [v_size, v_zero], + resulttype=llmemory.Address) @typer_for(llmemory.raw_malloc_usage) def rtype_raw_malloc_usage(hop): diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -372,19 +372,6 @@ result = interpret(getids, [i, j]) assert result -def test_stack_malloc(): - py.test.skip("stack-flavored mallocs no longer supported") - class A(object): - pass - def f(): - a = A() - a.i = 1 - return a.i - interp, graph = get_interpreter(f, []) - graph.startblock.operations[0].args[1] = inputconst(Void, {'flavor': "stack"}) - result = interp.eval_graph(graph, []) - assert result == 1 - def test_invalid_stack_access(): py.test.skip("stack-flavored mallocs no longer supported") class A(object): diff --git 
a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -608,16 +608,6 @@ return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \ % (self.expr(op.args[0]), self.expr(op.args[1])) - def OP_RAW_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult) - - def OP_STACK_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_STACK_MALLOC(%s, %s, void *);" % (esize, eresult) - def OP_DIRECT_FIELDPTR(self, op): return self.OP_GETFIELD(op, ampersand='&') diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -8,11 +8,14 @@ #define OP_STACK_CURRENT(r) r = (Signed)&r -#define OP_RAW_MALLOC(size, r, restype) { \ - r = (restype) malloc(size); \ - if (r != NULL) { \ - COUNT_MALLOC; \ - } \ +#define OP_RAW_MALLOC(size, zero, result) { \ + if (zero) \ + result = calloc(size, 1); \ + else \ + result = malloc(size); \ + if (result != NULL) { \ + COUNT_MALLOC; \ + } \ } #define OP_RAW_FREE(p, r) free(p); COUNT_FREE; @@ -26,10 +29,6 @@ #define alloca _alloca #endif -#define OP_STACK_MALLOC(size,r,restype) \ - r = (restype) alloca(size); \ - if (r != NULL) memset((void*) r, 0, size); - #define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size); #define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size); diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -32,7 +32,29 @@ assert res == 42 res = fc(1) assert res == 1 - + +def test_memory_access_zero(): + def f(): + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=False) + addr.signed[1] = 10000 + i + blocks.append(addr) + for addr in 
blocks: + raw_free(addr) + result = 0 + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=True) + result |= addr.signed[1] + blocks.append(addr) + for addr in blocks: + raw_free(addr) + return result + fc = compile(f, []) + res = fc() + assert res == 0 + def test_memory_float(): S = lltype.GcStruct("S", ("x", lltype.Float), ("y", lltype.Float)) offset = FieldOffset(S, 'x') @@ -155,18 +177,6 @@ fn = compile(f, [int]) assert fn(1) == 2 -def test_flavored_malloc_stack(): - class A(object): - _alloc_flavor_ = "stack" - def __init__(self, val): - self.val = val - def f(x): - a = A(x + 1) - result = a.val - return result - fn = compile(f, [int]) - assert fn(1) == 2 - def test_gcref(): if sys.platform == 'darwin': py.test.skip("'boehm' may crash") From pypy.commits at gmail.com Sat Dec 10 09:15:38 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 06:15:38 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Adapt the test to py3.5 Message-ID: <584c0e0a.0b561c0a.742e1.9bfa@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88994:e985a2343147 Date: 2016-12-10 15:14 +0100 http://bitbucket.org/pypy/pypy/changeset/e985a2343147/ Log: Adapt the test to py3.5 diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py --- a/pypy/interpreter/test/test_unicodehelper.py +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -1,26 +1,34 @@ +import py from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class Hit(Exception): + pass + class FakeSpace: - pass + def __getattr__(self, name): + if name in ('w_UnicodeEncodeError', 'w_UnicodeDecodeError'): + raise Hit + raise AttributeError(name) + def test_encode_utf8(): space = FakeSpace() assert encode_utf8(space, u"abc") == "abc" assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" - assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" - assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + py.test.raises(Hit, encode_utf8, space, 
u"\ud800") + py.test.raises(Hit, encode_utf8, space, u"\udc00") # for the following test, go to lengths to avoid CPython's optimizer # and .pyc file storage, which collapse the two surrogates into one c = u"\udc00" - assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + py.test.raises(Hit, encode_utf8, space, u"\ud800" + c) def test_decode_utf8(): space = FakeSpace() assert decode_utf8(space, "abc") == u"abc" assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" - assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" - assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" - got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") - assert map(ord, got) == [0xd800, 0xdc00] + py.test.raises(Hit, decode_utf8, space, "\xed\xa0\x80") + py.test.raises(Hit, decode_utf8, space, "\xed\xb0\x80") + py.test.raises(Hit, decode_utf8, space, "\xed\xa0\x80\xed\xb0\x80") got = decode_utf8(space, "\xf0\x90\x80\x80") assert map(ord, got) == [0x10000] From pypy.commits at gmail.com Sat Dec 10 09:40:06 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 06:40:06 -0800 (PST) Subject: [pypy-commit] pypy default: expand the comments Message-ID: <584c13c6.c89cc20a.df59e.1f43@mx.google.com> Author: Armin Rigo Branch: Changeset: r88995:e8b1d9913039 Date: 2016-12-10 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/e8b1d9913039/ Log: expand the comments diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -51,6 +51,10 @@ return result def decode_utf8(space, string): + # Surrogates are accepted and not treated specially at all. + # If there happen to be two 3-bytes encoding a pair of surrogates, + # you still get two surrogate unicode characters in the result. + # These are the Python2 rules; Python3 differs. 
result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), @@ -59,10 +63,9 @@ def encode_utf8(space, uni): # Note that this function never raises UnicodeEncodeError, - # since surrogate pairs are allowed. - # This is not the case with Python3. - # Also, note that the two characters \d800\dc00 are considered as - # a paired surrogate, and turn into a single 4-byte utf8 char. + # since surrogates are allowed, either paired or lone. + # A paired surrogate is considered like the non-BMP character + # it stands for. These are the Python2 rules; Python3 differs. return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=raise_unicode_exception_encode, From pypy.commits at gmail.com Sat Dec 10 09:40:08 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 06:40:08 -0800 (PST) Subject: [pypy-commit] pypy py3.5: more tests, fix the comments Message-ID: <584c13c8.8f95c20a.505c6.29d4@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88996:40d2fd7302c4 Date: 2016-12-10 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/40d2fd7302c4/ Log: more tests, fix the comments diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py --- a/pypy/interpreter/test/test_unicodehelper.py +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -23,6 +23,14 @@ c = u"\udc00" py.test.raises(Hit, encode_utf8, space, u"\ud800" + c) +def test_encode_utf8_allow_surrogates(): + sp = FakeSpace() + assert encode_utf8(sp, u"\ud800", allow_surrogates=True) == "\xed\xa0\x80" + assert encode_utf8(sp, u"\udc00", allow_surrogates=True) == "\xed\xb0\x80" + c = u"\udc00" + got = encode_utf8(sp, u"\ud800" + c, allow_surrogates=True) + assert got == "\xf0\x90\x80\x80" + def test_decode_utf8(): space = FakeSpace() assert decode_utf8(space, "abc") == u"abc" @@ -32,3 +40,12 @@ py.test.raises(Hit, decode_utf8, space, "\xed\xa0\x80\xed\xb0\x80") got = 
decode_utf8(space, "\xf0\x90\x80\x80") assert map(ord, got) == [0x10000] + +def test_decode_utf8_allow_surrogates(): + sp = FakeSpace() + assert decode_utf8(sp, "\xed\xa0\x80", allow_surrogates=True) == u"\ud800" + assert decode_utf8(sp, "\xed\xb0\x80", allow_surrogates=True) == u"\udc00" + got = decode_utf8(sp, "\xed\xa0\x80\xed\xb0\x80", allow_surrogates=True) + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(sp, "\xf0\x90\x80\x80", allow_surrogates=True) + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -134,6 +134,11 @@ return result def decode_utf8(space, string, allow_surrogates=False): + # Note that Python3 tends to forbid *all* surrogates in utf-8. + # If allow_surrogates=True, then revert to the Python 2 behavior, + # i.e. surrogates are accepted and not treated specially at all. + # If there happen to be two 3-bytes encoding a pair of surrogates, + # you still get two surrogate unicode characters in the result. result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), @@ -141,9 +146,11 @@ return result def encode_utf8(space, uni, allow_surrogates=False): - # Note that Python3 tends to forbid lone surrogates - # Also, note that the two characters \d800\dc00 are considered as - # a paired surrogate, and turn into a single 4-byte utf8 char. + # Note that Python3 tends to forbid *all* surrogates in utf-8. + # If allow_surrogates=True, then revert to the Python 2 behavior + # which never raises UnicodeEncodeError. Surrogate pairs are then + # allowed, either paired or lone. A paired surrogate is considered + # like the non-BMP character it stands for. 
return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=encode_error_handler(space), From pypy.commits at gmail.com Sat Dec 10 10:02:06 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 07:02:06 -0800 (PST) Subject: [pypy-commit] pypy default: Make a RPython function to encode in utf8 while preserving information Message-ID: <584c18ee.e626c20a.a1dc0.2cab@mx.google.com> Author: Armin Rigo Branch: Changeset: r88997:f88f3652d936 Date: 2016-12-10 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/f88f3652d936/ Log: Make a RPython function to encode in utf8 while preserving information diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -327,6 +327,16 @@ def unicode_encode_utf_8(s, size, errors, errorhandler=None, allow_surrogates=allow_surrogate_by_default): + # In this function, allow_surrogates can be: + # + # * True: surrogates are always allowed. A valid surrogate pair + # is replaced with the non-BMP unicode char it stands for, + # which is then encoded as 4 bytes. + # + # * False: surrogates are always forbidden. + # + # See also unicode_encode_utf8sp(). + # if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, @@ -391,6 +401,33 @@ _encodeUCS4(result, ch) return result.build() +def unicode_encode_utf8sp(s, size): + # Surrogate-preserving utf-8 encoding. Any surrogate character + # turns into its 3-bytes encoding, whether it is paired or not. + # This should always be reversible, and the reverse is the regular + # str_decode_utf_8() with allow_surrogates=True. 
+ assert(size >= 0) + result = StringBuilder(size) + pos = 0 + while pos < size: + ch = ord(s[pos]) + pos += 1 + if ch < 0x80: + # Encode ASCII + result.append(chr(ch)) + elif ch < 0x0800: + # Encode Latin-1 + result.append(chr((0xc0 | (ch >> 6)))) + result.append(chr((0x80 | (ch & 0x3f)))) + elif ch < 0x10000: + # Encode UCS2 Unicode ordinals, and surrogates + result.append((chr((0xe0 | (ch >> 12))))) + result.append((chr((0x80 | ((ch >> 6) & 0x3f))))) + result.append((chr((0x80 | (ch & 0x3f))))) + else: + _encodeUCS4(result, ch) + return result.build() + # ____________________________________________________________ # utf-16 diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -812,6 +812,21 @@ py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) assert encoder(u'u\u1234', 2, 'replace') == 'u?' + def test_encode_utf8sp(self): + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + for input, expected in [ + (u"", ""), + (u"abc", "abc"), + (u"\u1234", "\xe1\x88\xb4"), + (u"\ud800", "\xed\xa0\x80"), + (u"\udc00", "\xed\xb0\x80"), + (u"\ud800" + c, "\xed\xa0\x80\xed\xb0\x80"), + ]: + got = runicode.unicode_encode_utf8sp(input, len(input)) + assert got == expected + class TestTranslation(object): def setup_class(cls): From pypy.commits at gmail.com Sat Dec 10 10:02:08 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 07:02:08 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <584c18f0.46bb1c0a.96c8b.a29e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88998:894e8d2f5df8 Date: 2016-12-10 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/894e8d2f5df8/ Log: hg merge default diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ 
b/rpython/rlib/runicode.py @@ -327,6 +327,16 @@ def unicode_encode_utf_8(s, size, errors, errorhandler=None, allow_surrogates=allow_surrogate_by_default): + # In this function, allow_surrogates can be: + # + # * True: surrogates are always allowed. A valid surrogate pair + # is replaced with the non-BMP unicode char it stands for, + # which is then encoded as 4 bytes. + # + # * False: surrogates are always forbidden. + # + # See also unicode_encode_utf8sp(). + # if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, @@ -391,6 +401,33 @@ _encodeUCS4(result, ch) return result.build() +def unicode_encode_utf8sp(s, size): + # Surrogate-preserving utf-8 encoding. Any surrogate character + # turns into its 3-bytes encoding, whether it is paired or not. + # This should always be reversible, and the reverse is the regular + # str_decode_utf_8() with allow_surrogates=True. + assert(size >= 0) + result = StringBuilder(size) + pos = 0 + while pos < size: + ch = ord(s[pos]) + pos += 1 + if ch < 0x80: + # Encode ASCII + result.append(chr(ch)) + elif ch < 0x0800: + # Encode Latin-1 + result.append(chr((0xc0 | (ch >> 6)))) + result.append(chr((0x80 | (ch & 0x3f)))) + elif ch < 0x10000: + # Encode UCS2 Unicode ordinals, and surrogates + result.append((chr((0xe0 | (ch >> 12))))) + result.append((chr((0x80 | ((ch >> 6) & 0x3f))))) + result.append((chr((0x80 | (ch & 0x3f))))) + else: + _encodeUCS4(result, ch) + return result.build() + # ____________________________________________________________ # utf-16 diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -812,6 +812,21 @@ py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) assert encoder(u'u\u1234', 2, 'replace') == 'u?' 
+ def test_encode_utf8sp(self): + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + for input, expected in [ + (u"", ""), + (u"abc", "abc"), + (u"\u1234", "\xe1\x88\xb4"), + (u"\ud800", "\xed\xa0\x80"), + (u"\udc00", "\xed\xb0\x80"), + (u"\ud800" + c, "\xed\xa0\x80\xed\xb0\x80"), + ]: + got = runicode.unicode_encode_utf8sp(input, len(input)) + assert got == expected + class TestTranslation(object): def setup_class(cls): From pypy.commits at gmail.com Sat Dec 10 10:02:10 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 07:02:10 -0800 (PST) Subject: [pypy-commit] pypy py3.5: encode_utf8sp, decode_utf8sp Message-ID: <584c18f2.8675c20a.108e7.230a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r88999:22b1b835c734 Date: 2016-12-10 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/22b1b835c734/ Log: encode_utf8sp, decode_utf8sp diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py --- a/pypy/interpreter/test/test_unicodehelper.py +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -1,5 +1,6 @@ import py from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 +from pypy.interpreter.unicodehelper import encode_utf8sp, decode_utf8sp class Hit(Exception): @@ -31,6 +32,14 @@ got = encode_utf8(sp, u"\ud800" + c, allow_surrogates=True) assert got == "\xf0\x90\x80\x80" +def test_encode_utf8sp(): + sp = FakeSpace() + assert encode_utf8sp(sp, u"\ud800") == "\xed\xa0\x80" + assert encode_utf8sp(sp, u"\udc00") == "\xed\xb0\x80" + c = u"\udc00" + got = encode_utf8sp(sp, u"\ud800" + c) + assert got == "\xed\xa0\x80\xed\xb0\x80" + def test_decode_utf8(): space = FakeSpace() assert decode_utf8(space, "abc") == u"abc" @@ -49,3 +58,12 @@ assert map(ord, got) == [0xd800, 0xdc00] got = decode_utf8(sp, "\xf0\x90\x80\x80", allow_surrogates=True) assert map(ord, got) == [0x10000] + +def 
test_decode_utf8sp(): + space = FakeSpace() + assert decode_utf8sp(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8sp(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8sp(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8sp(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -150,8 +150,21 @@ # If allow_surrogates=True, then revert to the Python 2 behavior # which never raises UnicodeEncodeError. Surrogate pairs are then # allowed, either paired or lone. A paired surrogate is considered - # like the non-BMP character it stands for. + # like the non-BMP character it stands for. See also unicode_utf8sp(). return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=encode_error_handler(space), allow_surrogates=allow_surrogates) + +def encode_utf8sp(space, uni): + # Surrogate-preserving utf-8 encoding. Any surrogate character + # turns into its 3-bytes encoding, whether it is paired or not. + # This should always be reversible, and the reverse is + # decode_utf8sp(). + return runicode.unicode_encode_utf8sp(uni, len(uni)) + +def decode_utf8sp(space, string): + # Surrogate-preserving utf-8 decoding. Assuming there is no + # encoding error, it should always be reversible, and the reverse is + # encode_utf8sp(). 
+ return decode_utf8(space, string, allow_surrogates=True) From pypy.commits at gmail.com Sat Dec 10 10:06:14 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 07:06:14 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Issue #2441: fix marshalling of unicode strings containing surrogates Message-ID: <584c19e6.4dd41c0a.c435f.aa1a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89000:d251251619f4 Date: 2016-12-10 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/d251251619f4/ Log: Issue #2441: fix marshalling of unicode strings containing surrogates diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -172,6 +172,8 @@ import marshal, sys self.marshal_check('\uFFFF') self.marshal_check('\ud800') + c = u"\ud800" + self.marshal_check(c + u'\udc00') self.marshal_check(chr(sys.maxunicode)) diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -441,11 +441,9 @@ if typecode != FLAG_DONE: m.atom_str(typecode, s) -def _encode_utf8(space, u): - return unicodehelper.encode_utf8(space, u, allow_surrogates=True) - -def _decode_utf8(space, s): - return unicodehelper.decode_utf8(space, s, allow_surrogates=True) +# surrogate-preserving variants +_encode_utf8 = unicodehelper.encode_utf8sp +_decode_utf8 = unicodehelper.decode_utf8sp @marshaller(W_UnicodeObject) def marshal_unicode(space, w_unicode, m): From pypy.commits at gmail.com Sat Dec 10 11:50:57 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 10 Dec 2016 08:50:57 -0800 (PST) Subject: [pypy-commit] pypy default: s390x support for cond_call_value_i/r Message-ID: <584c3271.45f6c20a.fab5.4013@mx.google.com> Author: Richard Plangger Branch: Changeset: r89001:cdd244256f29 Date: 2016-12-10 17:50 +0100 
http://bitbucket.org/pypy/pypy/changeset/cdd244256f29/ Log: s390x support for cond_call_value_i/r diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1066,7 +1066,6 @@ prepare_cond_call_value_r = prepare_cond_call_value_i - def notimplemented(self, op): msg = '[PPC/regalloc] %s not implemented\n' % op.getopname() if we_are_translated(): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -374,10 +374,11 @@ _COND_CALL_SAVE_REGS = [r.r11, r.r2, r.r3, r.r4, r.r5] def emit_cond_call(self, op, arglocs, regalloc): + resloc = arglocs[0] + arglocs = arglocs[1:] fcond = self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond.value != c.cond_none.value - fcond = c.negate(fcond) jmp_adr = self.mc.get_relative_pos() self.mc.reserve_cond_jump() # patched later to a relative branch @@ -411,6 +412,8 @@ self.mc.BASR(r.r14, r.r14) # restoring the registers saved above, and doing pop_gcmap(), is left # to the cond_call_slowpath helper. We never have any result value. 
+ if resloc is not None: + self.mc.LGR(resloc, r.RES) relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) pmc.BRCL(fcond, l.imm(relative_target)) @@ -419,6 +422,9 @@ # guard_no_exception too self.previous_cond_call_jcond = jmp_adr, fcond + emit_cond_call_value_i = emit_cond_call + emit_cond_call_value_r = emit_cond_call + class AllocOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1107,7 +1107,7 @@ def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) - locs = [] + locs = [None] # support between 0 and 4 integer arguments assert 2 <= op.numargs() <= 2 + 4 for i in range(1, op.numargs()): @@ -1116,6 +1116,22 @@ locs.append(loc) return locs + def prepare_cond_call_value_i(self, op): + x = self.ensure_reg(op.getarg(0)) + self.load_condition_into_cc(op.getarg(0)) + self.rm.force_allocate_reg(op, selected_reg=x) # spilled if survives + # ^^^ if arg0!=0, we jump over the next block of code (the call) + locs = [x] + # support between 0 and 4 integer arguments + assert 2 <= op.numargs() <= 2 + 4 + for i in range(1, op.numargs()): + loc = self.loc(op.getarg(i)) + assert loc.type != FLOAT + locs.append(loc) + return locs # [res, function, args...] 
+ + prepare_cond_call_value_r = prepare_cond_call_value_i + def prepare_cond_call_gc_wb(self, op): arglocs = [self.ensure_reg(op.getarg(0))] return arglocs From pypy.commits at gmail.com Sat Dec 10 12:16:51 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 10 Dec 2016 09:16:51 -0800 (PST) Subject: [pypy-commit] pypy default: Simplify slot definition code by adding llslot() helper Message-ID: <584c3883.542e1c0a.59c97.e934@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89002:ced4d857b5dc Date: 2016-12-10 17:03 +0000 http://bitbucket.org/pypy/pypy/changeset/ced4d857b5dc/ Log: Simplify slot definition code by adding llslot() helper diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -27,9 +27,7 @@ def get_dealloc(self, space): from pypy.module.cpyext.typeobject import subtype_dealloc - return llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + return subtype_dealloc.api_func.get_llhelper(space) def allocate(self, space, w_type, itemcount=0): # similar to PyType_GenericAlloc? 
@@ -110,9 +108,7 @@ if tp_dealloc: def get_dealloc(self, space): - return llhelper( - tp_dealloc.api_func.functype, - tp_dealloc.api_func.get_wrapper(space)) + return tp_dealloc.api_func.get_llhelper(space) if tp_attach: def attach(self, space, pyobj, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -59,6 +59,9 @@ "expected %d-%d arguments, got %d", low, high, space.len_w(w_ob)) +def llslot(space, func): + return llhelper(func.api_func.functype, func.api_func.get_wrapper(space)) + def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs) @@ -106,7 +109,7 @@ args_w = space.fixedview(w_args) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, w_self, args_w[0], arg3) def wrap_ternaryfunc_r(space, w_self, w_args, func): @@ -121,7 +124,7 @@ Py_DecRef(space, ref) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, args_w[0], w_self, arg3) @@ -322,7 +325,7 @@ self.strides = [1] else: self.strides = strides - self.ndim = ndim + self.ndim = ndim self.itemsize = itemsize self.readonly = readonly @@ -472,7 +475,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self): return space.call_function(slot_fn, w_self) - api_func = slot_func.api_func handled = True # binary functions @@ -499,7 +501,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg): return space.call_function(slot_fn, w_self, w_arg) - api_func = slot_func.api_func handled = True # binary-with-Py_ssize_t-type @@ -517,7 +518,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, arg): return 
space.call_function(slot_fn, w_self, space.wrap(arg)) - api_func = slot_func.api_func handled = True # ternary functions @@ -532,7 +532,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg1, w_arg2): return space.call_function(slot_fn, w_self, w_arg1, w_arg2) - api_func = slot_func.api_func handled = True if handled: @@ -552,7 +551,7 @@ else: space.call_function(delattr_fn, w_self, w_name) return 0 - api_func = slot_tp_setattro.api_func + slot_func = slot_tp_setattro elif name == 'tp_getattro': getattr_fn = w_type.getdictvalue(space, '__getattribute__') if getattr_fn is None: @@ -562,7 +561,7 @@ @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) - api_func = slot_tp_getattro.api_func + slot_func = slot_tp_getattro elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -574,7 +573,7 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(call_fn, args) - api_func = slot_tp_call.api_func + slot_func = slot_tp_call elif name == 'tp_iternext': iternext_fn = w_type.getdictvalue(space, 'next') @@ -590,7 +589,7 @@ if not e.match(space, space.w_StopIteration): raise return None - api_func = slot_tp_iternext.api_func + slot_func = slot_tp_iternext elif name == 'tp_init': init_fn = w_type.getdictvalue(space, '__init__') @@ -605,7 +604,7 @@ w_stararg=w_args, w_starstararg=w_kwds) space.call_args(init_fn, args) return 0 - api_func = slot_tp_init.api_func + slot_func = slot_tp_init elif name == 'tp_new': new_fn = w_type.getdictvalue(space, '__new__') if new_fn is None: @@ -617,12 +616,12 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) - api_func = slot_tp_new.api_func + slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': buff_fn = 
w_type.getdictvalue(space, '__buffer__') if buff_fn is None: return - @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, header=None, error=-1) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def buff_w(space, w_self, view, flags): @@ -646,14 +645,14 @@ return 0 # XXX remove this when it no longer crashes a translated PyPy return - api_func = buff_w.api_func + slot_func = buff_w else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length # richcmpfunc(s) return - return lambda: llhelper(api_func.functype, api_func.get_wrapper(space)) + return lambda: llslot(space, slot_func) PyWrapperFlag_KEYWORDS = 1 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,7 +3,6 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rstring import rsplit -from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root, DescrMismatch @@ -28,7 +27,8 @@ PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, Py_DecRef, as_pyobj) from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function, + llslot) from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( @@ -260,8 +260,7 @@ if get_slot: slot_func_helper = get_slot() elif slot_func: - slot_func_helper = llhelper(slot_func.api_func.functype, - slot_func.api_func.get_wrapper(space)) + slot_func_helper = llslot(space, slot_func) if slot_func_helper is None: if 
WARN_ABOUT_MISSING_SLOT_FUNCTIONS: @@ -373,9 +372,8 @@ def setup_new_method_def(space): ptr = get_new_method_def(space) - ptr.c_ml_meth = rffi.cast(PyCFunction_typedef, - llhelper(tp_new_wrapper.api_func.functype, - tp_new_wrapper.api_func.get_wrapper(space))) + ptr.c_ml_meth = rffi.cast( + PyCFunction_typedef, llslot(space, tp_new_wrapper)) def add_tp_new_wrapper(space, dict_w, pto): if "__new__" in dict_w: @@ -498,8 +496,7 @@ def subtype_dealloc(space, obj): pto = obj.c_ob_type base = pto - this_func_ptr = llhelper(subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + this_func_ptr = llslot(space, subtype_dealloc) while base.c_tp_dealloc == this_func_ptr: base = base.c_tp_base assert base @@ -601,46 +598,31 @@ return c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) lltype.render_immortal(c_buf) - c_buf.c_bf_getsegcount = llhelper(bf_segcount.api_func.functype, - bf_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getsegcount = llslot(space, bf_segcount) if space.is_w(w_type, space.w_str): # Special case: str doesn't support get_raw_address(), so we have a # custom get*buffer that instead gives the address of the char* in the # PyBytesObject*! - c_buf.c_bf_getreadbuffer = llhelper( - str_getreadbuffer.api_func.functype, - str_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper( - str_getcharbuffer.api_func.functype, - str_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, str_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, str_getcharbuffer) elif space.is_w(w_type, space.w_unicode): # Special case: unicode doesn't support get_raw_address(), so we have a # custom get*buffer that instead gives the address of the char* in the # PyUnicodeObject*! 
- c_buf.c_bf_getreadbuffer = llhelper( - unicode_getreadbuffer.api_func.functype, - unicode_getreadbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, unicode_getreadbuffer) elif space.is_w(w_type, space.w_buffer): # Special case: we store a permanent address on the cpyext wrapper, # so we'll reuse that. # Note: we could instead store a permanent address on the buffer object, # and use get_raw_address() - c_buf.c_bf_getreadbuffer = llhelper( - buf_getreadbuffer.api_func.functype, - buf_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper( - buf_getcharbuffer.api_func.functype, - buf_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, buf_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, buf_getcharbuffer) else: # use get_raw_address() - c_buf.c_bf_getreadbuffer = llhelper(bf_getreadbuffer.api_func.functype, - bf_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper(bf_getcharbuffer.api_func.functype, - bf_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, bf_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, bf_getcharbuffer) if bufspec == 'read-write': - c_buf.c_bf_getwritebuffer = llhelper( - bf_getwritebuffer.api_func.functype, - bf_getwritebuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getwritebuffer = llslot(space, bf_getwritebuffer) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER @@ -704,9 +686,7 @@ pto.c_tp_dealloc = typedescr.get_dealloc(space) else: # for all subtypes, use subtype_dealloc() - pto.c_tp_dealloc = llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + pto.c_tp_dealloc = llslot(space, subtype_dealloc) if space.is_w(w_type, space.w_str): pto.c_tp_itemsize = 1 elif space.is_w(w_type, space.w_tuple): @@ -714,10 +694,8 @@ # buffer protocol setup_buffer_procs(space, w_type, 
pto) - pto.c_tp_free = llhelper(PyObject_Free.api_func.functype, - PyObject_Free.api_func.get_wrapper(space)) - pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, - PyType_GenericAlloc.api_func.get_wrapper(space)) + pto.c_tp_free = llslot(space, PyObject_Free) + pto.c_tp_alloc = llslot(space, PyType_GenericAlloc) builder = space.fromcache(StaticObjectBuilder) if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0 and builder.cpyext_type_init is None): @@ -908,15 +886,11 @@ if not pto.c_tp_setattro: from pypy.module.cpyext.object import PyObject_GenericSetAttr - pto.c_tp_setattro = llhelper( - PyObject_GenericSetAttr.api_func.functype, - PyObject_GenericSetAttr.api_func.get_wrapper(space)) + pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr) if not pto.c_tp_getattro: from pypy.module.cpyext.object import PyObject_GenericGetAttr - pto.c_tp_getattro = llhelper( - PyObject_GenericGetAttr.api_func.functype, - PyObject_GenericGetAttr.api_func.get_wrapper(space)) + pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr) if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) From pypy.commits at gmail.com Sat Dec 10 12:36:40 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 10 Dec 2016 09:36:40 -0800 (PST) Subject: [pypy-commit] pypy default: More simplification using llslot() Message-ID: <584c3d28.d5091c0a.fdf1e.f212@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89003:25da7bc97194 Date: 2016-12-10 17:36 +0000 http://bitbucket.org/pypy/pypy/changeset/25da7bc97194/ Log: More simplification using llslot() diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -25,9 +25,9 @@ basestruct = PyObject.TO W_BaseObject = W_ObjectObject - def get_dealloc(self, space): + def get_dealloc(self): from pypy.module.cpyext.typeobject import subtype_dealloc - return subtype_dealloc.api_func.get_llhelper(space) + return subtype_dealloc def allocate(self, space, 
w_type, itemcount=0): # similar to PyType_GenericAlloc? @@ -107,8 +107,8 @@ return tp_alloc(space, w_type, itemcount) if tp_dealloc: - def get_dealloc(self, space): - return tp_dealloc.api_func.get_llhelper(space) + def get_dealloc(self): + return tp_dealloc if tp_attach: def attach(self, space, pyobj, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -652,7 +652,7 @@ # richcmpfunc(s) return - return lambda: llslot(space, slot_func) + return slot_func PyWrapperFlag_KEYWORDS = 1 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -253,20 +253,14 @@ # XXX special case iternext continue - slot_func_helper = None - if slot_func is None and typedef is not None: - get_slot = get_slot_tp_function(space, typedef, slot_name) - if get_slot: - slot_func_helper = get_slot() - elif slot_func: - slot_func_helper = llslot(space, slot_func) - - if slot_func_helper is None: + slot_func = get_slot_tp_function(space, typedef, slot_name) + if not slot_func: if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: os.write(2, "%s defined by %s but no slot function defined!\n" % ( method_name, w_type.getname(space))) continue + slot_func_helper = llslot(space, slot_func) # XXX special case wrapper-functions and use a "specific" slot func @@ -683,7 +677,7 @@ # dealloc if space.gettypeobject(w_type.layout.typedef) is w_type: # only for the exact type, like 'space.w_tuple' or 'space.w_list' - pto.c_tp_dealloc = typedescr.get_dealloc(space) + pto.c_tp_dealloc = llslot(space, typedescr.get_dealloc()) else: # for all subtypes, use subtype_dealloc() pto.c_tp_dealloc = llslot(space, subtype_dealloc) From pypy.commits at gmail.com Sat Dec 10 13:01:03 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 10 Dec 2016 10:01:03 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default 
Message-ID: <584c42df.0bba1c0a.347da.ef7e@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89004:0b53fb03a003 Date: 2016-12-10 17:56 +0000 http://bitbucket.org/pypy/pypy/changeset/0b53fb03a003/ Log: hg merge default diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -25,11 +25,9 @@ basestruct = PyObject.TO W_BaseObject = W_ObjectObject - def get_dealloc(self, space): + def get_dealloc(self): from pypy.module.cpyext.typeobject import subtype_dealloc - return llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + return subtype_dealloc def allocate(self, space, w_type, itemcount=0): # similar to PyType_GenericAlloc? @@ -109,10 +107,8 @@ return tp_alloc(space, w_type, itemcount) if tp_dealloc: - def get_dealloc(self, space): - return llhelper( - tp_dealloc.api_func.functype, - tp_dealloc.api_func.get_wrapper(space)) + def get_dealloc(self): + return tp_dealloc if tp_attach: def attach(self, space, pyobj, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -59,6 +59,9 @@ "expected %d-%d arguments, got %d", low, high, space.len_w(w_ob)) +def llslot(space, func): + return llhelper(func.api_func.functype, func.api_func.get_wrapper(space)) + def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs) @@ -446,7 +449,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self): return space.call_function(slot_fn, w_self) - api_func = slot_func.api_func handled = True # binary functions @@ -473,7 +475,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg): return space.call_function(slot_fn, w_self, w_arg) - api_func = 
slot_func.api_func handled = True # binary-with-Py_ssize_t-type @@ -491,7 +492,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, arg): return space.call_function(slot_fn, w_self, space.wrap(arg)) - api_func = slot_func.api_func handled = True # ternary functions @@ -506,7 +506,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg1, w_arg2): return space.call_function(slot_fn, w_self, w_arg1, w_arg2) - api_func = slot_func.api_func handled = True if handled: @@ -526,7 +525,7 @@ else: space.call_function(delattr_fn, w_self, w_name) return 0 - api_func = slot_tp_setattro.api_func + slot_func = slot_tp_setattro elif name == 'tp_getattro': getattr_fn = w_type.getdictvalue(space, '__getattribute__') if getattr_fn is None: @@ -536,7 +535,7 @@ @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) - api_func = slot_tp_getattro.api_func + slot_func = slot_tp_getattro elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') @@ -549,7 +548,7 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(call_fn, args) - api_func = slot_tp_call.api_func + slot_func = slot_tp_call elif name == 'tp_iternext': iternext_fn = w_type.getdictvalue(space, '__next__') @@ -565,7 +564,7 @@ if not e.match(space, space.w_StopIteration): raise return None - api_func = slot_tp_iternext.api_func + slot_func = slot_tp_iternext elif name == 'tp_init': init_fn = w_type.getdictvalue(space, '__init__') @@ -580,7 +579,7 @@ w_stararg=w_args, w_starstararg=w_kwds) space.call_args(init_fn, args) return 0 - api_func = slot_tp_init.api_func + slot_func = slot_tp_init elif name == 'tp_new': new_fn = w_type.getdictvalue(space, '__new__') if new_fn is None: @@ -592,7 +591,7 @@ args = Arguments(space, [w_self], w_stararg=w_args, 
w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) - api_func = slot_tp_new.api_func + slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': buff_fn = w_type.getdictvalue(space, '__buffer__') if buff_fn is None: @@ -621,14 +620,14 @@ return 0 # XXX remove this when it no longer crashes a translated PyPy return - api_func = buff_w.api_func + slot_func = buff_w else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length # richcmpfunc(s) return - return lambda: llhelper(api_func.functype, api_func.get_wrapper(space)) + return slot_func PyWrapperFlag_KEYWORDS = 1 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,7 +3,6 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rstring import rsplit -from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root, DescrMismatch @@ -27,7 +26,8 @@ PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, Py_DecRef, as_pyobj) from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function, + llslot) from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( @@ -252,21 +252,14 @@ # XXX special case iternext continue - slot_func_helper = None - if slot_func is None and typedef is not None: - get_slot = get_slot_tp_function(space, typedef, slot_name) - if get_slot: - slot_func_helper = get_slot() - elif slot_func: - slot_func_helper = llhelper(slot_func.api_func.functype, - slot_func.api_func.get_wrapper(space)) - - if 
slot_func_helper is None: + slot_func = get_slot_tp_function(space, typedef, slot_name) + if not slot_func: if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: os.write(2, "%s defined by %s but no slot function defined!\n" % ( method_name, w_type.getname(space))) continue + slot_func_helper = llslot(space, slot_func) # XXX special case wrapper-functions and use a "specific" slot func @@ -372,9 +365,8 @@ def setup_new_method_def(space): ptr = get_new_method_def(space) - ptr.c_ml_meth = rffi.cast(PyCFunction_typedef, - llhelper(tp_new_wrapper.api_func.functype, - tp_new_wrapper.api_func.get_wrapper(space))) + ptr.c_ml_meth = rffi.cast( + PyCFunction_typedef, llslot(space, tp_new_wrapper)) def add_tp_new_wrapper(space, dict_w, pto): if "__new__" in dict_w: @@ -495,8 +487,7 @@ def subtype_dealloc(space, obj): pto = obj.c_ob_type base = pto - this_func_ptr = llhelper(subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + this_func_ptr = llslot(space, subtype_dealloc) while base.c_tp_dealloc == this_func_ptr: base = base.c_tp_base assert base @@ -519,9 +510,7 @@ def setup_bytes_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) lltype.render_immortal(c_buf) - c_buf.c_bf_getbuffer = llhelper( - bytes_getbuffer.api_func.functype, - bytes_getbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getbuffer = llslot(space, bytes_getbuffer) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER @@ -581,12 +570,10 @@ # dealloc if space.gettypeobject(w_type.layout.typedef) is w_type: # only for the exact type, like 'space.w_tuple' or 'space.w_list' - pto.c_tp_dealloc = typedescr.get_dealloc(space) + pto.c_tp_dealloc = llslot(space, typedescr.get_dealloc()) else: # for all subtypes, use subtype_dealloc() - pto.c_tp_dealloc = llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + pto.c_tp_dealloc = llslot(space, subtype_dealloc) if space.is_w(w_type, space.w_str): 
pto.c_tp_itemsize = 1 elif space.is_w(w_type, space.w_tuple): @@ -595,10 +582,8 @@ if space.is_w(w_type, space.w_str): setup_bytes_buffer_procs(space, pto) - pto.c_tp_free = llhelper(PyObject_Free.api_func.functype, - PyObject_Free.api_func.get_wrapper(space)) - pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, - PyType_GenericAlloc.api_func.get_wrapper(space)) + pto.c_tp_free = llslot(space, PyObject_Free) + pto.c_tp_alloc = llslot(space, PyType_GenericAlloc) builder = space.fromcache(StaticObjectBuilder) if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0 and builder.cpyext_type_init is None): @@ -780,15 +765,11 @@ if not pto.c_tp_setattro: from pypy.module.cpyext.object import PyObject_GenericSetAttr - pto.c_tp_setattro = llhelper( - PyObject_GenericSetAttr.api_func.functype, - PyObject_GenericSetAttr.api_func.get_wrapper(space)) + pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr) if not pto.c_tp_getattro: from pypy.module.cpyext.object import PyObject_GenericGetAttr - pto.c_tp_getattro = llhelper( - PyObject_GenericGetAttr.api_func.functype, - PyObject_GenericGetAttr.api_func.get_wrapper(space)) + pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr) if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1066,7 +1066,6 @@ prepare_cond_call_value_r = prepare_cond_call_value_i - def notimplemented(self, op): msg = '[PPC/regalloc] %s not implemented\n' % op.getopname() if we_are_translated(): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -374,10 +374,11 @@ _COND_CALL_SAVE_REGS = [r.r11, r.r2, r.r3, r.r4, r.r5] def emit_cond_call(self, op, arglocs, regalloc): + resloc = arglocs[0] + arglocs = arglocs[1:] fcond = 
self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond.value != c.cond_none.value - fcond = c.negate(fcond) jmp_adr = self.mc.get_relative_pos() self.mc.reserve_cond_jump() # patched later to a relative branch @@ -411,6 +412,8 @@ self.mc.BASR(r.r14, r.r14) # restoring the registers saved above, and doing pop_gcmap(), is left # to the cond_call_slowpath helper. We never have any result value. + if resloc is not None: + self.mc.LGR(resloc, r.RES) relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) pmc.BRCL(fcond, l.imm(relative_target)) @@ -419,6 +422,9 @@ # guard_no_exception too self.previous_cond_call_jcond = jmp_adr, fcond + emit_cond_call_value_i = emit_cond_call + emit_cond_call_value_r = emit_cond_call + class AllocOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1107,7 +1107,7 @@ def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) - locs = [] + locs = [None] # support between 0 and 4 integer arguments assert 2 <= op.numargs() <= 2 + 4 for i in range(1, op.numargs()): @@ -1116,6 +1116,22 @@ locs.append(loc) return locs + def prepare_cond_call_value_i(self, op): + x = self.ensure_reg(op.getarg(0)) + self.load_condition_into_cc(op.getarg(0)) + self.rm.force_allocate_reg(op, selected_reg=x) # spilled if survives + # ^^^ if arg0!=0, we jump over the next block of code (the call) + locs = [x] + # support between 0 and 4 integer arguments + assert 2 <= op.numargs() <= 2 + 4 + for i in range(1, op.numargs()): + loc = self.loc(op.getarg(i)) + assert loc.type != FLOAT + locs.append(loc) + return locs # [res, function, args...] 
+ + prepare_cond_call_value_r = prepare_cond_call_value_i + def prepare_cond_call_gc_wb(self, op): arglocs = [self.ensure_reg(op.getarg(0))] return arglocs From pypy.commits at gmail.com Sat Dec 10 15:42:48 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 12:42:48 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <584c68c8.113cc20a.638ff.b348@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r830:cd0e273c0f90 Date: 2016-12-08 23:38 +0100 http://bitbucket.org/pypy/pypy.org/changeset/cd0e273c0f90/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66429 of $105000 (63.3%) + $66431 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sat Dec 10 15:42:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 10 Dec 2016 12:42:50 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <584c68ca.96a61c0a.be0fc.3e2e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r831:d5107499e7fa Date: 2016-12-10 21:42 +0100 http://bitbucket.org/pypy/pypy.org/changeset/d5107499e7fa/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $59000 of $80000 (73.7%) + $59010 of $80000 (73.8%)
    @@ -29,7 +29,7 @@ - $59010 of $80000 (73.8%) + $59030 of $80000 (73.8%)
    @@ -29,7 +29,7 @@ - $66431 of $105000 (63.3%) + $66436 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Tue Dec 13 06:11:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 03:11:16 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Fix cpyext -A testing: negative exit codes can happen and should count as failures Message-ID: <584fd754.4fcb190a.c8a96.4cd5@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89036:5718b9a37d0d Date: 2016-12-13 11:10 +0000 http://bitbucket.org/pypy/pypy/changeset/5718b9a37d0d/ Log: Fix cpyext -A testing: negative exit codes can happen and should count as failures diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -182,8 +182,9 @@ if res == 81: py.test.skip('%r was not compiled w/ required usemodules: %r' % (python_, usemodules)) - elif res > 0: - raise AssertionError("Subprocess failed:\n" + stderr) + elif res != 0: + raise AssertionError( + "Subprocess failed with exit code %s:\n%s" % (res, stderr)) elif "===aefwuiheawiu===" not in stdout: raise AssertionError("%r crashed:\n%s" % (python_, stderr)) From pypy.commits at gmail.com Tue Dec 13 06:15:28 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 03:15:28 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <584fd850.090d2e0a.d9ba5.63d3@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89037:b70146028617 Date: 2016-12-13 11:14 +0000 http://bitbucket.org/pypy/pypy/changeset/b70146028617/ Log: fix test diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -294,7 +294,7 @@ return obj; """)]) a = module.empty_format('hello') - assert isinstance(a, unicode) + assert isinstance(a, str) class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase): """ From pypy.commits at gmail.com Tue Dec 13 06:29:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 13 Dec 2016 
03:29:16 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: merged default Message-ID: <584fdb8c.0faa190a.80718.5154@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89038:b6a80f1a44e0 Date: 2016-12-13 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b6a80f1a44e0/ Log: merged default diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -0,0 +1,26 @@ +from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class FakeSpace: + pass + +def test_encode_utf8(): + space = FakeSpace() + assert encode_utf8(space, u"abc") == "abc" + assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" + assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" + assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + +def test_decode_utf8(): + space = FakeSpace() + assert decode_utf8(space, "abc") == u"abc" + assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" + assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -51,6 +51,10 @@ return result def decode_utf8(space, string): + # Surrogates are accepted and not treated specially at all. + # If there happen to be two 3-bytes encoding a pair of surrogates, + # you still get two surrogate unicode characters in the result. 
+ # These are the Python2 rules; Python3 differs. result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), @@ -59,8 +63,9 @@ def encode_utf8(space, uni): # Note that this function never raises UnicodeEncodeError, - # since surrogate pairs are allowed. - # This is not the case with Python3. + # since surrogates are allowed, either paired or lone. + # A paired surrogate is considered like the non-BMP character + # it stands for. These are the Python2 rules; Python3 differs. return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=raise_unicode_exception_encode, diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1066,7 +1066,6 @@ prepare_cond_call_value_r = prepare_cond_call_value_i - def notimplemented(self, op): msg = '[PPC/regalloc] %s not implemented\n' % op.getopname() if we_are_translated(): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -374,10 +374,11 @@ _COND_CALL_SAVE_REGS = [r.r11, r.r2, r.r3, r.r4, r.r5] def emit_cond_call(self, op, arglocs, regalloc): + resloc = arglocs[0] + arglocs = arglocs[1:] fcond = self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond.value != c.cond_none.value - fcond = c.negate(fcond) jmp_adr = self.mc.get_relative_pos() self.mc.reserve_cond_jump() # patched later to a relative branch @@ -411,6 +412,8 @@ self.mc.BASR(r.r14, r.r14) # restoring the registers saved above, and doing pop_gcmap(), is left # to the cond_call_slowpath helper. We never have any result value. 
+ if resloc is not None: + self.mc.LGR(resloc, r.RES) relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) pmc.BRCL(fcond, l.imm(relative_target)) @@ -419,6 +422,9 @@ # guard_no_exception too self.previous_cond_call_jcond = jmp_adr, fcond + emit_cond_call_value_i = emit_cond_call + emit_cond_call_value_r = emit_cond_call + class AllocOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1107,7 +1107,7 @@ def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) - locs = [] + locs = [None] # support between 0 and 4 integer arguments assert 2 <= op.numargs() <= 2 + 4 for i in range(1, op.numargs()): @@ -1116,6 +1116,22 @@ locs.append(loc) return locs + def prepare_cond_call_value_i(self, op): + x = self.ensure_reg(op.getarg(0)) + self.load_condition_into_cc(op.getarg(0)) + self.rm.force_allocate_reg(op, selected_reg=x) # spilled if survives + # ^^^ if arg0!=0, we jump over the next block of code (the call) + locs = [x] + # support between 0 and 4 integer arguments + assert 2 <= op.numargs() <= 2 + 4 + for i in range(1, op.numargs()): + loc = self.loc(op.getarg(i)) + assert loc.type != FLOAT + locs.append(loc) + return locs # [res, function, args...] + + prepare_cond_call_value_r = prepare_cond_call_value_i + def prepare_cond_call_gc_wb(self, op): arglocs = [self.ensure_reg(op.getarg(0))] return arglocs diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -142,10 +142,14 @@ assert len(lst) == len(args_v), ( "not supported so far: 'greens' variables contain Void") # a crash here means that you have to reorder the variable named in - # the JitDriver. 
Indeed, greens and reds must both be sorted: first - # all INTs, followed by all REFs, followed by all FLOATs. + # the JitDriver. lst2 = sort_vars(lst) - assert lst == lst2 + assert lst == lst2, ("You have to reorder the variables named in " + "the JitDriver (both the 'greens' and 'reds' independently). " + "They must be sorted like this: first all the integer-like, " + "then all the pointer-like, and finally the floats.\n" + "Got: %r\n" + "Expected: %r" % (lst, lst2)) return lst # return (_sort(greens_v, True), _sort(reds_v, False)) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1778,22 +1778,23 @@ finally: lltype.free(l_utsbuf, flavor='raw') -# These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) -c_major = external('major', [rffi.INT], rffi.INT, macro=True) -c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) +if sys.platform != 'win32': + # These are actually macros on some/most systems + c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) + c_major = external('major', [rffi.INT], rffi.INT, macro=True) + c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) - at replace_os_function('makedev') -def makedev(maj, min): - return c_makedev(maj, min) + @replace_os_function('makedev') + def makedev(maj, min): + return c_makedev(maj, min) - at replace_os_function('major') -def major(dev): - return c_major(dev) + @replace_os_function('major') + def major(dev): + return c_major(dev) - at replace_os_function('minor') -def minor(dev): - return c_minor(dev) + @replace_os_function('minor') + def minor(dev): + return c_minor(dev) #___________________________________________________________________ diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -327,6 +327,16 @@ def unicode_encode_utf_8(s, size, errors, 
errorhandler=None, allow_surrogates=allow_surrogate_by_default): + # In this function, allow_surrogates can be: + # + # * True: surrogates are always allowed. A valid surrogate pair + # is replaced with the non-BMP unicode char it stands for, + # which is then encoded as 4 bytes. + # + # * False: surrogates are always forbidden. + # + # See also unicode_encode_utf8sp(). + # if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, @@ -391,6 +401,33 @@ _encodeUCS4(result, ch) return result.build() +def unicode_encode_utf8sp(s, size): + # Surrogate-preserving utf-8 encoding. Any surrogate character + # turns into its 3-bytes encoding, whether it is paired or not. + # This should always be reversible, and the reverse is the regular + # str_decode_utf_8() with allow_surrogates=True. + assert(size >= 0) + result = StringBuilder(size) + pos = 0 + while pos < size: + ch = ord(s[pos]) + pos += 1 + if ch < 0x80: + # Encode ASCII + result.append(chr(ch)) + elif ch < 0x0800: + # Encode Latin-1 + result.append(chr((0xc0 | (ch >> 6)))) + result.append(chr((0x80 | (ch & 0x3f)))) + elif ch < 0x10000: + # Encode UCS2 Unicode ordinals, and surrogates + result.append((chr((0xe0 | (ch >> 12))))) + result.append((chr((0x80 | ((ch >> 6) & 0x3f))))) + result.append((chr((0x80 | (ch & 0x3f))))) + else: + _encodeUCS4(result, ch) + return result.build() + # ____________________________________________________________ # utf-16 diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,7 @@ def test_isatty(self): assert rposix.isatty(-1) is False + @py.test.mark.skipif("not hasattr(rposix, 'makedev')") def test_makedev(self): dev = rposix.makedev(24, 7) assert rposix.major(dev) == 24 diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- 
a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -812,6 +812,21 @@ py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) assert encoder(u'u\u1234', 2, 'replace') == 'u?' + def test_encode_utf8sp(self): + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + for input, expected in [ + (u"", ""), + (u"abc", "abc"), + (u"\u1234", "\xe1\x88\xb4"), + (u"\ud800", "\xed\xa0\x80"), + (u"\udc00", "\xed\xb0\x80"), + (u"\ud800" + c, "\xed\xa0\x80\xed\xb0\x80"), + ]: + got = runicode.unicode_encode_utf8sp(input, len(input)) + assert got == expected + class TestTranslation(object): def setup_class(cls): From pypy.commits at gmail.com Tue Dec 13 06:29:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 13 Dec 2016 03:29:18 -0800 (PST) Subject: [pypy-commit] pypy default: slow path overwrites r2 (return register), save it in r1 before returning and restore it later Message-ID: <584fdb8e.5298190a.2229f.5974@mx.google.com> Author: Richard Plangger Branch: Changeset: r89039:1656fa7e6390 Date: 2016-12-13 12:25 +0100 http://bitbucket.org/pypy/pypy/changeset/1656fa7e6390/ Log: slow path overwrites r2 (return register), save it in r1 before returning and restore it later diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -426,6 +426,8 @@ # Finish self._reload_frame_if_necessary(mc) + mc.LGR(r.SCRATCH2, r.r2) + self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_core_regs_from_jitframe(mc, saved_regs) if supports_floats: diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -376,6 +376,7 @@ def emit_cond_call(self, op, arglocs, 
regalloc): resloc = arglocs[0] arglocs = arglocs[1:] + fcond = self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond.value != c.cond_none.value @@ -413,7 +414,7 @@ # restoring the registers saved above, and doing pop_gcmap(), is left # to the cond_call_slowpath helper. We never have any result value. if resloc is not None: - self.mc.LGR(resloc, r.RES) + self.mc.LGR(resloc, r.SCRATCH2) relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) pmc.BRCL(fcond, l.imm(relative_target)) From pypy.commits at gmail.com Tue Dec 13 06:53:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 13 Dec 2016 03:53:26 -0800 (PST) Subject: [pypy-commit] pypy default: negate in prepare_cond_call, moved from the assembler to the regalloc Message-ID: <584fe136.0a0e2e0a.91b3.600f@mx.google.com> Author: Richard Plangger Branch: Changeset: r89040:d9a6afa06b18 Date: 2016-12-13 12:52 +0100 http://bitbucket.org/pypy/pypy/changeset/d9a6afa06b18/ Log: negate in prepare_cond_call, moved from the assembler to the regalloc diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -396,6 +396,7 @@ # * gcmap is pushed # * the old value of these regs must already be stored in the jitframe # * on exit, all registers are restored from the jitframe + # * the result of the call is moved to register r1 mc = InstrBuilder() self.mc = mc @@ -426,9 +427,10 @@ # Finish self._reload_frame_if_necessary(mc) - mc.LGR(r.SCRATCH2, r.r2) + self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller - self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller + mc.LGR(r.SCRATCH2, r.RES) + self._pop_core_regs_from_jitframe(mc, saved_regs) if supports_floats: self._pop_fp_regs_from_jitframe(mc) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- 
a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1108,13 +1108,15 @@ def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) locs = [None] + self.assembler.guard_success_cc = c.negate( + self.assembler.guard_success_cc) # support between 0 and 4 integer arguments assert 2 <= op.numargs() <= 2 + 4 for i in range(1, op.numargs()): loc = self.loc(op.getarg(i)) assert loc.type != FLOAT locs.append(loc) - return locs + return locs # [None, function, arg0, ..., argn] def prepare_cond_call_value_i(self, op): x = self.ensure_reg(op.getarg(0)) From pypy.commits at gmail.com Tue Dec 13 07:26:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 04:26:15 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Add badly faked impl of PyUnicode_GetLength Message-ID: <584fe8e7.0faa190a.80718.55cd@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89041:6625a5970bb8 Date: 2016-12-13 12:25 +0000 http://bitbucket.org/pypy/pypy/changeset/6625a5970bb8/ Log: Add badly faked impl of PyUnicode_GetLength diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -32,6 +32,17 @@ Py_DECREF(s); return PyLong_FromLong(result); """), + ("test_GetLength", "METH_NOARGS", + """ + PyObject* s = PyUnicode_FromString("Hello world"); + int result = 0; + + if(PyUnicode_GetLength(s) != 11) { + result = -PyUnicode_GetSize(s); + } + Py_DECREF(s); + return PyLong_FromLong(result); + """), ("test_GetSize_exception", "METH_NOARGS", """ PyObject* f = PyFloat_FromDouble(1.0); @@ -48,6 +59,9 @@ assert module.test_GetSize() == 0 raises(TypeError, module.test_GetSize_exception) + # XXX: needs a test where it differs from GetSize + assert module.test_GetLength() == 0 + assert module.test_is_unicode(u"") assert not module.test_is_unicode(()) diff --git 
a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -253,6 +253,11 @@ @cpython_api([PyObject], Py_ssize_t, error=-1) def PyUnicode_GetSize(space, ref): + """Return the size of the deprecated Py_UNICODE representation, in code + units (this includes surrogate pairs as 2 units). + + Please migrate to using PyUnicode_GetLength(). + """ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode: ref = rffi.cast(PyUnicodeObject, ref) return ref.c_length @@ -260,6 +265,15 @@ w_obj = from_ref(space, ref) return space.len_w(w_obj) + at cpython_api([PyObject], Py_ssize_t, error=-1) +def PyUnicode_GetLength(space, w_unicode): + """Return the length of the Unicode object, in code points.""" + # XXX: this is a stub + if not PyUnicode_Check(space, w_unicode): + PyErr_BadArgument(space) + #PyUnicode_READY(w_unicode) + return PyUnicode_GET_LENGTH(space, w_unicode) + @cpython_api([PyObject, rffi.CWCHARP, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_AsWideChar(space, ref, buf, size): """Copy the Unicode object contents into the wchar_t buffer w. 
At most From pypy.commits at gmail.com Tue Dec 13 07:50:32 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 13 Dec 2016 04:50:32 -0800 (PST) Subject: [pypy-commit] pypy utf8sp: A branch to experiment with handling unicode strings as either utf8 or Message-ID: <584fee98.a3212e0a.9f8fa.5a0d@mx.google.com> Author: Armin Rigo Branch: utf8sp Changeset: r89042:f0c4b96a4334 Date: 2016-12-11 15:29 +0100 http://bitbucket.org/pypy/pypy/changeset/f0c4b96a4334/ Log: A branch to experiment with handling unicode strings as either utf8 or wide-chars From pypy.commits at gmail.com Tue Dec 13 08:48:03 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 13 Dec 2016 05:48:03 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fixes in tests (running them with -A) and corresponding fixes in the code Message-ID: <584ffc13.194b2e0a.b8b6c.643e@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89043:dfa609c0bc20 Date: 2016-12-13 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/dfa609c0bc20/ Log: fixes in tests (running them with -A) and corresponding fixes in the code diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -292,7 +292,7 @@ def _frombytes(self, space, s): if len(s) % self.itemsize != 0: raise oefmt(space.w_ValueError, - "string length not a multiple of item size") + "bytes length not a multiple of item size") oldlen = self.len new = len(s) / self.itemsize if not new: @@ -316,14 +316,9 @@ raise MemoryError w_item = space.call_method(w_f, 'read', space.wrap(size)) item = space.bytes_w(w_item) + self._frombytes(space, item) if len(item) < size: - n = len(item) % self.itemsize - elems = max(0, len(item) - (len(item) % self.itemsize)) - if n != 0: - item = item[0:elems] - self._frombytes(space, item) raise oefmt(space.w_EOFError, "not enough items in file") - self._frombytes(space, item) def descr_tofile(self, space, w_f): """ tofile(f) @@ -339,7 
+334,7 @@ Extends this array with data from the unicode string ustr. The array must be a type 'u' array; otherwise a ValueError - is raised. Use array.fromstring(ustr.decode(...)) to + is raised. Use array.frombytes(ustr.encode(...)) to append Unicode data to an array of some other type. """ # XXX the following probable bug is not emulated: @@ -358,7 +353,7 @@ Convert the array to a unicode string. The array must be a type 'u' array; otherwise a ValueError is raised. Use - array.tostring().decode() to obtain a unicode string from + array.tobytes().decode() to obtain a unicode string from an array of some other type. """ if self.typecode == 'u': @@ -908,6 +903,11 @@ item = rffi.cast(lltype.Signed, item) elif mytype.typecode == 'f': item = float(item) + elif mytype.typecode == 'u': + if ord(item) >= 0x110000: + raise oefmt(space.w_ValueError, + "array contains a unicode character out of " + "range(0x110000)") return space.wrap(item) # interface diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -222,10 +222,14 @@ a.fromfile(myfile(b'\x01', 20), 2) assert len(a) == 2 and a[0] == 257 and a[1] == 257 - for i in (0, 1): - a = self.array('h') - raises(EOFError, a.fromfile, myfile(b'\x01', 2 + i), 2) - assert len(a) == 1 and a[0] == 257 + a = self.array('h') + raises(EOFError, a.fromfile, myfile(b'\x01', 2), 2) + assert len(a) == 1 and a[0] == 257 + + a = self.array('h') + raises(ValueError, a.fromfile, myfile(b'\x01', 3), 2) + # ValueError: bytes length not a multiple of item size + assert len(a) == 0 def test_fromfile_no_warning(self): import warnings @@ -454,6 +458,9 @@ assert a.tostring() == b'helLo' def test_buffer_keepalive(self): + import sys + if '__pypy__' not in sys.builtin_module_names: + skip("CPython: cannot resize an array that is exporting buffers") buf = memoryview(self.array('b', b'text')) assert buf[2] == ord('x') # @@ -866,7 
+873,7 @@ a = self.array('u', u'\x01\u263a\x00\ufeff') b = self.array('u', u'\x01\u263a\x00\ufeff') b.byteswap() - assert a != b + raises(ValueError, "a != b") def test_unicode_ord_positive(self): import sys @@ -874,11 +881,7 @@ skip("test for 32-bit unicodes") a = self.array('u', b'\xff\xff\xff\xff') assert len(a) == 1 - assert repr(a[0]) == r"'\Uffffffff'" - if sys.maxsize == 2147483647: - assert ord(a[0]) == -1 - else: - assert ord(a[0]) == 4294967295 + raises(ValueError, "a[0]") def test_weakref(self): import weakref From pypy.commits at gmail.com Tue Dec 13 09:49:19 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 13 Dec 2016 06:49:19 -0800 (PST) Subject: [pypy-commit] pypy py3.5: CPython's ``sys.settrace()`` sometimes reports an ``exception`` at the Message-ID: <58500a6f.194e2e0a.42e37.7e9b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89045:d86691a0c3ca Date: 2016-12-13 15:48 +0100 http://bitbucket.org/pypy/pypy/changeset/d86691a0c3ca/ Log: CPython's ``sys.settrace()`` sometimes reports an ``exception`` at the end of ``for`` or ``yield from`` lines for the ``StopIteration``, and sometimes not. The problem is that it occurs in an ill-defined subset of cases. PyPy attempts to emulate that but the precise set of cases is not exactly the same. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -478,6 +478,12 @@ from the Makefile used to build the interpreter. PyPy should bake the values in during compilation, but does not do that yet. +* CPython's ``sys.settrace()`` sometimes reports an ``exception`` at the + end of ``for`` or ``yield from`` lines for the ``StopIteration``, and + sometimes not. The problem is that it occurs in an ill-defined subset + of cases. PyPy attempts to emulate that but the precise set of cases + is not exactly the same. + .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. 
_`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -309,6 +309,9 @@ tb.frame.mark_as_escaped() return tb + def has_any_traceback(self): + return self._application_traceback is not None + def set_cause(self, space, w_cause): if w_cause is None: return diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -185,7 +185,7 @@ if not e.match(space, space.w_StopIteration): raise e.normalize_exception(space) - space.getexecutioncontext().exception_trace(frame, e) + frame._report_stopiteration_sometimes(w_yf, e) try: w_stop_value = space.getattr(e.get_w_value(space), space.wrap("value")) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1154,13 +1154,31 @@ if not e.match(self.space, self.space.w_StopIteration): raise # iterator exhausted - self.space.getexecutioncontext().exception_trace(self, e) + self._report_stopiteration_sometimes(w_iterator, e) self.popvalue() next_instr += jumpby else: self.pushvalue(w_nextitem) return next_instr + def _report_stopiteration_sometimes(self, w_iterator, operr): + # CPython 3.5 calls the exception trace in an ill-defined subset + # of cases: only if tp_iternext returned NULL and set a + # StopIteration exception, but not if tp_iternext returned NULL + # *without* setting an exception. We can't easily emulate that + # behavior at this point. For example, the generator's + # tp_iternext uses one or other case depending on whether the + # generator is already exhausted or just exhausted now. 
We'll + # classify that as a CPython incompatibility and use an + # approximative rule: if w_iterator is a generator-iterator, + # we always report it; if operr has already a stack trace + # attached (likely from a custom __iter__() method), we also + # report it; in other cases, we don't. + from pypy.interpreter.generator import GeneratorOrCoroutine + if (isinstance(w_iterator, GeneratorOrCoroutine) or + operr.has_any_traceback()): + self.space.getexecutioncontext().exception_trace(self, operr) + def FOR_LOOP(self, oparg, next_instr): raise BytecodeCorruption("old opcode, no longer in use") diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -666,10 +666,33 @@ sys.settrace(None) print('seen:', seen) # on Python 3 we get an extra 'exception' when 'for' catches - # StopIteration + # StopIteration (but not always! mess) assert seen == ['call', 'line', 'call', 'return', 'exception', 'return'] assert frames[-2].f_code.co_name == 'g' + def test_nongenerator_trace_stopiteration(self): + import sys + gen = iter([5]) + assert next(gen) == 5 + seen = [] + frames = [] + def trace_func(frame, event, *args): + print('TRACE:', frame, event, args) + seen.append(event) + frames.append(frame) + return trace_func + def g(): + for x in gen: + never_entered + sys.settrace(trace_func) + g() + sys.settrace(None) + print('seen:', seen) + # hack: don't report the StopIteration for some "simple" + # iterators. 
+ assert seen == ['call', 'line', 'return'] + assert frames[-2].f_code.co_name == 'g' + def test_yieldfrom_trace_stopiteration(self): """ import sys def f2(): @@ -726,6 +749,30 @@ assert frames[-2].f_code.co_name == 'g' """ + def test_yieldfrom_trace_stopiteration_3(self): """ + import sys + def f(): + yield from [] + gen = f() + seen = [] + frames = [] + def trace_func(frame, event, *args): + print('TRACE:', frame, event, args) + seen.append(event) + frames.append(frame) + return trace_func + def g(): + for x in gen: + never_entered + sys.settrace(trace_func) + g() # invokes next_yield_from() from YIELD_FROM() + sys.settrace(None) + print('seen:', seen) + assert seen == ['call', 'line', 'call', 'line', + 'return', 'exception', 'return'] + assert frames[-4].f_code.co_name == 'f' + """ + def test_clear_locals(self): def make_frames(): def outer(): From pypy.commits at gmail.com Tue Dec 13 10:45:35 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 13 Dec 2016 07:45:35 -0800 (PST) Subject: [pypy-commit] pypy default: store dict of all builtin functions on the space Message-ID: <5850179f.93d7190a.368c0.77da@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r89046:c527788fc433 Date: 2016-12-13 16:43 +0100 http://bitbucket.org/pypy/pypy/changeset/c527788fc433/ Log: store dict of all builtin functions on the space this is conceptually more correct and should fix the strange test_ztranslation failures ("duplicate function ids with identifier...") that sometimes inexplicably pop up diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -428,6 +428,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ 
b/pypy/interpreter/function.py @@ -247,16 +247,15 @@ def descr_function_repr(self): return self.getrepr(self.space, 'function %s' % (self.name,)) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r and %r" % ( identifier, previous, self)) @@ -264,10 +263,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -671,9 +671,9 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(indentifier): + def find(space, indentifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code + return Function.find(space, identifier).code find = staticmethod(find) def signature(self): diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -77,7 +77,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) 
@@ -86,7 +86,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) From pypy.commits at gmail.com Tue Dec 13 18:23:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 15:23:17 -0800 (PST) Subject: [pypy-commit] pypy default: fix translation (typo) Message-ID: <585082e5.84c3190a.cf463.9bf0@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89047:a12b1539331d Date: 2016-12-13 23:22 +0000 http://bitbucket.org/pypy/pypy/changeset/a12b1539331d/ Log: fix translation (typo) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -671,10 +671,10 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(space, indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function return Function.find(space, identifier).code - find = staticmethod(find) def signature(self): return self.sig From pypy.commits at gmail.com Tue Dec 13 18:53:16 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 15:53:16 -0800 (PST) Subject: [pypy-commit] pypy cling-support: downgrade long double to double as annotator support is not there Message-ID: <585089ec.a5002e0a.b2daa.8aac@mx.google.com> Author: Wim Lavrijsen Branch: cling-support Changeset: r89049:e04b47226aa3 Date: 2016-12-13 10:52 -0800 http://bitbucket.org/pypy/pypy/changeset/e04b47226aa3/ Log: downgrade long double to double as annotator support is not there diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,7 +386,9 @@ fval = float(rfloat.rstring_to_float(default)) else: fval = float(0.) 
- self.default = r_longfloat(fval) + # see ffitypes.LongDoubleTypeMixin: long double not really + # supported in rffi + self.default = fval #r_longfloat(fval) def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import oefmt from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rarithmetic import r_singlefloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from pypy.module._cffi_backend import newtype @@ -239,21 +239,25 @@ _mixin_ = True _immutable_fields_ = ['c_type', 'c_ptrtype', 'typecode'] - c_type = rffi.LONGDOUBLE - c_ptrtype = rffi.LONGDOUBLEP + # long double is not really supported, so work with normal + # double instead; doing it here keeps this localized + c_type = rffi.DOUBLE #rffi.LONGDOUBLE + c_ptrtype = rffi.DOUBLEP #rffi.LONGDOUBLEP typecode = 'g' def _unwrap_object(self, space, w_obj): - return space.float_w(w_obj) + #return r_longfloat(space.float_w(w_obj)) + return float(space.float_w(w_obj)) def _wrap_object(self, space, obj): - # long double not really supported, so force a cast to double + # return space.wrap(obj) dbl = rffi.cast(rffi.DOUBLE, obj) return space.wrap(float(dbl)) def cffi_type(self, space): state = space.fromcache(State) - return state.c_ldouble + #return state.c_ldouble + return state.c_double def typeid(c_type): "NOT_RPYTHON" From pypy.commits at gmail.com Tue Dec 13 18:53:14 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 15:53:14 -0800 (PST) Subject: [pypy-commit] pypy cling-support: merge default into cling-support branch and fix dummy_backend Message-ID: <585089ea.05052e0a.da98d.9922@mx.google.com> Author: Wim Lavrijsen Branch: cling-support Changeset: r89048:2cf0e94f1f19 Date: 2016-12-12 17:30 -0800 
http://bitbucket.org/pypy/pypy/changeset/2cf0e94f1f19/ Log: merge default into cling-support branch and fix dummy_backend diff too long, truncating to 2000 out of 7197 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import shlex import imp from distutils.errors import DistutilsPlatformError @@ -62,11 +61,31 @@ def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} + g['CC'] = "gcc -pthread" + g['CXX'] = "g++ -pthread" + g['OPT'] = "-DNDEBUG -O2" + g['CFLAGS'] = "-DNDEBUG -O2" + g['CCSHARED'] = "-fPIC" + g['LDSHARED'] = "gcc -pthread -shared" + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] + g['AR'] = "ar" + g['ARFLAGS'] = "rc" g['EXE'] = "" - g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') - g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" + g['VERSION'] = get_python_version() + + if sys.platform[:6] == "darwin": + import platform + if platform.machine() == 'i386': + if platform.architecture()[0] == '32bit': + arch = 'i386' + else: + arch = 'x86_64' + else: + # just a guess + arch = platform.machine() + g['LDSHARED'] += ' -undefined dynamic_lookup' + g['CC'] += ' -arch %s' % (arch,) global _config_vars _config_vars = g @@ -103,6 +122,12 @@ _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers + if sys.platform == 'darwin': + import _osx_support + _osx_support.customize_config_vars(_config_vars) + if args: vals = [] for name in args: @@ -118,30 +143,80 @@ """ return get_config_vars().get(name) + def customize_compiler(compiler): - """Dummy method to let some easy_install 
packages that have - optional C speedup components. + """Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile (CPython) + or hard-coded in _init_posix() (PyPy). """ - def customize(executable, flags): - command = compiler.executables[executable] + flags - setattr(compiler, executable, command) + if compiler.compiler_type == "unix": + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + # Use get_config_var() to ensure _config_vars is initialized. 
+ if not get_config_var('CUSTOMIZED_OSX_COMPILER'): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - if compiler.compiler_type == "unix": - # compiler_so can be c++ which has no -Wimplicit - #compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) - compiler.compiler_so.extend(['-O2', '-fPIC']) - compiler.shared_lib_extension = get_config_var('SO') - if "CPPFLAGS" in os.environ: - cppflags = shlex.split(os.environ["CPPFLAGS"]) - for executable in ('compiler', 'compiler_so', 'linker_so'): - customize(executable, cppflags) - if "CFLAGS" in os.environ: - cflags = shlex.split(os.environ["CFLAGS"]) - for executable in ('compiler', 'compiler_so', 'linker_so'): - customize(executable, cflags) - if "LDFLAGS" in os.environ: - ldflags = shlex.split(os.environ["LDFLAGS"]) - customize('linker_so', ldflags) + (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', + 'CCSHARED', 'LDSHARED', 'SO', 'AR', + 'ARFLAGS') + + if 'CC' in os.environ: + newcc = os.environ['CC'] + if (sys.platform == 'darwin' + and 'LDSHARED' not in os.environ + and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well + ldshared = newcc + ldshared[len(cc):] + cc = newcc + if 'CXX' in os.environ: + cxx = os.environ['CXX'] + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + if 'AR' in os.environ: + ar = os.environ['AR'] + if 
'ARFLAGS' in os.environ: + archiver = ar + ' ' + os.environ['ARFLAGS'] + else: + archiver = ar + ' ' + ar_flags + + cc_cmd = cc + ' ' + cflags + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + compiler_cxx=cxx, + linker_so=ldshared, + linker_exe=cc, + archiver=archiver) + + compiler.shared_lib_extension = so_ext from sysconfig_cpython import ( diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,4 @@ +import os from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,6 +8,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait3(status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -16,6 +20,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait4(pid, status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info --- a/lib_pypy/greenlet.egg-info +++ b/lib_pypy/greenlet.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: greenlet -Version: 0.4.10 +Version: 0.4.11 Summary: Lightweight in-process concurrent programming Home-page: https://github.com/python-greenlet/greenlet Author: Ralf Schmitt (for CPython), PyPy team diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,7 +1,7 @@ import sys import _continuation -__version__ = "0.4.10" +__version__ = "0.4.11" # ____________________________________________________________ # Exceptions diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,31 +1,19 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions 
for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection as extracted from header files. +The module itself is built into PyPy (an alternative exists for CPython), but +it requires a backend, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - Motivation ---------- - To provide bindings to another language in CPython, you program to a generic C-API that exposes many of the interpreter features. With PyPy, however, there is no such generic C-API, because several of the @@ -47,7 +35,7 @@ by the JIT up until the actual point of call into C++. This means for example, that if variables are already unboxed by the JIT, they can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features +Second, a backend such as Cling adds dynamic features to C++, thus greatly reducing impedance mismatches between the two languages. 
For example, Reflex is dynamic enough to allow writing runtime bindings generation in python (as opposed to RPython) and this is used to create very diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -71,8 +71,11 @@ Analyzing performance of applications is always tricky. We have various tools, for example a `jitviewer`_ that help us analyze performance. -The jitviewer shows the code generated by the PyPy JIT in a hierarchical way, -as shown by the screenshot below: +The old tool was partly rewritten and combined with vmprof. The service is +hosted at `vmprof.com`_. + +The following shows an old image of the jitviewer. +The code generated by the PyPy JIT in a hierarchical way: - at the bottom level, it shows the Python source code of the compiled loops @@ -84,13 +87,17 @@ .. image:: image/jitviewer.png -The jitviewer is a web application based on flask and jinja2 (and jQuery on -the client): if you have great web developing skills and want to help PyPy, +The jitviewer is a web application based on django and angularjs: +if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep -knowledge of the internals. +knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and +`vmprof-integration`_ to find open issues and documentation. -.. _jitviewer: http://bitbucket.org/pypy/jitviewer - +.. _jitviewer: http://vmprof.com +.. _vmprof.com: http://vmprof.com +.. _vmprof-python: https://github.com/vmprof/vmprof-python +.. _vmprof-server: https://github.com/vmprof/vmprof-server +.. _vmprof-integration: https://github.com/vmprof/vmprof-integration Optimized Unicode Representation -------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,15 @@ .. 
this is a revision shortly after release-pypy2.7-v5.6 .. startrev: 7e9787939641 + +Since a while now, PyPy preserves the order of dictionaries and sets. +However, the set literal syntax ``{x, y, z}`` would by mistake build a +set with the opposite order: ``set([z, y, x])``. This has been fixed. +Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be +``set([5.0])``, but in 2.7.trunk it is ``set([5])``. PyPy's behavior +changed in exactly the same way because of this fix. + + .. branch: rpython-error-to-systemerror Any uncaught RPython exception (from a PyPy bug) is turned into an @@ -20,3 +29,19 @@ .. branch: clean-exported-state Clean-ups in the jit optimizeopt + +.. branch: conditional_call_value_4 + +Add jit.conditional_call_elidable(), a way to tell the JIT "conditonally +call this function" returning a result. + +.. branch: desc-specialize + +Refactor FunctionDesc.specialize() and related code (RPython annotator). + +.. branch: raw-calloc + +.. branch: issue2446 + +Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key +so it will be picked up by app-level objects of that type diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -298,6 +298,12 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False + if config.objspace.usemodules.cpyext: + if config.translation.gc != 'incminimark': + raise Exception("The 'cpyext' module requires the 'incminimark'" + " GC. 
You need either 'targetpypystandalone.py" + " --withoutmod-cpyext' or '--gc=incminimark'") + config.translating = True import translate diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -111,7 +111,9 @@ self.keywords = self.keywords + keywords self.keywords_w = self.keywords_w + values_w return + is_dict = False if space.isinstance_w(w_starstararg, space.w_dict): + is_dict = True keys_w = space.unpackiterable(w_starstararg) else: try: @@ -125,7 +127,9 @@ keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) keywords = [None] * len(keys_w) - _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, self.keywords) + _do_combine_starstarargs_wrapped( + space, keys_w, w_starstararg, keywords, keywords_w, self.keywords, + is_dict) self.keyword_names_w = keys_w if self.keywords is None: self.keywords = keywords @@ -355,7 +359,7 @@ key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, - keywords_w, existingkeywords): + keywords_w, existingkeywords, is_dict): i = 0 for w_key in keys_w: try: @@ -374,7 +378,16 @@ "got multiple values for keyword argument '%s'", key) keywords[i] = key - keywords_w[i] = space.getitem(w_starstararg, w_key) + if is_dict: + # issue 2435: bug-to-bug compatibility with cpython. 
for a subclass of + # dict, just ignore the __getitem__ and access the underlying dict + # directly + from pypy.objspace.descroperation import dict_getitem + w_descr = dict_getitem(space) + w_value = space.get_and_call_function(w_descr, w_starstararg, w_key) + else: + w_value = space.getitem(w_starstararg, w_key) + keywords_w[i] = w_value i += 1 @jit.look_inside_iff( diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -183,6 +183,14 @@ assert self._finalize_.im_func is not W_Root._finalize_.im_func space.finalizer_queue.register_finalizer(self) + def may_unregister_rpython_finalizer(self, space): + """Optimization hint only: if there is no user-defined __del__() + method, pass the hint ``don't call any finalizer'' to rgc. + """ + if not self.getclass(space).hasuserdel: + from rpython.rlib import rgc + rgc.may_ignore_finalizer(self) + # hooks that the mapdict implementations needs: def _get_mapdict_map(self): return None diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -439,6 +439,7 @@ space.wrap(msg)) return OperationError(exc, w_error) + at specialize.arg(3) def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError', w_exception_class=None): assert isinstance(e, OSError) @@ -466,8 +467,8 @@ w_error = space.call_function(exc, space.wrap(errno), space.wrap(msg)) return OperationError(exc, w_error) -wrap_oserror2._annspecialcase_ = 'specialize:arg(3)' + at specialize.arg(3) def wrap_oserror(space, e, filename=None, exception_name='w_OSError', w_exception_class=None): if filename is not None: @@ -478,7 +479,6 @@ return wrap_oserror2(space, e, None, exception_name=exception_name, w_exception_class=w_exception_class) -wrap_oserror._annspecialcase_ = 'specialize:arg(3)' def exception_from_saved_errno(space, w_type): from rpython.rlib.rposix import get_saved_errno 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -547,6 +547,8 @@ @jit.dont_look_inside def _run_finalizers(self): + # called by perform() when we have to "perform" this action, + # and also directly at the end of gc.collect). while True: w_obj = self.space.finalizer_queue.next_dead() if w_obj is None: diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pyopcode import LoopBlock from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY -from rpython.rlib import jit +from rpython.rlib import jit, rgc class GeneratorIterator(W_Root): @@ -103,11 +103,11 @@ w_result = frame.execute_frame(w_arg, operr) except OperationError: # errors finish a frame - self.frame = None + self.frame_is_finished() raise # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: - self.frame = None + self.frame_is_finished() raise OperationError(space.w_StopIteration, space.w_None) else: return w_result # YIELDed @@ -209,7 +209,7 @@ finally: frame.f_backref = jit.vref_None self.running = False - self.frame = None + self.frame_is_finished() return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() @@ -228,6 +228,10 @@ break block = block.previous + def frame_is_finished(self): + self.frame = None + rgc.may_ignore_finalizer(self) + def get_printable_location_genentry(bytecode): return '%s ' % (bytecode.get_repr(),) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1295,9 +1295,10 @@ @jit.unroll_safe def BUILD_SET(self, itemcount, next_instr): w_set = self.space.newset() - for i in range(itemcount): - w_item = 
self.popvalue() + for i in range(itemcount-1, -1, -1): + w_item = self.peekvalue(i) self.space.call_method(w_set, 'add', w_item) + self.popvalues(itemcount) self.pushvalue(w_set) def STORE_MAP(self, oparg, next_instr): diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -120,6 +120,12 @@ raise OperationError(AttributeError, name) return method(*args) + def lookup_in_type(self, cls, name): + return getattr(cls, name) + + def get_and_call_function(self, w_descr, w_obj, *args): + return w_descr.__get__(w_obj)(*args) + def type(self, obj): class Type: def getname(self, space): @@ -805,3 +811,19 @@ assert str(e) == "myerror" else: assert False, "Expected TypeError" + + def test_dict_subclass_with_weird_getitem(self): + # issue 2435: bug-to-bug compatibility with cpython. for a subclass of + # dict, just ignore the __getitem__ and behave like ext_do_call in ceval.c + # which just uses the underlying dict + class d(dict): + def __getitem__(self, key): + return key + + for key in ["foo", u"foo"]: + q = d() + q[key] = "bar" + + def test(**kwargs): + return kwargs + assert test(**q) == {"foo": "bar"} diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -729,6 +729,10 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_host_is_pypy = cls.space.wrap( + '__pypy__' in sys.builtin_module_names) + def test_bom_with_future(self): s = '\xef\xbb\xbffrom __future__ import division\nx = 1/2' ns = {} @@ -771,6 +775,18 @@ assert math.copysign(1., c[0]) == -1.0 assert math.copysign(1., c[1]) == -1.0 + def test_dict_and_set_literal_order(self): + x = 1 + l1 = list({1:'a', 3:'b', 2:'c', 4:'d'}) + l2 = list({1, 3, 2, 4}) + l3 = list({x:'a', 3:'b', 2:'c', 4:'d'}) + l4 = list({x, 3, 2, 4}) + if not self.host_is_pypy: + # the 
full test relies on the host Python providing ordered dicts + assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 4} + else: + assert l1 == l2 == l3 == l4 == [1, 3, 2, 4] + ##class TestPythonAstCompiler(BaseTestCompiler): ## def setup_method(self, method): diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -580,3 +580,25 @@ pass sys.settrace(None) assert seen == ['call', 'exception', 'return'] + + def test_generator_trace_stopiteration(self): + import sys + def f(): + yield 5 + gen = f() + assert next(gen) == 5 + seen = [] + def trace_func(frame, event, *args): + print('TRACE:', frame, event, args) + seen.append(event) + return trace_func + def g(): + for x in gen: + never_entered + sys.settrace(trace_func) + g() + sys.settrace(None) + print 'seen:', seen + # on Python 3 we get an extra 'exception' when 'for' catches + # StopIteration + assert seen == ['call', 'line', 'call', 'return', 'return'] diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -4,9 +4,11 @@ def test_Ellipsis(self): assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' + assert Ellipsis.__class__.__name__ == 'ellipsis' def test_NotImplemented(self): def f(): return NotImplemented assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' + assert NotImplemented.__class__.__name__ == 'NotImplementedType' diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -0,0 +1,26 @@ +from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class FakeSpace: + pass + +def test_encode_utf8(): + space = FakeSpace() + assert encode_utf8(space, u"abc") == "abc" + assert 
encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" + assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" + assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + +def test_decode_utf8(): + space = FakeSpace() + assert decode_utf8(space, "abc") == u"abc" + assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" + assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -770,12 +770,12 @@ ) assert not Cell.typedef.acceptable_as_base_class # no __new__ -Ellipsis.typedef = TypeDef("Ellipsis", +Ellipsis.typedef = TypeDef("ellipsis", __repr__ = interp2app(Ellipsis.descr__repr__), ) assert not Ellipsis.typedef.acceptable_as_base_class # no __new__ -NotImplemented.typedef = TypeDef("NotImplemented", +NotImplemented.typedef = TypeDef("NotImplementedType", __repr__ = interp2app(NotImplemented.descr__repr__), ) assert not NotImplemented.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -51,6 +51,10 @@ return result def decode_utf8(space, string): + # Surrogates are accepted and not treated specially at all. + # If there happen to be two 3-bytes encoding a pair of surrogates, + # you still get two surrogate unicode characters in the result. + # These are the Python2 rules; Python3 differs. 
result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), @@ -59,8 +63,9 @@ def encode_utf8(space, uni): # Note that this function never raises UnicodeEncodeError, - # since surrogate pairs are allowed. - # This is not the case with Python3. + # since surrogates are allowed, either paired or lone. + # A paired surrogate is considered like the non-BMP character + # it stands for. These are the Python2 rules; Python3 differs. return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=raise_unicode_exception_encode, diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -397,7 +397,7 @@ space = self.space if space.is_none(w_destructor): if isinstance(self, W_CDataGCP): - self.w_destructor = None + self.detach_destructor() return space.w_None raise oefmt(space.w_TypeError, "Can remove destructor only on a object " @@ -604,6 +604,10 @@ self.w_destructor = None self.space.call_function(w_destructor, self.w_original_cdata) + def detach_destructor(self): + self.w_destructor = None + self.may_unregister_rpython_finalizer(self.space) + W_CData.typedef = TypeDef( '_cffi_backend.CData', diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py --- a/pypy/module/_cffi_backend/cdlopen.py +++ b/pypy/module/_cffi_backend/cdlopen.py @@ -55,6 +55,7 @@ if not libhandle: raise oefmt(self.ffi.w_FFIError, "library '%s' is already closed", self.libname) + self.may_unregister_rpython_finalizer(self.ffi.space) # Clear the dict to force further accesses to do cdlopen_fetch() # again, and fail because the library was closed. 
Note that the diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -401,7 +401,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/_collections/interp_deque.py b/pypy/module/_collections/interp_deque.py --- a/pypy/module/_collections/interp_deque.py +++ b/pypy/module/_collections/interp_deque.py @@ -1,4 +1,5 @@ import sys +from rpython.rlib.objectmodel import specialize from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, make_weakref_descr @@ -6,7 +7,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.debug import check_nonneg -from rpython.rlib.objectmodel import specialize # A `dequeobject` is composed of a doubly-linked list of `block` nodes. 
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -172,6 +172,7 @@ self.newlines = self.stream.getnewlines() self.stream = None self.fd = -1 + self.may_unregister_rpython_finalizer(self.space) openstreams = getopenstreams(self.space) try: del openstreams[stream] diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -233,6 +233,7 @@ except SocketError: # cpython doesn't return any errors on close pass + self.may_unregister_rpython_finalizer(space) def connect_w(self, space, w_addr): """connect(address) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -217,7 +217,7 @@ return self.space.w_None return w_obj - def descr__eq__(self, space, w_ref2): + def compare(self, space, w_ref2, invert): if not isinstance(w_ref2, W_Weakref): return space.w_NotImplemented ref1 = self @@ -225,11 +225,18 @@ w_obj1 = ref1.dereference() w_obj2 = ref2.dereference() if w_obj1 is None or w_obj2 is None: - return space.is_(ref1, ref2) - return space.eq(w_obj1, w_obj2) + w_res = space.is_(ref1, ref2) + else: + w_res = space.eq(w_obj1, w_obj2) + if invert: + w_res = space.not_(w_res) + return w_res + + def descr__eq__(self, space, w_ref2): + return self.compare(space, w_ref2, invert=False) def descr__ne__(self, space, w_ref2): - return space.not_(space.eq(self, w_ref2)) + return self.compare(space, w_ref2, invert=True) def getlifeline(space, w_obj): lifeline = w_obj.getweakref() diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -150,6 +150,14 @@ assert not (ref1 == []) assert 
ref1 != [] + def test_ne(self): + import _weakref + class X(object): + pass + ref1 = _weakref.ref(X()) + assert ref1.__eq__(X()) is NotImplemented + assert ref1.__ne__(X()) is NotImplemented + def test_getweakrefs(self): import _weakref, gc class A(object): diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml -I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++14 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -63,7 +63,7 @@ includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing", "-std=c++14"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -76,51 +76,66 @@ static std::map s_methods; +int Pseudo_kNothing = 6; +int Pseudo_kSomething = 111; +int Pseudo_kLots = 42; + #define PUBLIC_CPPYY_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname, #dmtype, \ - offsetof(dummy::cppyy_test_data, m_##dmname), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname), false)); \ + /* get_() */ \ argtypes.clear(); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname, argtypes, #dmtype)); \ - s_methods["cppyy_test_data::get_"#dmname] = s_method_id++; \ + 
s_methods["CppyyTestData::get_"#dmname] = s_method_id++; \ + /* & get__r() */ \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_r", argtypes, #dmtype"&")); \ + s_methods["CppyyTestData::get_"#dmname"_r"] = s_method_id++; \ + /* const & get__cr() */ \ + methods.push_back(Cppyy_PseudoMethodInfo( \ + "get_"#dmname"_cr", argtypes, "const "#dmtype"&")); \ + s_methods["CppyyTestData::get_"#dmname"_cr"] = s_method_id++; \ + /* void set_() */ \ argtypes.push_back(#dmtype); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "set_"#dmname, argtypes, "void")); \ - s_methods["cppyy_test_data::set_"#dmname] = s_method_id++; \ + s_methods["CppyyTestData::set_"#dmname] = s_method_id++; \ argtypes.clear(); \ + /* void set_(const &) */ \ argtypes.push_back("const "#dmtype"&"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ - "set_"#dmname"_c", argtypes, "void")); \ - s_methods["cppyy_test_data::set_"#dmname"_c"] = s_method_id++ + "set_"#dmname"_cr", argtypes, "void")); \ + s_methods["CppyyTestData::set_"#dmname"_cr"] = s_method_id++ #define PUBLIC_CPPYY_DATA2(dmname, dmtype) \ PUBLIC_CPPYY_DATA(dmname, dmtype); \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array", #dmtype"[5]", \ - offsetof(dummy::cppyy_test_data, m_##dmname##_array), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname##_array), false)); \ data.push_back(Cppyy_PseudoDatambrInfo("m_"#dmname"_array2", #dmtype"*", \ - offsetof(dummy::cppyy_test_data, m_##dmname##_array2), false)); \ + offsetof(dummy::CppyyTestData, m_##dmname##_array2), false)); \ argtypes.clear(); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname"_array", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::get_"#dmname"_array"] = s_method_id++; \ + s_methods["CppyyTestData::get_"#dmname"_array"] = s_method_id++; \ methods.push_back(Cppyy_PseudoMethodInfo( \ "get_"#dmname"_array2", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::get_"#dmname"_array2"] = s_method_id++ + 
s_methods["CppyyTestData::get_"#dmname"_array2"] = s_method_id++ #define PUBLIC_CPPYY_DATA3(dmname, dmtype, key) \ PUBLIC_CPPYY_DATA2(dmname, dmtype); \ argtypes.push_back(#dmtype"*"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "pass_array", argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::pass_array_"#dmname] = s_method_id++; \ + s_methods["CppyyTestData::pass_array_"#dmname] = s_method_id++; \ argtypes.clear(); argtypes.push_back("void*"); \ methods.push_back(Cppyy_PseudoMethodInfo( \ "pass_void_array_"#key, argtypes, #dmtype"*")); \ - s_methods["cppyy_test_data::pass_void_array_"#key] = s_method_id++ + s_methods["CppyyTestData::pass_void_array_"#key] = s_method_id++ #define PUBLIC_CPPYY_STATIC_DATA(dmname, dmtype) \ data.push_back(Cppyy_PseudoDatambrInfo("s_"#dmname, #dmtype, \ - (ptrdiff_t)&dummy::cppyy_test_data::s_##dmname, true)) + (ptrdiff_t)&dummy::CppyyTestData::s_##dmname, true)) struct Cppyy_InitPseudoReflectionInfo { @@ -284,22 +299,23 @@ //==================================================================== - { // class cppyy_test_data -- - s_handles["cppyy_test_data"] = (cppyy_scope_t)++s_scope_id; + { // class CppyyTestData -- + s_handles["CppyyTestData"] = (cppyy_scope_t)++s_scope_id; std::vector methods; - // cppyy_test_data() + // CppyyTestData() std::vector argtypes; - methods.push_back(Cppyy_PseudoMethodInfo("cppyy_test_data", argtypes, "constructor", kConstructor)); - s_methods["cppyy_test_data::cppyy_test_data"] = s_method_id++; + methods.push_back(Cppyy_PseudoMethodInfo("CppyyTestData", argtypes, "constructor", kConstructor)); + s_methods["CppyyTestData::CppyyTestData"] = s_method_id++; methods.push_back(Cppyy_PseudoMethodInfo("destroy_arrays", argtypes, "void")); - s_methods["cppyy_test_data::destroy_arrays"] = s_method_id++; + s_methods["CppyyTestData::destroy_arrays"] = s_method_id++; std::vector data; PUBLIC_CPPYY_DATA2(bool, bool); PUBLIC_CPPYY_DATA (char, char); + PUBLIC_CPPYY_DATA (schar, signed char); PUBLIC_CPPYY_DATA 
(uchar, unsigned char); PUBLIC_CPPYY_DATA3(short, short, h); PUBLIC_CPPYY_DATA3(ushort, unsigned short, H); @@ -309,12 +325,16 @@ PUBLIC_CPPYY_DATA3(ulong, unsigned long, L); PUBLIC_CPPYY_DATA (llong, long long); PUBLIC_CPPYY_DATA (ullong, unsigned long long); + PUBLIC_CPPYY_DATA (long64, Long64_t); + PUBLIC_CPPYY_DATA (ulong64, ULong64_t); PUBLIC_CPPYY_DATA3(float, float, f); PUBLIC_CPPYY_DATA3(double, double, d); - PUBLIC_CPPYY_DATA (enum, cppyy_test_data::what); + PUBLIC_CPPYY_DATA (ldouble, long double); + PUBLIC_CPPYY_DATA (enum, CppyyTestData::EWhat); PUBLIC_CPPYY_DATA (voidp, void*); PUBLIC_CPPYY_STATIC_DATA(char, char); + PUBLIC_CPPYY_STATIC_DATA(schar, signed char); PUBLIC_CPPYY_STATIC_DATA(uchar, unsigned char); PUBLIC_CPPYY_STATIC_DATA(short, short); PUBLIC_CPPYY_STATIC_DATA(ushort, unsigned short); @@ -324,14 +344,25 @@ PUBLIC_CPPYY_STATIC_DATA(ulong, unsigned long); PUBLIC_CPPYY_STATIC_DATA(llong, long long); PUBLIC_CPPYY_STATIC_DATA(ullong, unsigned long long); + PUBLIC_CPPYY_STATIC_DATA(long64, Long64_t); + PUBLIC_CPPYY_STATIC_DATA(ulong64, ULong64_t); PUBLIC_CPPYY_STATIC_DATA(float, float); PUBLIC_CPPYY_STATIC_DATA(double, double); - PUBLIC_CPPYY_STATIC_DATA(enum, cppyy_test_data::what); + PUBLIC_CPPYY_STATIC_DATA(ldouble, long double); + PUBLIC_CPPYY_STATIC_DATA(enum, CppyyTestData::EWhat); PUBLIC_CPPYY_STATIC_DATA(voidp, void*); + // pretend enum values + data.push_back(Cppyy_PseudoDatambrInfo( + "kNothing", "CppyyTestData::EWhat", (ptrdiff_t)&Pseudo_kNothing, true)); + data.push_back(Cppyy_PseudoDatambrInfo( + "kSomething", "CppyyTestData::EWhat", (ptrdiff_t)&Pseudo_kSomething, true)); + data.push_back(Cppyy_PseudoDatambrInfo( + "kLots", "CppyyTestData::EWhat", (ptrdiff_t)&Pseudo_kLots, true)); + Cppyy_PseudoClassInfo info(methods, s_method_id - methods.size(), data); s_scopes[(cppyy_scope_t)s_scope_id] = info; - } // -- class cppyy_test_data + } // -- class CppyyTest_data } } _init; @@ -385,78 +416,78 @@ } else if (idx == 
s_methods["example01::setPayload_payload*"]) { assert(self && nargs == 1); ((dummy::example01*)self)->setPayload((dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::destroy_arrays"]) { + } else if (idx == s_methods["CppyyTestData::destroy_arrays"]) { assert(self && nargs == 0); - ((dummy::cppyy_test_data*)self)->destroy_arrays(); - } else if (idx == s_methods["cppyy_test_data::set_bool"]) { + ((dummy::CppyyTestData*)self)->destroy_arrays(); + } else if (idx == s_methods["CppyyTestData::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); - } else if (idx == s_methods["cppyy_test_data::set_char"]) { + ((dummy::CppyyTestData*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["CppyyTestData::set_char"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); - } else if (idx == s_methods["cppyy_test_data::set_uchar"]) { + ((dummy::CppyyTestData*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); + } else if (idx == s_methods["CppyyTestData::set_uchar"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); - } else if (idx == s_methods["cppyy_test_data::set_short"]) { + ((dummy::CppyyTestData*)self)->set_uchar(((CPPYY_G__value*)args)[0].obj.uch); + } else if (idx == s_methods["CppyyTestData::set_short"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); - } else if (idx == s_methods["cppyy_test_data::set_short_c"]) { + ((dummy::CppyyTestData*)self)->set_short(((CPPYY_G__value*)args)[0].obj.sh); + } else if (idx == s_methods["CppyyTestData::set_short_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_short_c(*(short*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ushort"]) { + 
((dummy::CppyyTestData*)self)->set_short_cr(*(short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_ushort"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); - } else if (idx == s_methods["cppyy_test_data::set_ushort_c"]) { + ((dummy::CppyyTestData*)self)->set_ushort(((CPPYY_G__value*)args)[0].obj.ush); + } else if (idx == s_methods["CppyyTestData::set_ushort_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ushort_c(*(unsigned short*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_int"]) { + ((dummy::CppyyTestData*)self)->set_ushort_cr(*(unsigned short*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_int"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); - } else if (idx == s_methods["cppyy_test_data::set_int_c"]) { + ((dummy::CppyyTestData*)self)->set_int(((CPPYY_G__value*)args)[0].obj.in); + } else if (idx == s_methods["CppyyTestData::set_int_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_int_c(*(int*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_uint"]) { + ((dummy::CppyyTestData*)self)->set_int_cr(*(int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_uint"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); - } else if (idx == s_methods["cppyy_test_data::set_uint_c"]) { + ((dummy::CppyyTestData*)self)->set_uint(((CPPYY_G__value*)args)[0].obj.uin); + } else if (idx == s_methods["CppyyTestData::set_uint_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_uint_c(*(unsigned int*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_long"]) { + ((dummy::CppyyTestData*)self)->set_uint_cr(*(unsigned 
int*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_long"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); - } else if (idx == s_methods["cppyy_test_data::set_long_c"]) { + ((dummy::CppyyTestData*)self)->set_long(((CPPYY_G__value*)args)[0].obj.i); + } else if (idx == s_methods["CppyyTestData::set_long_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_long_c(*(long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ulong"]) { + ((dummy::CppyyTestData*)self)->set_long_cr(*(long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_ulong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); - } else if (idx == s_methods["cppyy_test_data::set_ulong_c"]) { + ((dummy::CppyyTestData*)self)->set_ulong(((CPPYY_G__value*)args)[0].obj.ulo); + } else if (idx == s_methods["CppyyTestData::set_ulong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ulong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_llong"]) { + ((dummy::CppyyTestData*)self)->set_ulong_cr(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_llong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); - } else if (idx == s_methods["cppyy_test_data::set_llong_c"]) { + ((dummy::CppyyTestData*)self)->set_llong(((CPPYY_G__value*)args)[0].obj.ll); + } else if (idx == s_methods["CppyyTestData::set_llong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_llong_c(*(long long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_ullong"]) { + ((dummy::CppyyTestData*)self)->set_llong_cr(*(long long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == 
s_methods["CppyyTestData::set_ullong"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); - } else if (idx == s_methods["cppyy_test_data::set_ullong_c"]) { + ((dummy::CppyyTestData*)self)->set_ullong(((CPPYY_G__value*)args)[0].obj.ull); + } else if (idx == s_methods["CppyyTestData::set_ullong_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_ullong_c(*(unsigned long*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_float"]) { + ((dummy::CppyyTestData*)self)->set_ullong_cr(*(unsigned long*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_float"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); - } else if (idx == s_methods["cppyy_test_data::set_float_c"]) { + ((dummy::CppyyTestData*)self)->set_float(((CPPYY_G__value*)args)[0].obj.fl); + } else if (idx == s_methods["CppyyTestData::set_float_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_float_c(*(float*)&((CPPYY_G__value*)args)[0]); - } else if (idx == s_methods["cppyy_test_data::set_double"]) { + ((dummy::CppyyTestData*)self)->set_float_cr(*(float*)&((CPPYY_G__value*)args)[0]); + } else if (idx == s_methods["CppyyTestData::set_double"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); - } else if (idx == s_methods["cppyy_test_data::set_double_c"]) { + ((dummy::CppyyTestData*)self)->set_double(((CPPYY_G__value*)args)[0].obj.d); + } else if (idx == s_methods["CppyyTestData::set_double_cr"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_double_c(*(double*)&((CPPYY_G__value*)args)[0]); + ((dummy::CppyyTestData*)self)->set_double_cr(*(double*)&((CPPYY_G__value*)args)[0]); } else { assert(!"method unknown in cppyy_call_v"); } @@ -465,9 +496,9 @@ unsigned char cppyy_call_b(cppyy_method_t method, 
cppyy_object_t self, int nargs, void* args) { unsigned char result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_bool"]) { + if (idx == s_methods["CppyyTestData::get_bool"]) { assert(self && nargs == 0); - result = (unsigned char)((dummy::cppyy_test_data*)self)->get_bool(); + result = (unsigned char)((dummy::CppyyTestData*)self)->get_bool(); } else { assert(!"method unknown in cppyy_call_b"); } @@ -477,12 +508,12 @@ char cppyy_call_c(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { char result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_char"]) { + if (idx == s_methods["CppyyTestData::get_char"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_char(); - } else if (idx == s_methods["cppyy_test_data::get_uchar"]) { + result = ((dummy::CppyyTestData*)self)->get_char(); + } else if (idx == s_methods["CppyyTestData::get_uchar"]) { assert(self && nargs == 0); - result = (char)((dummy::cppyy_test_data*)self)->get_uchar(); + result = (char)((dummy::CppyyTestData*)self)->get_uchar(); } else { assert(!"method unknown in cppyy_call_c"); } @@ -492,12 +523,12 @@ short cppyy_call_h(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { short result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_short"]) { + if (idx == s_methods["CppyyTestData::get_short"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_short(); - } else if (idx == s_methods["cppyy_test_data::get_ushort"]) { + result = ((dummy::CppyyTestData*)self)->get_short(); + } else if (idx == s_methods["CppyyTestData::get_ushort"]) { assert(self && nargs == 0); - result = (short)((dummy::cppyy_test_data*)self)->get_ushort(); + result = (short)((dummy::CppyyTestData*)self)->get_ushort(); } else { assert(!"method unknown in cppyy_call_h"); } @@ -527,9 +558,9 @@ assert(self && nargs == 1); result = 
((dummy::example01*)self)->addDataToAtoi( (const char*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::get_int"]) { + } else if (idx == s_methods["CppyyTestData::get_int"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_int(); + result = ((dummy::CppyyTestData*)self)->get_int(); } else { assert(!"method unknown in cppyy_call_i"); } @@ -556,120 +587,120 @@ assert(self && nargs == 1); result = (long)((dummy::example01*)self)->cyclePayload( (dummy::payload*)(*(long*)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::get_uint"]) { + } else if (idx == s_methods["CppyyTestData::get_uint"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_uint(); - } else if (idx == s_methods["cppyy_test_data::get_long"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint(); + } else if (idx == s_methods["CppyyTestData::get_long"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_long(); - } else if (idx == s_methods["cppyy_test_data::get_ulong"]) { + result = ((dummy::CppyyTestData*)self)->get_long(); + } else if (idx == s_methods["CppyyTestData::get_ulong"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ulong(); - } else if (idx == s_methods["cppyy_test_data::get_bool_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ulong(); + } else if (idx == s_methods["CppyyTestData::get_bool_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_bool_array(); - } else if (idx == s_methods["cppyy_test_data::get_bool_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_bool_array(); + } else if (idx == s_methods["CppyyTestData::get_bool_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_bool_array2(); - } else if (idx == s_methods["cppyy_test_data::get_short_array"]) { + result = 
(long)((dummy::CppyyTestData*)self)->get_bool_array2(); + } else if (idx == s_methods["CppyyTestData::get_short_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_short_array(); - } else if (idx == s_methods["cppyy_test_data::get_short_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_short_array(); + } else if (idx == s_methods["CppyyTestData::get_short_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_short_array2(); - } else if (idx == s_methods["cppyy_test_data::get_ushort_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_short_array2(); + } else if (idx == s_methods["CppyyTestData::get_ushort_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array(); - } else if (idx == s_methods["cppyy_test_data::get_ushort_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ushort_array(); + } else if (idx == s_methods["CppyyTestData::get_ushort_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ushort_array2(); - } else if (idx == s_methods["cppyy_test_data::get_int_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ushort_array2(); + } else if (idx == s_methods["CppyyTestData::get_int_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_int_array(); - } else if (idx == s_methods["cppyy_test_data::get_int_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_int_array(); + } else if (idx == s_methods["CppyyTestData::get_int_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_int_array2(); - } else if (idx == s_methods["cppyy_test_data::get_uint_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_int_array2(); + } else if (idx == s_methods["CppyyTestData::get_uint_array"]) { assert(self && nargs == 0); - result = 
(long)((dummy::cppyy_test_data*)self)->get_uint_array(); - } else if (idx == s_methods["cppyy_test_data::get_uint_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint_array(); + } else if (idx == s_methods["CppyyTestData::get_uint_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_uint_array2(); - } else if (idx == s_methods["cppyy_test_data::get_long_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_uint_array2(); + } else if (idx == s_methods["CppyyTestData::get_long_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_long_array(); - } else if (idx == s_methods["cppyy_test_data::get_long_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_long_array(); + } else if (idx == s_methods["CppyyTestData::get_long_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_long_array2(); - } else if (idx == s_methods["cppyy_test_data::get_ulong_array"]) { + result = (long)((dummy::CppyyTestData*)self)->get_long_array2(); + } else if (idx == s_methods["CppyyTestData::get_ulong_array"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array(); - } else if (idx == s_methods["cppyy_test_data::get_ulong_array2"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ulong_array(); + } else if (idx == s_methods["CppyyTestData::get_ulong_array2"]) { assert(self && nargs == 0); - result = (long)((dummy::cppyy_test_data*)self)->get_ulong_array2(); - } else if (idx == s_methods["cppyy_test_data::pass_array_short"]) { + result = (long)((dummy::CppyyTestData*)self)->get_ulong_array2(); + } else if (idx == s_methods["CppyyTestData::pass_array_short"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == 
s_methods["cppyy_test_data::pass_void_array_h"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_h"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_h( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_h( (*(short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_ushort"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_ushort"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_H"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_H"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_H( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_H( (*(unsigned short**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_int"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_int"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_i"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_i"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_i( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_i( (*(int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_uint"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_uint"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned 
int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_I"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_I"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_I( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_I( (*(unsigned int**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_long"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_long"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_l"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_l"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_l( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_l( (*(long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_ulong"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_ulong"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(unsigned long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_L"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_L"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_L( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_L( (*(unsigned long**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_float"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_float"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = 
(long)((dummy::CppyyTestData*)self)->pass_array( (*(float**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_f"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_f"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_f( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_f( (*(float**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_array_double"]) { + } else if (idx == s_methods["CppyyTestData::pass_array_double"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_array( + result = (long)((dummy::CppyyTestData*)self)->pass_array( (*(double**)&((CPPYY_G__value*)args)[0])); - } else if (idx == s_methods["cppyy_test_data::pass_void_array_d"]) { + } else if (idx == s_methods["CppyyTestData::pass_void_array_d"]) { assert(self && nargs == 1); - result = (long)((dummy::cppyy_test_data*)self)->pass_void_array_d( + result = (long)((dummy::CppyyTestData*)self)->pass_void_array_d( (*(double**)&((CPPYY_G__value*)args)[0])); } else { assert(!"method unknown in cppyy_call_l"); @@ -680,12 +711,12 @@ long long cppyy_call_ll(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { long long result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_llong"]) { + if (idx == s_methods["CppyyTestData::get_llong"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_llong(); - } else if (idx == s_methods["cppyy_test_data::get_ullong"]) { + result = ((dummy::CppyyTestData*)self)->get_llong(); + } else if (idx == s_methods["CppyyTestData::get_ullong"]) { assert(self && nargs == 0); - result = (long long)((dummy::cppyy_test_data*)self)->get_ullong(); + result = (long long)((dummy::CppyyTestData*)self)->get_ullong(); } else { assert(!"method unknown in cppyy_call_ll"); } @@ -695,9 +726,9 @@ float cppyy_call_f(cppyy_method_t method, 
cppyy_object_t self, int nargs, void* args) { float result = 0; const long idx = (long)method; - if (idx == s_methods["cppyy_test_data::get_float"]) { + if (idx == s_methods["CppyyTestData::get_float"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_float(); + result = ((dummy::CppyyTestData*)self)->get_float(); } else { assert(!"method unknown in cppyy_call_f"); } @@ -716,15 +747,48 @@ } else if (idx == s_methods["payload::getData"]) { assert(self && nargs == 0); result = ((dummy::payload*)self)->getData(); - } else if (idx == s_methods["cppyy_test_data::get_double"]) { + } else if (idx == s_methods["CppyyTestData::get_double"]) { assert(self && nargs == 0); - result = ((dummy::cppyy_test_data*)self)->get_double(); + result = ((dummy::CppyyTestData*)self)->get_double(); } else { assert(!"method unknown in cppyy_call_d"); } return result; } +#define DISPATCH_CALL_R_GET(tpname) \ + else if (idx == s_methods["CppyyTestData::get_"#tpname"_r"]) { \ + assert(self && nargs == 0); \ + result = (void*)&((dummy::CppyyTestData*)self)->get_##tpname##_r(); \ + } else if (idx == s_methods["CppyyTestData::get_"#tpname"_cr"]) { \ + assert(self && nargs == 0); \ + result = (void*)&((dummy::CppyyTestData*)self)->get_##tpname##_cr(); \ + } + +void* cppyy_call_r(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + void* result = nullptr; + const long idx = (long)method; + if (0) {} + DISPATCH_CALL_R_GET(bool) + DISPATCH_CALL_R_GET(short) + DISPATCH_CALL_R_GET(ushort) + DISPATCH_CALL_R_GET(int) + DISPATCH_CALL_R_GET(uint) + DISPATCH_CALL_R_GET(long) + DISPATCH_CALL_R_GET(ulong) + DISPATCH_CALL_R_GET(llong) + DISPATCH_CALL_R_GET(ullong) + DISPATCH_CALL_R_GET(long64) + DISPATCH_CALL_R_GET(ulong64) + DISPATCH_CALL_R_GET(float) + DISPATCH_CALL_R_GET(double) + DISPATCH_CALL_R_GET(ldouble) + else { + assert(!"method unknown in cppyy_call_r"); + } + return result; +} + char* cppyy_call_s(cppyy_method_t method, cppyy_object_t self, int nargs, 
void* args, size_t* /* length */) { char* result = 0; const long idx = (long)method; @@ -750,9 +814,9 @@ assert(nargs == 0 || nargs == 1); if (nargs == 0) result = new dummy::payload; else if (nargs == 1) result = new dummy::payload(((CPPYY_G__value*)args)[0].obj.d); - } else if (idx == s_methods["cppyy_test_data::cppyy_test_data"]) { + } else if (idx == s_methods["CppyyTestData::CppyyTestData"]) { assert(nargs == 0); - result = new dummy::cppyy_test_data; + result = new dummy::CppyyTestData; } else { assert(!"method unknown in cppyy_constructor"); } @@ -792,11 +856,17 @@ return 0; } +int cppyy_is_template(const char* /* template_name */) { + return 0; +} + int cppyy_is_abstract(cppyy_type_t /* type) */) { return 0; } -int cppyy_is_enum(const char* /* type_name */) { +int cppyy_is_enum(const char* type_name) { + if (strcmp(type_name, "CppyyTestData::EWhat") == 0) + return 1; return 0; } diff --git a/pypy/module/cppyy/test/Makefile b/pypy/module/cppyy/test/Makefile --- a/pypy/module/cppyy/test/Makefile +++ b/pypy/module/cppyy/test/Makefile @@ -29,7 +29,7 @@ endif ifeq ($(DUMMY),t) - cppflags2=-O3 -fPIC -rdynamic + cppflags2=-O3 -fPIC -rdynamic -std=c++11 -DCPPYY_DUMMY_BACKEND else ifeq ($(CLING),t) cppflags2=-O3 -fPIC -rdynamic else @@ -55,7 +55,7 @@ else # reflex %Dict.so: %_rflx.cpp %.cxx - g++ -o $@ $^ -shared -std=c++14 $(cppflags) $(cppflags2) + g++ -o $@ $^ -shared $(cppflags) $(cppflags2) %_rflx.cpp: %.h %.xml $(genreflex) $< $(genreflexflags) --selection=$*.xml --rootmap=$*Dict.rootmap --rootmap-lib=$*Dict.so @@ -66,7 +66,7 @@ # TODO: methptrgetter causes these tests to crash, so don't use it for now std_streamsDict.so: std_streams.cxx std_streams.h std_streams.xml $(genreflex) std_streams.h --selection=std_streams.xml - g++ -o $@ std_streams_rflx.cpp std_streams.cxx -shared -std=c++14 $(cppflags) $(cppflags2) + g++ -o $@ std_streams_rflx.cpp std_streams.cxx -shared -std=c++11 $(cppflags) $(cppflags2) endif .PHONY: clean diff --git 
a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -50,7 +50,8 @@ eci = ExternalCompilationInfo( separate_module_files=[srcpath.join('dummy_backend.cxx')], include_dirs=[incpath, tstpath, cdir], - compile_extra=['-DRPY_EXTERN=RPY_EXPORTED'], + compile_extra=['-DRPY_EXTERN=RPY_EXPORTED', '-DCPPYY_DUMMY_BACKEND', + '-fno-strict-aliasing', '-std=c++11'], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ b/pypy/module/cppyy/test/datatypes.h @@ -1,4 +1,15 @@ +#ifndef CPPYY_DUMMY_BACKEND #include "RtypesCore.h" +#else +// copied from RtypesCore.h ... +#if defined(R__WIN32) && !defined(__CINT__) +typedef __int64 Long64_t; //Portable signed long integer 8 bytes +typedef unsigned __int64 ULong64_t; //Portable unsigned long integer 8 bytes +#else +typedef long long Long64_t; //Portable signed long integer 8 bytes +typedef unsigned long long ULong64_t;//Portable unsigned long integer 8 bytes +#endif +#endif #include const int N = 5; diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -157,13 +157,13 @@ assert isinstance(w_obj, FakeFloat) return w_obj.val + @specialize.arg(1) def interp_w(self, RequiredClass, w_obj, can_be_None=False): if can_be_None and w_obj is None: return None if not isinstance(w_obj, RequiredClass): raise TypeError return w_obj - interp_w._annspecialcase_ = 'specialize:arg(1)' def getarg_w(self, code, w_obj): # for retrieving buffers return FakeBuffer(w_obj) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -36,8 +36,6 @@ from rpython.rlib.objectmodel import specialize from pypy.module import exceptions from pypy.module.exceptions import 
interp_exceptions -# CPython 2.4 compatibility -from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount @@ -985,7 +983,7 @@ py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook - _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], + _reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) def reinit_tls(space): _reinit_tls() @@ -1614,9 +1612,8 @@ miniglobals = {'__name__': __name__, # for module name propagation } exec source.compile() in miniglobals - call_external_function = miniglobals['cpy_call_external'] + call_external_function = specialize.ll()(miniglobals['cpy_call_external']) call_external_function._dont_inline_ = True - call_external_function._annspecialcase_ = 'specialize:ll' call_external_function._gctransformer_hint_close_stack_ = True # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -137,8 +137,7 @@ """This is the same as PyDict_Merge(a, b, 1) in C, or a.update(b) in Python. Return 0 on success or -1 if an exception was raised. """ - space.call_method(space.w_dict, "update", w_obj, w_other) - return 0 + return PyDict_Merge(space, w_obj, w_other, 1) @cpython_api([PyObject], PyObject) def PyDict_Keys(space, w_obj): diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -62,12 +62,14 @@ position must be positive, indexing from the end of the list is not supported. 
If pos is out of bounds, return NULL and set an IndexError exception.""" + from pypy.module.cpyext.sequence import CPyListStrategy if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) if index < 0 or index >= w_list.length(): raise oefmt(space.w_IndexError, "list index out of range") - w_list.ensure_object_strategy() # make sure we can return a borrowed obj - # XXX ^^^ how does this interact with CPyListStrategy? + cpy_strategy = space.fromcache(CPyListStrategy) + if w_list.strategy is not cpy_strategy: + w_list.ensure_object_strategy() # make sure we can return a borrowed obj w_res = w_list.getitem(index) return w_res # borrowed ref diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, cpython_struct, \ METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING -from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyobject import PyObject, as_pyobj from pypy.interpreter.module import Module from pypy.module.cpyext.methodobject import ( W_PyCFunctionObject, PyCFunction_NewEx, PyDescr_NewMethod, @@ -124,11 +124,17 @@ else: PyErr_BadInternalCall(space) - at cpython_api([PyObject], rffi.CCHARP, error=0) -def PyModule_GetName(space, module): + at cpython_api([PyObject], rffi.CCHARP) +def PyModule_GetName(space, w_mod): """ Return module's __name__ value. If the module does not provide one, - or if it is not a string, SystemError is raised and NULL is returned.""" - raise NotImplementedError - - + or if it is not a string, SystemError is raised and NULL is returned. 
+ """ + # NOTE: this version of the code works only because w_mod.w_name is + # a wrapped string object attached to w_mod; so it makes a + # PyStringObject that will live as long as the module itself, + # and returns a "char *" inside this PyStringObject. + if not isinstance(w_mod, Module): + raise oefmt(space.w_SystemError, "PyModule_GetName(): not a module") + from pypy.module.cpyext.bytesobject import PyString_AsString + return PyString_AsString(space, as_pyobj(space, w_mod.w_name)) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -15,6 +15,7 @@ from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rawrefcount +from rpython.rlib.debug import fatalerror #________________________________________________________ @@ -24,11 +25,9 @@ basestruct = PyObject.TO W_BaseObject = W_ObjectObject - def get_dealloc(self, space): + def get_dealloc(self): from pypy.module.cpyext.typeobject import subtype_dealloc - return llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + return subtype_dealloc.api_func def allocate(self, space, w_type, itemcount=0): # similar to PyType_GenericAlloc? @@ -108,10 +107,8 @@ return tp_alloc(space, w_type, itemcount) if tp_dealloc: - def get_dealloc(self, space): - return llhelper( - tp_dealloc.api_func.functype, - tp_dealloc.api_func.get_wrapper(space)) + def get_dealloc(self): + return tp_dealloc.api_func if tp_attach: def attach(self, space, pyobj, w_obj): @@ -192,6 +189,8 @@ rawrefcount.create_link_pypy(w_obj, py_obj) +w_marker_deallocating = W_Root() + def from_ref(space, ref): """ Finds the interpreter object corresponding to the given reference. 
If the @@ -202,7 +201,23 @@ return None w_obj = rawrefcount.to_obj(W_Root, ref) if w_obj is not None: - return w_obj + if w_obj is not w_marker_deallocating: + return w_obj + fatalerror( + "*** Invalid usage of a dying CPython object ***\n" + "\n" + "cpyext, the emulation layer, detected that while it is calling\n" + "an object's tp_dealloc, the C code calls back a function that\n" + "tries to recreate the PyPy version of the object. Usually it\n" + "means that tp_dealloc calls some general PyXxx() API. It is\n" + "a dangerous and potentially buggy thing to do: even in CPython\n" + "the PyXxx() function could, in theory, cause a reference to the\n" + "object to be taken and stored somewhere, for an amount of time\n" + "exceeding tp_dealloc itself. Afterwards, the object will be\n" + "freed, making that reference point to garbage.\n" + ">>> PyPy could contain some workaround to still work if\n" + "you are lucky, but it is not done so far; better fix the bug in\n" + "the CPython extension.") # This reference is not yet a real interpreter object. # Realize it. 
@@ -233,7 +248,8 @@ INTERPLEVEL_API['as_pyobj'] = as_pyobj def pyobj_has_w_obj(pyobj): - return rawrefcount.to_obj(W_Root, pyobj) is not None + w_obj = rawrefcount.to_obj(W_Root, pyobj) + return w_obj is not None and w_obj is not w_marker_deallocating INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) @@ -335,6 +351,7 @@ pto = obj.c_ob_type #print >>sys.stderr, "Calling dealloc slot", pto.c_tp_dealloc, "of", obj, \ # "'s type which is", rffi.charp2str(pto.c_tp_name) + rawrefcount.mark_deallocating(w_marker_deallocating, obj) generic_cpy_call(space, pto.c_tp_dealloc, obj) @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -8,12 +8,12 @@ cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl, Py_buffer, Py_bufferP) from pypy.module.cpyext.typeobjectdefs import ( - unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, + unaryfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc, getbufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef +from pypy.module.cpyext.pyobject import make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State @@ -21,8 +21,10 @@ from pypy.interpreter.argument import Arguments from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, not_rpython from rpython.tool.sourcetools import 
func_renamer +from rpython.flowspace.model import Constant +from rpython.flowspace.specialcase import register_flow_sc from rpython.rtyper.annlowlevel import llhelper from pypy.module.sys.version import CPYTHON_VERSION @@ -59,6 +61,17 @@ "expected %d-%d arguments, got %d", low, high, space.len_w(w_ob)) + at not_rpython +def llslot(space, func): + return llhelper(func.api_func.functype, func.api_func.get_wrapper(space)) + + at register_flow_sc(llslot) +def sc_llslot(ctx, v_space, v_func): + assert isinstance(v_func, Constant) + get_llhelper = v_func.value.api_func.get_llhelper + return ctx.appcall(get_llhelper, v_space) + + def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs) @@ -106,7 +119,7 @@ args_w = space.fixedview(w_args) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, w_self, args_w[0], arg3) def wrap_ternaryfunc_r(space, w_self, w_args, func): @@ -121,7 +134,7 @@ Py_DecRef(space, ref) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, args_w[0], w_self, arg3) @@ -322,7 +335,7 @@ self.strides = [1] else: self.strides = strides - self.ndim = ndim + self.ndim = ndim self.itemsize = itemsize self.readonly = readonly @@ -437,9 +450,10 @@ try: return SLOTS[key] except KeyError: - ret = build_slot_tp_function(space, typedef, name) - SLOTS[key] = ret - return ret + slot_func = build_slot_tp_function(space, typedef, name) + api_func = slot_func.api_func if slot_func else None + SLOTS[key] = api_func + return api_func def build_slot_tp_function(space, typedef, name): w_type = space.gettypeobject(typedef) @@ -472,7 +486,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self): return space.call_function(slot_fn, w_self) - api_func = slot_func.api_func handled = True # 
binary functions @@ -499,7 +512,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg): return space.call_function(slot_fn, w_self, w_arg) - api_func = slot_func.api_func handled = True # binary-with-Py_ssize_t-type @@ -517,7 +529,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, arg): return space.call_function(slot_fn, w_self, space.wrap(arg)) - api_func = slot_func.api_func handled = True # ternary functions @@ -532,7 +543,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg1, w_arg2): return space.call_function(slot_fn, w_self, w_arg1, w_arg2) - api_func = slot_func.api_func handled = True if handled: @@ -552,7 +562,7 @@ else: space.call_function(delattr_fn, w_self, w_name) return 0 - api_func = slot_tp_setattro.api_func + slot_func = slot_tp_setattro elif name == 'tp_getattro': getattr_fn = w_type.getdictvalue(space, '__getattribute__') if getattr_fn is None: @@ -562,7 +572,7 @@ @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) - api_func = slot_tp_getattro.api_func + slot_func = slot_tp_getattro elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -574,7 +584,7 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(call_fn, args) - api_func = slot_tp_call.api_func + slot_func = slot_tp_call elif name == 'tp_iternext': iternext_fn = w_type.getdictvalue(space, 'next') @@ -590,7 +600,7 @@ if not e.match(space, space.w_StopIteration): raise return None - api_func = slot_tp_iternext.api_func + slot_func = slot_tp_iternext elif name == 'tp_init': init_fn = w_type.getdictvalue(space, '__init__') @@ -605,7 +615,7 @@ w_stararg=w_args, w_starstararg=w_kwds) space.call_args(init_fn, args) return 0 - api_func = 
slot_tp_init.api_func + slot_func = slot_tp_init elif name == 'tp_new': new_fn = w_type.getdictvalue(space, '__new__') if new_fn is None: @@ -617,12 +627,12 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) - api_func = slot_tp_new.api_func + slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': buff_fn = w_type.getdictvalue(space, '__buffer__') if buff_fn is None: return - @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, header=None, error=-1) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def buff_w(space, w_self, view, flags): @@ -646,14 +656,14 @@ return 0 # XXX remove this when it no longer crashes a translated PyPy return - api_func = buff_w.api_func + slot_func = buff_w else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length # richcmpfunc(s) return - return lambda: llhelper(api_func.functype, api_func.get_wrapper(space)) + return slot_func PyWrapperFlag_KEYWORDS = 1 @@ -984,8 +994,8 @@ slotdefs = sorted(slotdefs, key=slotdef_sort_key) slotdefs_for_tp_slots = unrolling_iterable( - [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in slotdefs]) + [(x.method_name, x.slot_name, x.slot_names, + x.slot_func.api_func if x.slot_func else None) for x in slotdefs]) slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py From pypy.commits at gmail.com Tue Dec 13 18:53:18 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 15:53:18 -0800 (PST) Subject: [pypy-commit] pypy cling-support: resolve signed v.s. 
unsigned annotator problem Message-ID: <585089ee.094c2e0a.db00b.9bed@mx.google.com> Author: Wim Lavrijsen Branch: cling-support Changeset: r89050:0fa46e7e36a8 Date: 2016-12-13 11:33 -0800 http://bitbucket.org/pypy/pypy/changeset/0fa46e7e36a8/ Log: resolve signed v.s. unsigned annotator problem diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -313,7 +313,7 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, random_effects_on_gcobjs=False) @@ -490,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): From pypy.commits at gmail.com Tue Dec 13 18:53:21 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 15:53:21 -0800 (PST) Subject: [pypy-commit] pypy cling-support: give up on long double for now ... this just doesn't work :( Message-ID: <585089f1.c14d190a.bde5d.381c@mx.google.com> Author: Wim Lavrijsen Branch: cling-support Changeset: r89051:33831e11eab1 Date: 2016-12-13 13:00 -0800 http://bitbucket.org/pypy/pypy/changeset/33831e11eab1/ Log: give up on long double for now ... this just doesn't work :( diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -386,14 +386,7 @@ fval = float(rfloat.rstring_to_float(default)) else: fval = float(0.) 
- # see ffitypes.LongDoubleTypeMixin: long double not really - # supported in rffi - self.default = fval #r_longfloat(fval) - - def from_memory(self, space, w_obj, w_pycppclass, offset): - address = self._get_raw_address(space, w_obj, offset) - rffiptr = rffi.cast(self.c_ptrtype, address) - return self._wrap_object(space, rffiptr[0]) + self.default = r_longfloat(fval) class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): _immutable_fields_ = ['typecode'] @@ -746,8 +739,8 @@ _converters["const float&"] = ConstFloatRefConverter _converters["double"] = DoubleConverter _converters["const double&"] = ConstDoubleRefConverter -_converters["long double"] = LongDoubleConverter -_converters["const long double&"] = ConstLongDoubleRefConverter +#_converters["long double"] = LongDoubleConverter +#_converters["const long double&"] = ConstLongDoubleRefConverter _converters["const char*"] = CStringConverter _converters["void*"] = VoidPtrConverter _converters["void**"] = VoidPtrPtrConverter @@ -841,7 +834,7 @@ ('Q', rffi.sizeof(rffi.ULONGLONG), ("unsigned long long", "unsigned long long int", "ULong64_t")), ('f', rffi.sizeof(rffi.FLOAT), ("float",)), ('d', rffi.sizeof(rffi.DOUBLE), ("double",)), - ('g', rffi.sizeof(rffi.LONGDOUBLE), ("long double",)), +# ('g', rffi.sizeof(rffi.LONGDOUBLE), ("long double",)), ) for tcode, tsize, names in array_info: diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -329,7 +329,7 @@ (rffi.ULONGLONG, capi.c_call_ll, ("unsigned long long", "unsigned long long int", "ULong64_t")), (rffi.FLOAT, capi.c_call_f, ("float",)), (rffi.DOUBLE, capi.c_call_d, ("double",)), - (rffi.LONGDOUBLE, capi.c_call_ld, ("long double",)), +# (rffi.LONGDOUBLE, capi.c_call_ld, ("long double",)), ) for c_type, stub, names in type_info: diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- 
a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -239,25 +239,20 @@ _mixin_ = True _immutable_fields_ = ['c_type', 'c_ptrtype', 'typecode'] - # long double is not really supported, so work with normal - # double instead; doing it here keeps this localized - c_type = rffi.DOUBLE #rffi.LONGDOUBLE - c_ptrtype = rffi.DOUBLEP #rffi.LONGDOUBLEP + c_type = rffi.LONGDOUBLE + c_ptrtype = rffi.LONGDOUBLEP typecode = 'g' + # long double is not really supported ... def _unwrap_object(self, space, w_obj): - #return r_longfloat(space.float_w(w_obj)) - return float(space.float_w(w_obj)) + return r_longfloat(space.float_w(w_obj)) def _wrap_object(self, space, obj): - # return space.wrap(obj) - dbl = rffi.cast(rffi.DOUBLE, obj) - return space.wrap(float(dbl)) + return space.wrap(obj) def cffi_type(self, space): state = space.fromcache(State) - #return state.c_ldouble - return state.c_double + return state.c_ldouble def typeid(c_type): "NOT_RPYTHON" diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -64,9 +64,9 @@ assert round(c.m_double + 77., 11) == 0 assert round(c.get_double_cr() + 77., 11) == 0 assert round(c.get_double_r() + 77., 11) == 0 - assert round(c.m_ldouble + 88., 24) == 0 - assert round(c.get_ldouble_cr() + 88., 24) == 0 - assert round(c.get_ldouble_r() + 88., 24) == 0 + #assert round(c.m_ldouble + 88., 24) == 0 + #assert round(c.get_ldouble_cr() + 88., 24) == 0 + #assert round(c.get_ldouble_r() + 88., 24) == 0 assert round(c.m_double + 77., 8) == 0 # reading of enum types From pypy.commits at gmail.com Tue Dec 13 18:53:23 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 15:53:23 -0800 (PST) Subject: [pypy-commit] pypy cling-support: tell about the pip Message-ID: <585089f3.8dcd190a.d236e.a34d@mx.google.com> Author: Wim Lavrijsen Branch: cling-support Changeset: r89052:f3ba0bbf10c8 Date: 
2016-12-13 15:44 -0800 http://bitbucket.org/pypy/pypy/changeset/f3ba0bbf10c8/ Log: tell about the pip diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -2,9 +2,12 @@ ============================ The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++. It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ -reflection as extracted from header files. -The module itself is built into PyPy (an alternative exists for CPython), but +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but it requires a backend, installable through pip, to interface with Cling. .. _Cling: https://root.cern.ch/cling @@ -12,112 +15,22 @@ .. _clang: http://clang.llvm.org/ -Motivation ----------- -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. 
-First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Cling adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html - - Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build) and Python2.7 (for LLVM). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. 
+Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ pypy-c -m pip install PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ - -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. 
- -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. -Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling. Basic bindings example From pypy.commits at gmail.com Tue Dec 13 19:17:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 16:17:27 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Copy PyBytes_FromFormat from CPython 3.5 (c3da1ee47e6b) Message-ID: <58508f97.1a082e0a.4fb26.5229@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89053:f52e625db5b2 Date: 2016-12-14 00:16 +0000 http://bitbucket.org/pypy/pypy/changeset/f52e625db5b2/ Log: Copy PyBytes_FromFormat from CPython 3.5 (c3da1ee47e6b) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1346,6 +1346,7 @@ source_dir / "pythread.c", source_dir / "missing.c", source_dir / "pymem.c", + source_dir / "bytesobject.c", ] def build_eci(building_bridge, export_symbols, code, use_micronumpy=False): diff --git a/pypy/module/cpyext/include/bytesobject.h b/pypy/module/cpyext/include/bytesobject.h --- a/pypy/module/cpyext/include/bytesobject.h +++ b/pypy/module/cpyext/include/bytesobject.h @@ -56,6 +56,9 @@ #define PyString_CHECK_INTERNED(op) (((PyStringObject *)(op))->ob_sstate) +PyAPI_FUNC(PyObject *) PyBytes_FromFormatV(const char*, va_list); +PyAPI_FUNC(PyObject *) 
PyBytes_FromFormat(const char*, ...); + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/bytesobject.c b/pypy/module/cpyext/src/bytesobject.c new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/src/bytesobject.c @@ -0,0 +1,213 @@ +#include "Python.h" + +#if defined(Py_ISDIGIT) || defined(Py_ISALPHA) +#error remove these definitions +#endif +#define Py_ISDIGIT isdigit +#define Py_ISALPHA isalpha + +PyObject * +PyBytes_FromFormatV(const char *format, va_list vargs) +{ + va_list count; + Py_ssize_t n = 0; + const char* f; + char *s; + PyObject* string; + + Py_VA_COPY(count, vargs); + /* step 1: figure out how large a buffer we need */ + for (f = format; *f; f++) { + if (*f == '%') { + const char* p = f; + while (*++f && *f != '%' && !Py_ISALPHA(*f)) + ; + + /* skip the 'l' or 'z' in {%ld, %zd, %lu, %zu} since + * they don't affect the amount of space we reserve. + */ + if ((*f == 'l' || *f == 'z') && + (f[1] == 'd' || f[1] == 'u')) + ++f; + + switch (*f) { + case 'c': + { + int c = va_arg(count, int); + if (c < 0 || c > 255) { + PyErr_SetString(PyExc_OverflowError, + "PyBytes_FromFormatV(): %c format " + "expects an integer in range [0; 255]"); + return NULL; + } + n++; + break; + } + case '%': + n++; + break; + case 'd': case 'u': case 'i': case 'x': + (void) va_arg(count, int); + /* 20 bytes is enough to hold a 64-bit + integer. Decimal takes the most space. + This isn't enough for octal. */ + n += 20; + break; + case 's': + s = va_arg(count, char*); + n += strlen(s); + break; + case 'p': + (void) va_arg(count, int); + /* maximum 64-bit pointer representation: + * 0xffffffffffffffff + * so 19 characters is enough. + * XXX I count 18 -- what's the extra for? + */ + n += 19; + break; + default: + /* if we stumble upon an unknown + formatting code, copy the rest of + the format string to the output + string. 
(we cannot just skip the + code, since there's no way to know + what's in the argument list) */ + n += strlen(p); + goto expand; + } + } else + n++; + } + expand: + /* step 2: fill the buffer */ + /* Since we've analyzed how much space we need for the worst case, + use sprintf directly instead of the slower PyOS_snprintf. */ + string = PyBytes_FromStringAndSize(NULL, n); + if (!string) + return NULL; + + s = PyBytes_AsString(string); + + for (f = format; *f; f++) { + if (*f == '%') { + const char* p = f++; + Py_ssize_t i; + int longflag = 0; + int size_tflag = 0; + /* parse the width.precision part (we're only + interested in the precision value, if any) */ + n = 0; + while (Py_ISDIGIT(*f)) + n = (n*10) + *f++ - '0'; + if (*f == '.') { + f++; + n = 0; + while (Py_ISDIGIT(*f)) + n = (n*10) + *f++ - '0'; + } + while (*f && *f != '%' && !Py_ISALPHA(*f)) + f++; + /* handle the long flag, but only for %ld and %lu. + others can be added when necessary. */ + if (*f == 'l' && (f[1] == 'd' || f[1] == 'u')) { + longflag = 1; + ++f; + } + /* handle the size_t flag. 
*/ + if (*f == 'z' && (f[1] == 'd' || f[1] == 'u')) { + size_tflag = 1; + ++f; + } + + switch (*f) { + case 'c': + { + int c = va_arg(vargs, int); + /* c has been checked for overflow in the first step */ + *s++ = (unsigned char)c; + break; + } + case 'd': + if (longflag) + sprintf(s, "%ld", va_arg(vargs, long)); + else if (size_tflag) + sprintf(s, "%" PY_FORMAT_SIZE_T "d", + va_arg(vargs, Py_ssize_t)); + else + sprintf(s, "%d", va_arg(vargs, int)); + s += strlen(s); + break; + case 'u': + if (longflag) + sprintf(s, "%lu", + va_arg(vargs, unsigned long)); + else if (size_tflag) + sprintf(s, "%" PY_FORMAT_SIZE_T "u", + va_arg(vargs, size_t)); + else + sprintf(s, "%u", + va_arg(vargs, unsigned int)); + s += strlen(s); + break; + case 'i': + sprintf(s, "%i", va_arg(vargs, int)); + s += strlen(s); + break; + case 'x': + sprintf(s, "%x", va_arg(vargs, int)); + s += strlen(s); + break; + case 's': + p = va_arg(vargs, char*); + i = strlen(p); + if (n > 0 && i > n) + i = n; + Py_MEMCPY(s, p, i); + s += i; + break; + case 'p': + sprintf(s, "%p", va_arg(vargs, void*)); + /* %p is ill-defined: ensure leading 0x. */ + if (s[1] == 'X') + s[1] = 'x'; + else if (s[1] != 'x') { + memmove(s+2, s, strlen(s)+1); + s[0] = '0'; + s[1] = 'x'; + } + s += strlen(s); + break; + case '%': + *s++ = '%'; + break; + default: + strcpy(s, p); + s += strlen(s); + goto end; + } + } else + *s++ = *f; + } + + end: + _PyBytes_Resize(&string, s - PyBytes_AS_STRING(string)); + return string; +} + +PyObject * +PyBytes_FromFormat(const char *format, ...) 
+{ + PyObject* ret; + va_list vargs; + +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, format); +#else + va_start(vargs); +#endif + ret = PyBytes_FromFormatV(format, vargs); + va_end(vargs); + return ret; +} + diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -202,6 +202,19 @@ module.getbytes() module.c_only() + def test_FromFormat(self): + module = self.import_extension('foo', [ + ("fmt", "METH_VARARGS", + """ + PyObject* fmt = PyTuple_GetItem(args, 0); + int n = PyLong_AsLong(PyTuple_GetItem(args, 1)); + PyObject* result = PyBytes_FromFormat(PyBytes_AsString(fmt), n); + return result; + """), + ]) + print(module.fmt(b'd:%d', 10)) + assert module.fmt(b'd:%d', 10) == b'd:10' + class TestBytes(BaseApiTest): def test_bytes_resize(self, space, api): From pypy.commits at gmail.com Tue Dec 13 19:49:29 2016 From: pypy.commits at gmail.com (wlav) Date: Tue, 13 Dec 2016 16:49:29 -0800 (PST) Subject: [pypy-commit] pypy default: merge cling-support Message-ID: <58509719.46052e0a.af613.9f71@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r89054:39430359cc41 Date: 2016-12-13 16:41 -0800 http://bitbucket.org/pypy/pypy/changeset/39430359cc41/ Log: merge cling-support diff too long, truncating to 2000 out of 11481 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,135 +1,36 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. 
-Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++. +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a backend, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. 
-It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html - Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. 
+Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build) and Python2.7 (for LLVM). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ pypy-c -m pip install PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ - -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. 
-The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. - -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. -Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling. Basic bindings example diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml -I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) 
diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( "cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, 
nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + 
"cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return _c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = 
rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG -C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ 
-1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as 
ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def 
tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. (char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - 
space.setattr(w_self, space.wrap('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. 
remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.wrap(i)) - activate_branch(space, w_b) - 
space.call_method(w_branch, "SetStatus", space.wrap(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.wrap('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it for reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data 
- w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.wrap('_'+attr), space.wrap(cdata)) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) - - def iter_w(self): - return self.space.wrap(self) - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. 
Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.wrap("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.wrap("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, 
but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of 
libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... 
+# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = 
capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, 
objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), 'is_enum' : ([c_ccharp], c_int), # type/class reflection information @@ -216,8 +249,14 @@ 'strtoull' : ([c_ccharp], c_ullong), 'free' : ([c_voidp], c_void), - 'charp2stdstring' : ([c_ccharp], c_object), + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + } # size/offset are backend-specific but fixed after load @@ -277,87 +316,99 @@ ptr = w_cdata.unsafe_escaping_ptr() return rffi.cast(rffi.VOIDP, ptr) +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + def c_load_dictionary(name): return libffi.CDLL(name) # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] + args = [_ArgH(cppscope.handle), _ArgL(iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): - return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) -def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + args = [_ArgH(cppclass.handle), _ArgH(cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_ArgH(cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'deallocate', [_ArgH(cppclass.handle), _ArgH(cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'destruct', 
[_ArgH(cppclass.handle), _ArgH(cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), 
_ArgP(cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return call_capi(space, 'call_s', args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'call_s', + [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), + _ArgP(rffi.cast(rffi.VOIDP, length))]) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return _cdata_to_ccharp(space, w_cstr), cstr_len def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def 
c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), _ArgH(cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) -def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHPTRGETTER_PTR, - _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) +def c_get_function_address(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return rffi.cast(C_FUNC_PTR, + _cdata_to_ptr(space, call_capi(space, 'get_function_address', args))) # handling of function argument buffer --------------------------------------- def c_allocate_function_args(space, size): - return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) + return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_ArgL(size)])) def c_deallocate_function_args(space, cargs): - call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) + call_capi(space, 'deallocate_function_args', [_ArgP(cargs)]) def c_function_arg_sizeof(space): state = space.fromcache(State) return state.c_sizeof_farg @@ -367,30 +418,34 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_ArgH(scope)])) +def c_is_template(space, name): + return space.bool_w(call_capi(space, 'is_template', [_ArgS(name)])) +def c_is_abstract(space, cpptype): + return space.bool_w(call_capi(space, 'is_abstract', [_ArgH(cpptype)])) def c_is_enum(space, name): - return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) + return space.bool_w(call_capi(space, 'is_enum', [_ArgS(name)])) # type/class reflection 
information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_ArgH(cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_ArgH(cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_ArgH(handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_ArgH(cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] + args = [_ArgH(cppclass.handle), _ArgL(base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_ArgH(derived.handle), _ArgH(base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + args = [_ArgH(derived_h), _ArgH(base_h), _ArgH(address), _ArgL(direction)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -401,13 +456,13 @@ # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(h=cppscope.handle)] + args = [_ArgH(cppscope.handle)] return 
space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] + args = [_ArgH(cppscope.handle), _ArgL(imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -423,91 +478,91 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, 
cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(h=cppscope.handle) - arg2 = _Arg(l=index) + arg1 = _ArgH(cppscope.handle) + arg2 = _ArgL(index) args = [c_resolve_name(space, charp2str_free(space, - call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) + call_capi(space, 'method_template_arg_name', [arg1, arg2, _ArgL(iarg)])) ) for iarg in range(nargs)] return args def c_get_method(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] + args = [_ArgH(nss.handle), _ArgH(lc.handle), _ArgH(rc.handle), _ArgS(op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = 
[_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_ArgH(cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers 
--------------------------------------------------------------- def c_strtoll(space, svalue): - return space.r_longlong_w(call_capi(space, 'strtoll', [_Arg(s=svalue)])) + return space.r_longlong_w(call_capi(space, 'strtoll', [_ArgS(svalue)])) def c_strtoull(space, svalue): - return space.r_ulonglong_w(call_capi(space, 'strtoull', [_Arg(s=svalue)])) + return space.r_ulonglong_w(call_capi(space, 'strtoull', [_ArgS(svalue)])) def c_free(space, voidp): - call_capi(space, 'free', [_Arg(vp=voidp)]) + call_capi(space, 'free', [_ArgP(voidp)]) def charp2str_free(space, cdata): charp = rffi.cast(rffi.CCHARP, _cdata_to_ptr(space, cdata)) @@ -515,15 +570,60 @@ c_free(space, rffi.cast(rffi.VOIDP, charp)) return pystr -def c_charp2stdstring(space, svalue): - return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) +def c_charp2stdstring(space, svalue, sz): + return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', + [_ArgS(svalue), _ArgH(rffi.cast(rffi.ULONG, sz))])) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'stdstring2charp', + [_ArgH(cppstr), _ArgP(rffi.cast(rffi.VOIDP, sz))]) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(_cdata_to_ccharp(space, w_cstr), cstr_len) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_ArgH(cppobject)])) -# loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) + +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) +def c_stdvector_valuesize(space, pystr): + return 
_cdata_to_size_t(space, call_capi(space, 'stdvector_valuesize', [_ArgS(pystr)])) + + +# TODO: factor these out ... +# pythonizations +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") diff --git a/pypy/module/cppyy/capi/reflex_capi.py b/pypy/module/cppyy/capi/reflex_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/reflex_capi.py +++ /dev/null @@ -1,59 +0,0 @@ -import py, os - -from rpython.rlib import libffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -import commands -(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") - -if os.environ.get("ROOTSYS"): - if config_stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = 
[incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - if config_stat == 0: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() - else: - rootincpath = [] - rootlibpath = [] - -def identify(): - return 'Reflex' - -ts_reflect = False -ts_call = 'auto' -ts_memory = 'auto' -ts_helper = 'auto' - -std_string_name = 'std::basic_string' - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("reflexcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["reflexcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Reflex"], - use_cpp_linker=True, -) - -def c_load_dictionary(name): - return libffi.CDLL(name) - - -# Reflex-specific pythonizations -def register_pythonizations(space): - "NOT_RPYTHON" - pass - -def pythonize(space, name, w_pycppclass): - pass diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -3,8 +3,8 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import r_singlefloat -from rpython.rlib import jit_libffi, rfloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib import rfloat from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -81,11 +81,11 @@ class TypeConverter(object): - _immutable_fields_ = ['libffitype', 'uses_local', 'name'] + _immutable_fields_ = ['cffi_name', 'uses_local', 'name'] - libffitype = lltype.nullptr(jit_libffi.FFI_TYPE_P.TO) + cffi_name = None uses_local = False - name = "" + name = "" def __init__(self, space, extra): pass @@ -103,6 +103,10 @@ raise oefmt(space.w_TypeError, "no converter available for '%s'", self.name) + def cffi_type(self, space): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + 
def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -143,9 +147,7 @@ class ArrayTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): if array_size <= 0: @@ -153,6 +155,10 @@ else: self.size = array_size + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def from_memory(self, space, w_obj, w_pycppclass, offset): # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) @@ -172,13 +178,15 @@ class PtrTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): self.size = sys.maxint + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): w_tc = space.findattr(w_obj, space.wrap('typecode')) if w_tc is not None and space.str_w(w_tc) != self.typecode: @@ -241,6 +249,10 @@ uses_local = True + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument_libffi(self, space, w_obj, address, call_local): assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP) # see interp_cppyy.py obj = self._unwrap_object(space, w_obj) @@ -255,6 +267,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = self.typecode class FloatTypeConverterMixin(NumericTypeConverterMixin): _mixin_ = True @@ -267,13 +281,15 @@ class VoidConverter(TypeConverter): - _immutable_fields_ = ['libffitype', 'name'] - - libffitype = jit_libffi.types.void + _immutable_fields_ = 
['name'] def __init__(self, space, name): self.name = name + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_void + def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -282,6 +298,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) @@ -305,6 +323,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.CCHARP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) @@ -331,13 +351,15 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) rffiptr = rffi.cast(self.c_ptrtype, address) - return space.wrap(float(rffiptr[0])) + return self._wrap_object(space, rffiptr[0]) class ConstFloatRefConverter(FloatConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'f' - libffitype = jit_libffi.types.pointer - typecode = 'F' + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -353,11 +375,22 @@ self.default = rffi.cast(self.c_type, 0.) 
class ConstDoubleRefConverter(ConstRefNumericTypeConverterMixin, DoubleConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'd' - libffitype = jit_libffi.types.pointer - typecode = 'D' +class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default'] + def __init__(self, space, default): + if default: + fval = float(rfloat.rstring_to_float(default)) + else: + fval = float(0.) + self.default = r_longfloat(fval) + +class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['typecode'] + typecode = 'g' class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): @@ -377,10 +410,6 @@ class VoidPtrConverter(TypeConverter): - _immutable_fields_ = ['libffitype'] - - libffitype = jit_libffi.types.pointer - def _unwrap_object(self, space, w_obj): try: obj = get_rawbuffer(space, w_obj) @@ -393,6 +422,10 @@ obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) @@ -422,9 +455,10 @@ address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) class VoidPtrPtrConverter(TypeConverter): - _immutable_fields_ = ['uses_local'] + _immutable_fields_ = ['uses_local', 'typecode'] uses_local = True + typecode = 'a' def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) @@ -435,7 +469,7 @@ except TypeError: r[0] = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) x[0] = rffi.cast(rffi.VOIDP, call_local) - ba[capi.c_function_arg_typeoffset(space)] = 'a' + ba[capi.c_function_arg_typeoffset(space)] = self.typecode def finalize_call(self, space, w_obj, call_local): r = 
rffi.cast(rffi.VOIDPP, call_local) @@ -445,13 +479,13 @@ pass # no set on buffer/array/None class VoidPtrRefConverter(VoidPtrPtrConverter): - _immutable_fields_ = ['uses_local'] + _immutable_fields_ = ['uses_local', 'typecode'] uses_local = True + typecode = 'V' class InstanceRefConverter(TypeConverter): - _immutable_fields_ = ['libffitype', 'cppclass'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['typecode', 'cppclass'] + typecode = 'V' def __init__(self, space, cppclass): from pypy.module.cppyy.interp_cppyy import W_CPPClass @@ -469,12 +503,16 @@ raise oefmt(space.w_TypeError, "cannot pass %T as %s", w_obj, self.cppclass.name) + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) address = rffi.cast(capi.C_OBJECT, address) ba = rffi.cast(rffi.CCHARP, address) - ba[capi.c_function_arg_typeoffset(space)] = 'o' + ba[capi.c_function_arg_typeoffset(space)] = self.typecode def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) @@ -496,6 +534,7 @@ class InstancePtrConverter(InstanceRefConverter): + typecode = 'o' def _unwrap_object(self, space, w_obj): try: @@ -509,8 +548,7 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) @@ -541,6 +579,11 @@ r = rffi.cast(rffi.VOIDPP, call_local) w_obj._rawobject = rffi.cast(capi.C_OBJECT, r[0]) + def from_memory(self, 
space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) class StdStringConverter(InstanceConverter): @@ -555,7 +598,7 @@ arg = InstanceConverter._unwrap_object(self, space, w_obj) return capi.c_stdstring2stdstring(space, arg) else: - return capi.c_charp2stdstring(space, space.str_w(w_obj)) + return capi.c_charp2stdstring(space, space.str_w(w_obj), space.len_w(w_obj)) def to_memory(self, space, w_obj, w_value, offset): try: @@ -571,7 +614,9 @@ capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) class StdStringRefConverter(InstancePtrConverter): - _immutable_fields_ = ['cppclass'] + _immutable_fields_ = ['cppclass', 'typecode'] + + typecode = 'V' def __init__(self, space, extra): From pypy.commits at gmail.com Tue Dec 13 21:51:32 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 18:51:32 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Update PyXXXProcs declarations to match CPython 3.5 Message-ID: <5850b3b4.8dcd190a.d236e.a8e7@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89055:673a63622053 Date: 2016-12-14 02:50 +0000 http://bitbucket.org/pypy/pypy/changeset/673a63622053/ Log: Update PyXXXProcs declarations to match CPython 3.5 diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -196,83 +196,86 @@ typedef int (*visitproc)(PyObject *, void *); typedef int (*traverseproc)(PyObject *, visitproc, void *); + typedef struct { /* Number implementations must check *both* arguments for proper type and implement the necessary conversions in the slot functions themselves. 
*/ - binaryfunc nb_add; - binaryfunc nb_subtract; - binaryfunc nb_multiply; - binaryfunc nb_divide; - binaryfunc nb_remainder; - binaryfunc nb_divmod; - ternaryfunc nb_power; - unaryfunc nb_negative; - unaryfunc nb_positive; - unaryfunc nb_absolute; - inquiry nb_bool; - unaryfunc nb_invert; - binaryfunc nb_lshift; - binaryfunc nb_rshift; - binaryfunc nb_and; - binaryfunc nb_xor; - binaryfunc nb_or; - coercion nb_coerce; - unaryfunc nb_int; - unaryfunc nb_long; - unaryfunc nb_float; - /* Added in release 2.0 */ - binaryfunc nb_inplace_add; - binaryfunc nb_inplace_subtract; - binaryfunc nb_inplace_multiply; - binaryfunc nb_inplace_divide; - binaryfunc nb_inplace_remainder; - ternaryfunc nb_inplace_power; - binaryfunc nb_inplace_lshift; - binaryfunc nb_inplace_rshift; - binaryfunc nb_inplace_and; - binaryfunc nb_inplace_xor; - binaryfunc nb_inplace_or; + binaryfunc nb_add; + binaryfunc nb_subtract; + binaryfunc nb_multiply; + binaryfunc nb_remainder; + binaryfunc nb_divmod; + ternaryfunc nb_power; + unaryfunc nb_negative; + unaryfunc nb_positive; + unaryfunc nb_absolute; + inquiry nb_bool; + unaryfunc nb_invert; + binaryfunc nb_lshift; + binaryfunc nb_rshift; + binaryfunc nb_and; + binaryfunc nb_xor; + binaryfunc nb_or; + unaryfunc nb_int; + void *nb_reserved; /* the slot formerly known as nb_long */ + unaryfunc nb_float; - /* Added in release 2.2 */ - /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ - binaryfunc nb_floor_divide; - binaryfunc nb_true_divide; - binaryfunc nb_inplace_floor_divide; - binaryfunc nb_inplace_true_divide; + binaryfunc nb_inplace_add; + binaryfunc nb_inplace_subtract; + binaryfunc nb_inplace_multiply; + binaryfunc nb_inplace_remainder; + ternaryfunc nb_inplace_power; + binaryfunc nb_inplace_lshift; + binaryfunc nb_inplace_rshift; + binaryfunc nb_inplace_and; + binaryfunc nb_inplace_xor; + binaryfunc nb_inplace_or; - /* Added in release 2.5 */ - unaryfunc nb_index; + binaryfunc nb_floor_divide; + binaryfunc nb_true_divide; + 
binaryfunc nb_inplace_floor_divide; + binaryfunc nb_inplace_true_divide; + + unaryfunc nb_index; + + binaryfunc nb_matrix_multiply; + binaryfunc nb_inplace_matrix_multiply; } PyNumberMethods; typedef struct { - lenfunc sq_length; - binaryfunc sq_concat; - ssizeargfunc sq_repeat; - ssizeargfunc sq_item; - void *was_sq_slice; - ssizeobjargproc sq_ass_item; - void *was_sq_ass_slice; - objobjproc sq_contains; - /* Added in release 2.0 */ - binaryfunc sq_inplace_concat; - ssizeargfunc sq_inplace_repeat; + lenfunc sq_length; + binaryfunc sq_concat; + ssizeargfunc sq_repeat; + ssizeargfunc sq_item; + void *was_sq_slice; + ssizeobjargproc sq_ass_item; + void *was_sq_ass_slice; + objobjproc sq_contains; + + binaryfunc sq_inplace_concat; + ssizeargfunc sq_inplace_repeat; } PySequenceMethods; typedef struct { - lenfunc mp_length; - binaryfunc mp_subscript; - objobjargproc mp_ass_subscript; + lenfunc mp_length; + binaryfunc mp_subscript; + objobjargproc mp_ass_subscript; } PyMappingMethods; typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; +} PyAsyncMethods; + +typedef struct { getbufferproc bf_getbuffer; releasebufferproc bf_releasebuffer; } PyBufferProcs; - typedef struct _typeobject { PyObject_VAR_HEAD const char *tp_name; /* For printing, in format "." */ From pypy.commits at gmail.com Tue Dec 13 22:18:30 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 13 Dec 2016 19:18:30 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Reduce diff with CPython's object.h: remove obsolete definitions, fix some members, ... Message-ID: <5850ba06.04202e0a.eea40.97c3@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89056:1fbddcd392b1 Date: 2016-12-14 03:17 +0000 http://bitbucket.org/pypy/pypy/changeset/1fbddcd392b1/ Log: Reduce diff with CPython's object.h: remove obsolete definitions, fix some members, ... 
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -119,13 +119,8 @@ typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *); typedef int (*inquiry)(PyObject *); typedef Py_ssize_t (*lenfunc)(PyObject *); -typedef int (*coercion)(PyObject **, PyObject **); -typedef PyObject *(*intargfunc)(PyObject *, int) Py_DEPRECATED(2.5); -typedef PyObject *(*intintargfunc)(PyObject *, int, int) Py_DEPRECATED(2.5); typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t); typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t); -typedef int(*intobjargproc)(PyObject *, int, PyObject *); -typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *); typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *); typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *); typedef int(*objobjargproc)(PyObject *, PyObject *, PyObject *); @@ -138,10 +133,8 @@ void *buf; PyObject *obj; /* owned reference */ Py_ssize_t len; - - /* This is Py_ssize_t so it can be - pointed to by strides in simple case.*/ - Py_ssize_t itemsize; + Py_ssize_t itemsize; /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ int readonly; int ndim; char *format; @@ -157,7 +150,6 @@ void *internal; /* always NULL for app-level objects */ } Py_buffer; - typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); @@ -202,46 +194,46 @@ arguments for proper type and implement the necessary conversions in the slot functions themselves. 
*/ - binaryfunc nb_add; - binaryfunc nb_subtract; - binaryfunc nb_multiply; - binaryfunc nb_remainder; - binaryfunc nb_divmod; - ternaryfunc nb_power; - unaryfunc nb_negative; - unaryfunc nb_positive; - unaryfunc nb_absolute; - inquiry nb_bool; - unaryfunc nb_invert; - binaryfunc nb_lshift; - binaryfunc nb_rshift; - binaryfunc nb_and; - binaryfunc nb_xor; - binaryfunc nb_or; - unaryfunc nb_int; - void *nb_reserved; /* the slot formerly known as nb_long */ - unaryfunc nb_float; + binaryfunc nb_add; + binaryfunc nb_subtract; + binaryfunc nb_multiply; + binaryfunc nb_remainder; + binaryfunc nb_divmod; + ternaryfunc nb_power; + unaryfunc nb_negative; + unaryfunc nb_positive; + unaryfunc nb_absolute; + inquiry nb_bool; + unaryfunc nb_invert; + binaryfunc nb_lshift; + binaryfunc nb_rshift; + binaryfunc nb_and; + binaryfunc nb_xor; + binaryfunc nb_or; + unaryfunc nb_int; + void *nb_reserved; /* the slot formerly known as nb_long */ + unaryfunc nb_float; - binaryfunc nb_inplace_add; - binaryfunc nb_inplace_subtract; - binaryfunc nb_inplace_multiply; - binaryfunc nb_inplace_remainder; - ternaryfunc nb_inplace_power; - binaryfunc nb_inplace_lshift; - binaryfunc nb_inplace_rshift; - binaryfunc nb_inplace_and; - binaryfunc nb_inplace_xor; - binaryfunc nb_inplace_or; + binaryfunc nb_inplace_add; + binaryfunc nb_inplace_subtract; + binaryfunc nb_inplace_multiply; + binaryfunc nb_inplace_remainder; + ternaryfunc nb_inplace_power; + binaryfunc nb_inplace_lshift; + binaryfunc nb_inplace_rshift; + binaryfunc nb_inplace_and; + binaryfunc nb_inplace_xor; + binaryfunc nb_inplace_or; - binaryfunc nb_floor_divide; - binaryfunc nb_true_divide; - binaryfunc nb_inplace_floor_divide; - binaryfunc nb_inplace_true_divide; + binaryfunc nb_floor_divide; + binaryfunc nb_true_divide; + binaryfunc nb_inplace_floor_divide; + binaryfunc nb_inplace_true_divide; - unaryfunc nb_index; + unaryfunc nb_index; - binaryfunc nb_matrix_multiply; - binaryfunc nb_inplace_matrix_multiply; + binaryfunc 
nb_matrix_multiply; + binaryfunc nb_inplace_matrix_multiply; } PyNumberMethods; typedef struct { @@ -277,84 +269,85 @@ typedef struct _typeobject { - PyObject_VAR_HEAD - const char *tp_name; /* For printing, in format "." */ - Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ + PyObject_VAR_HEAD + const char *tp_name; /* For printing, in format "." */ + Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ - /* Methods to implement standard operations */ + /* Methods to implement standard operations */ - destructor tp_dealloc; - printfunc tp_print; - getattrfunc tp_getattr; - setattrfunc tp_setattr; - cmpfunc tp_compare; - reprfunc tp_repr; + destructor tp_dealloc; + printfunc tp_print; + getattrfunc tp_getattr; + setattrfunc tp_setattr; + PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2) + or tp_reserved (Python 3) */ + reprfunc tp_repr; - /* Method suites for standard classes */ + /* Method suites for standard classes */ - PyNumberMethods *tp_as_number; - PySequenceMethods *tp_as_sequence; - PyMappingMethods *tp_as_mapping; + PyNumberMethods *tp_as_number; + PySequenceMethods *tp_as_sequence; + PyMappingMethods *tp_as_mapping; - /* More standard operations (here for binary compatibility) */ + /* More standard operations (here for binary compatibility) */ - hashfunc tp_hash; - ternaryfunc tp_call; - reprfunc tp_str; - getattrofunc tp_getattro; - setattrofunc tp_setattro; + hashfunc tp_hash; + ternaryfunc tp_call; + reprfunc tp_str; + getattrofunc tp_getattro; + setattrofunc tp_setattro; - /* Functions to access object as input/output buffer */ - PyBufferProcs *tp_as_buffer; + /* Functions to access object as input/output buffer */ + PyBufferProcs *tp_as_buffer; - /* Flags to define presence of optional/expanded features */ - long tp_flags; + /* Flags to define presence of optional/expanded features */ + long tp_flags; - const char *tp_doc; /* Documentation string */ + const char *tp_doc; /* Documentation string */ - /* Assigned 
meaning in release 2.0 */ - /* call function for all accessible objects */ - traverseproc tp_traverse; + /* Assigned meaning in release 2.0 */ + /* call function for all accessible objects */ + traverseproc tp_traverse; - /* delete references to contained objects */ - inquiry tp_clear; + /* delete references to contained objects */ + inquiry tp_clear; - /* Assigned meaning in release 2.1 */ - /* rich comparisons */ - richcmpfunc tp_richcompare; + /* Assigned meaning in release 2.1 */ + /* rich comparisons */ + richcmpfunc tp_richcompare; - /* weak reference enabler */ - Py_ssize_t tp_weaklistoffset; + /* weak reference enabler */ + Py_ssize_t tp_weaklistoffset; - /* Added in release 2.2 */ - /* Iterators */ - getiterfunc tp_iter; - iternextfunc tp_iternext; + /* Iterators */ + getiterfunc tp_iter; + iternextfunc tp_iternext; - /* Attribute descriptor and subclassing stuff */ - struct PyMethodDef *tp_methods; - struct PyMemberDef *tp_members; - struct PyGetSetDef *tp_getset; - struct _typeobject *tp_base; - PyObject *tp_dict; - descrgetfunc tp_descr_get; - descrsetfunc tp_descr_set; - Py_ssize_t tp_dictoffset; - initproc tp_init; - allocfunc tp_alloc; - newfunc tp_new; - freefunc tp_free; /* Low-level free-memory routine */ - inquiry tp_is_gc; /* For PyObject_IS_GC */ - PyObject *tp_bases; - PyObject *tp_mro; /* method resolution order */ - PyObject *tp_cache; - PyObject *tp_subclasses; - PyObject *tp_weaklist; - destructor tp_del; + /* Attribute descriptor and subclassing stuff */ + struct PyMethodDef *tp_methods; + struct PyMemberDef *tp_members; + struct PyGetSetDef *tp_getset; + struct _typeobject *tp_base; + PyObject *tp_dict; + descrgetfunc tp_descr_get; + descrsetfunc tp_descr_set; + Py_ssize_t tp_dictoffset; + initproc tp_init; + allocfunc tp_alloc; + newfunc tp_new; + freefunc tp_free; /* Low-level free-memory routine */ + inquiry tp_is_gc; /* For PyObject_IS_GC */ + PyObject *tp_bases; + PyObject *tp_mro; /* method resolution order */ + PyObject *tp_cache; 
+ PyObject *tp_subclasses; + PyObject *tp_weaklist; + destructor tp_del; - /* Type attribute cache version tag. Added in version 2.6 */ - unsigned int tp_version_tag; + /* Type attribute cache version tag. Added in version 2.6 */ + unsigned int tp_version_tag; + destructor tp_finalize; } PyTypeObject; typedef struct { @@ -369,10 +362,10 @@ #define PyObject_Bytes PyObject_Str /* Flag bits for printing: */ -#define Py_PRINT_RAW 1 /* No string quotes etc. */ +#define Py_PRINT_RAW 1 /* No string quotes etc. */ /* -Type flags (tp_flags) +`Type flags (tp_flags) These flags are used to extend the type structure in a backwards-compatible fashion. Extensions can use the flags to indicate (and test) when a given @@ -384,18 +377,14 @@ all extension writers who publically release their extensions (this will be fewer than you might expect!).. -Python 1.5.2 introduced the bf_getcharbuffer slot into PyBufferProcs. +Most flags were removed as of Python 3.0 to make room for new flags. (Some +flags are not for backwards compatibility but to indicate the presence of an +optional feature; these flags remain of course.) Type definitions should use Py_TPFLAGS_DEFAULT for their tp_flags value. Code can use PyType_HasFeature(type_ob, flag_value) to test whether the given type object has a specified feature. - -NOTE: when building the core, Py_TPFLAGS_DEFAULT includes -Py_TPFLAGS_HAVE_VERSION_TAG; outside the core, it doesn't. This is so -that extensions that modify tp_dict of their own types directly don't -break, since this was allowed in 2.5. In 3.0 they will have to -manually remove this flag though! 
*/ /* PyBufferProcs contains bf_getcharbuffer */ From pypy.commits at gmail.com Wed Dec 14 10:04:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 14 Dec 2016 07:04:50 -0800 (PST) Subject: [pypy-commit] pypy default: Don't replace space fixture with weird CAPI object in cpyext tests Message-ID: <58515f92.43052e0a.8b3ca.ce9b@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89057:862a6db9bc74 Date: 2016-12-14 15:03 +0000 http://bitbucket.org/pypy/pypy/changeset/862a6db9bc74/ Log: Don't replace space fixture with weird CAPI object in cpyext tests diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -21,9 +21,6 @@ import pypy.module.cpyext.test.test_cpyext return False -def pytest_funcarg__space(request): - return request.cls.api - def pytest_funcarg__api(request): return request.cls.api From pypy.commits at gmail.com Wed Dec 14 11:46:38 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 14 Dec 2016 08:46:38 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: merge default Message-ID: <5851776e.9325190a.dec53.c3de@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89058:66c3c930e1f2 Date: 2016-12-13 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/66c3c930e1f2/ Log: merge default diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py --- a/lib_pypy/_pypy_wait.py +++ b/lib_pypy/_pypy_wait.py @@ -1,3 +1,4 @@ +import os from resource import ffi, lib, _make_struct_rusage __all__ = ["wait3", "wait4"] @@ -7,6 +8,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait3(status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, os.strerror(errno)) rusage = _make_struct_rusage(ru) @@ -16,6 +20,9 @@ status = ffi.new("int *") ru = ffi.new("struct rusage *") pid = lib.wait4(pid, status, options, ru) + if pid == -1: + errno = ffi.errno + raise OSError(errno, 
os.strerror(errno)) rusage = _make_struct_rusage(ru) diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info --- a/lib_pypy/greenlet.egg-info +++ b/lib_pypy/greenlet.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: greenlet -Version: 0.4.10 +Version: 0.4.11 Summary: Lightweight in-process concurrent programming Home-page: https://github.com/python-greenlet/greenlet Author: Ralf Schmitt (for CPython), PyPy team diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,7 +1,7 @@ import sys import _continuation -__version__ = "0.4.10" +__version__ = "0.4.11" # ____________________________________________________________ # Exceptions diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -71,8 +71,11 @@ Analyzing performance of applications is always tricky. We have various tools, for example a `jitviewer`_ that help us analyze performance. -The jitviewer shows the code generated by the PyPy JIT in a hierarchical way, -as shown by the screenshot below: +The old tool was partly rewritten and combined with vmprof. The service is +hosted at `vmprof.com`_. + +The following shows an old image of the jitviewer. +The code generated by the PyPy JIT in a hierarchical way: - at the bottom level, it shows the Python source code of the compiled loops @@ -84,13 +87,17 @@ .. image:: image/jitviewer.png -The jitviewer is a web application based on flask and jinja2 (and jQuery on -the client): if you have great web developing skills and want to help PyPy, +The jitviewer is a web application based on django and angularjs: +if you have great web developing skills and want to help PyPy, this is an ideal task to get started, because it does not require any deep -knowledge of the internals. +knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and +`vmprof-integration`_ to find open issues and documentation. 
-.. _jitviewer: http://bitbucket.org/pypy/jitviewer - +.. _jitviewer: http://vmprof.com +.. _vmprof.com: http://vmprof.com +.. _vmprof-python: https://github.com/vmprof/vmprof-python +.. _vmprof-server: https://github.com/vmprof/vmprof-server +.. _vmprof-integration: https://github.com/vmprof/vmprof-integration Optimized Unicode Representation -------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,15 @@ .. this is a revision shortly after release-pypy2.7-v5.6 .. startrev: 7e9787939641 + +Since a while now, PyPy preserves the order of dictionaries and sets. +However, the set literal syntax ``{x, y, z}`` would by mistake build a +set with the opposite order: ``set([z, y, x])``. This has been fixed. +Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be +``set([5.0])``, but in 2.7.trunk it is ``set([5])``. PyPy's behavior +changed in exactly the same way because of this fix. + + .. branch: rpython-error-to-systemerror Any uncaught RPython exception (from a PyPy bug) is turned into an @@ -29,3 +38,10 @@ .. branch: desc-specialize Refactor FunctionDesc.specialize() and related code (RPython annotator). + +.. branch: raw-calloc + +.. 
branch: issue2446 + +Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key +so it will be picked up by app-level objects of that type diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -433,6 +433,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -547,6 +547,8 @@ @jit.dont_look_inside def _run_finalizers(self): + # called by perform() when we have to "perform" this action, + # and also directly at the end of gc.collect(). while True: w_obj = self.space.finalizer_queue.next_dead() if w_obj is None: diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -247,16 +247,15 @@ def descr_function_repr(self): return self.getrepr(self.space, 'function %s' % (self.name,)) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r and %r" % ( identifier, previous, self)) @@ -264,10 +263,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return 
space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -689,9 +689,9 @@ return space.newtuple([builtin_code, space.newtuple([space.newtext(self.identifier)])]) - def find(indentifier): + def find(space, identifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code + return Function.find(space, identifier).code find = staticmethod(find) def signature(self): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1292,9 +1292,10 @@ @jit.unroll_safe def BUILD_SET(self, itemcount, next_instr): w_set = self.space.newset() - for i in range(itemcount): - w_item = self.popvalue() + for i in range(itemcount-1, -1, -1): + w_item = self.peekvalue(i) self.space.call_method(w_set, 'add', w_item) + self.popvalues(itemcount) self.pushvalue(w_set) def STORE_MAP(self, oparg, next_instr): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -729,6 +729,10 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_host_is_pypy = cls.space.wrap( + '__pypy__' in sys.builtin_module_names) + def test_bom_with_future(self): s = '\xef\xbb\xbffrom __future__ import division\nx = 1/2' ns = {} @@ -771,6 +775,18 @@ assert math.copysign(1., c[0]) == -1.0 assert math.copysign(1., c[1]) == -1.0 + def test_dict_and_set_literal_order(self): + x = 1 + l1 = list({1:'a', 3:'b', 2:'c', 4:'d'}) + l2 = list({1, 3, 2, 4}) + l3 = list({x:'a', 3:'b', 2:'c', 4:'d'}) + l4 = list({x, 3, 2, 4}) + if not self.host_is_pypy: + # the full test relies on the host Python providing ordered dicts + assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 
4} + else: + assert l1 == l2 == l3 == l4 == [1, 3, 2, 4] + ##class TestPythonAstCompiler(BaseTestCompiler): ## def setup_method(self, method): diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -580,3 +580,25 @@ pass sys.settrace(None) assert seen == ['call', 'exception', 'return'] + + def test_generator_trace_stopiteration(self): + import sys + def f(): + yield 5 + gen = f() + assert next(gen) == 5 + seen = [] + def trace_func(frame, event, *args): + print('TRACE:', frame, event, args) + seen.append(event) + return trace_func + def g(): + for x in gen: + never_entered + sys.settrace(trace_func) + g() + sys.settrace(None) + print 'seen:', seen + # on Python 3 we get an extra 'exception' when 'for' catches + # StopIteration + assert seen == ['call', 'line', 'call', 'return', 'return'] diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py --- a/pypy/interpreter/test/test_special.py +++ b/pypy/interpreter/test/test_special.py @@ -4,9 +4,11 @@ def test_Ellipsis(self): assert Ellipsis == Ellipsis assert repr(Ellipsis) == 'Ellipsis' + assert Ellipsis.__class__.__name__ == 'ellipsis' def test_NotImplemented(self): def f(): return NotImplemented assert f() == NotImplemented assert repr(NotImplemented) == 'NotImplemented' + assert NotImplemented.__class__.__name__ == 'NotImplementedType' diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_unicodehelper.py @@ -0,0 +1,26 @@ +from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8 + +class FakeSpace: + pass + +def test_encode_utf8(): + space = FakeSpace() + assert encode_utf8(space, u"abc") == "abc" + assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4" + assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80" + assert 
encode_utf8(space, u"\udc00") == "\xed\xb0\x80" + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80" + +def test_decode_utf8(): + space = FakeSpace() + assert decode_utf8(space, "abc") == u"abc" + assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234" + assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800" + assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00" + got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80") + assert map(ord, got) == [0xd800, 0xdc00] + got = decode_utf8(space, "\xf0\x90\x80\x80") + assert map(ord, got) == [0x10000] diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -771,12 +771,12 @@ ) assert not Cell.typedef.acceptable_as_base_class # no __new__ -Ellipsis.typedef = TypeDef("Ellipsis", +Ellipsis.typedef = TypeDef("ellipsis", __repr__ = interp2app(Ellipsis.descr__repr__), ) assert not Ellipsis.typedef.acceptable_as_base_class # no __new__ -NotImplemented.typedef = TypeDef("NotImplemented", +NotImplemented.typedef = TypeDef("NotImplementedType", __repr__ = interp2app(NotImplemented.descr__repr__), ) assert not NotImplemented.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -51,6 +51,10 @@ return result def decode_utf8(space, string): + # Surrogates are accepted and not treated specially at all. + # If there happen to be two 3-bytes encoding a pair of surrogates, + # you still get two surrogate unicode characters in the result. + # These are the Python2 rules; Python3 differs. 
result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), @@ -59,8 +63,9 @@ def encode_utf8(space, uni): # Note that this function never raises UnicodeEncodeError, - # since surrogate pairs are allowed. - # This is not the case with Python3. + # since surrogates are allowed, either paired or lone. + # A paired surrogate is considered like the non-BMP character + # it stands for. These are the Python2 rules; Python3 differs. return runicode.unicode_encode_utf_8( uni, len(uni), "strict", errorhandler=raise_unicode_exception_encode, diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -401,7 +401,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -77,7 +77,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) @@ -86,7 +86,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -25,11 
+25,9 @@ basestruct = PyObject.TO W_BaseObject = W_ObjectObject - def get_dealloc(self, space): + def get_dealloc(self): from pypy.module.cpyext.typeobject import subtype_dealloc - return llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + return subtype_dealloc.api_func def allocate(self, space, w_type, itemcount=0): # similar to PyType_GenericAlloc? @@ -109,10 +107,8 @@ return tp_alloc(space, w_type, itemcount) if tp_dealloc: - def get_dealloc(self, space): - return llhelper( - tp_dealloc.api_func.functype, - tp_dealloc.api_func.get_wrapper(space)) + def get_dealloc(self): + return tp_dealloc.api_func if tp_attach: def attach(self, space, pyobj, w_obj): diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -8,12 +8,12 @@ cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl, Py_buffer, Py_bufferP) from pypy.module.cpyext.typeobjectdefs import ( - unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, + unaryfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc, getbufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef +from pypy.module.cpyext.pyobject import make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State @@ -21,8 +21,10 @@ from pypy.interpreter.argument import Arguments from rpython.rlib.buffer import Buffer from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import 
specialize, not_rpython from rpython.tool.sourcetools import func_renamer +from rpython.flowspace.model import Constant +from rpython.flowspace.specialcase import register_flow_sc from rpython.rtyper.annlowlevel import llhelper from pypy.module.sys.version import CPYTHON_VERSION @@ -59,6 +61,17 @@ "expected %d-%d arguments, got %d", low, high, space.len_w(w_ob)) + at not_rpython +def llslot(space, func): + return llhelper(func.api_func.functype, func.api_func.get_wrapper(space)) + + at register_flow_sc(llslot) +def sc_llslot(ctx, v_space, v_func): + assert isinstance(v_func, Constant) + get_llhelper = v_func.value.api_func.get_llhelper + return ctx.appcall(get_llhelper, v_space) + + def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs) @@ -106,7 +119,7 @@ args_w = space.fixedview(w_args) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, w_self, args_w[0], arg3) def wrap_ternaryfunc_r(space, w_self, w_args, func): @@ -121,7 +134,7 @@ Py_DecRef(space, ref) arg3 = space.w_None if len(args_w) > 1: - arg3 = args_w[1] + arg3 = args_w[1] return generic_cpy_call(space, func_ternary, args_w[0], w_self, arg3) @@ -322,7 +335,7 @@ self.strides = [1] else: self.strides = strides - self.ndim = ndim + self.ndim = ndim self.itemsize = itemsize self.readonly = readonly @@ -437,9 +450,10 @@ try: return SLOTS[key] except KeyError: - ret = build_slot_tp_function(space, typedef, name) - SLOTS[key] = ret - return ret + slot_func = build_slot_tp_function(space, typedef, name) + api_func = slot_func.api_func if slot_func else None + SLOTS[key] = api_func + return api_func def build_slot_tp_function(space, typedef, name): w_type = space.gettypeobject(typedef) @@ -472,7 +486,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self): return space.call_function(slot_fn, 
w_self) - api_func = slot_func.api_func handled = True # binary functions @@ -499,7 +512,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg): return space.call_function(slot_fn, w_self, w_arg) - api_func = slot_func.api_func handled = True # binary-with-Py_ssize_t-type @@ -517,7 +529,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, arg): return space.call_function(slot_fn, w_self, space.newint(arg)) - api_func = slot_func.api_func handled = True # ternary functions @@ -532,7 +543,6 @@ @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_self, w_arg1, w_arg2): return space.call_function(slot_fn, w_self, w_arg1, w_arg2) - api_func = slot_func.api_func handled = True if handled: @@ -552,7 +562,7 @@ else: space.call_function(delattr_fn, w_self, w_name) return 0 - api_func = slot_tp_setattro.api_func + slot_func = slot_tp_setattro elif name == 'tp_getattro': getattr_fn = w_type.getdictvalue(space, '__getattribute__') if getattr_fn is None: @@ -562,7 +572,7 @@ @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) - api_func = slot_tp_getattro.api_func + slot_func = slot_tp_getattro elif name == 'tp_call': call_fn = w_type.getdictvalue(space, '__call__') if call_fn is None: @@ -574,7 +584,7 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(call_fn, args) - api_func = slot_tp_call.api_func + slot_func = slot_tp_call elif name == 'tp_iternext': iternext_fn = w_type.getdictvalue(space, 'next') @@ -590,7 +600,7 @@ if not e.match(space, space.w_StopIteration): raise return None - api_func = slot_tp_iternext.api_func + slot_func = slot_tp_iternext elif name == 'tp_init': init_fn = w_type.getdictvalue(space, '__init__') @@ -605,7 +615,7 @@ w_stararg=w_args, 
w_starstararg=w_kwds) space.call_args(init_fn, args) return 0 - api_func = slot_tp_init.api_func + slot_func = slot_tp_init elif name == 'tp_new': new_fn = w_type.getdictvalue(space, '__new__') if new_fn is None: @@ -617,12 +627,12 @@ args = Arguments(space, [w_self], w_stararg=w_args, w_starstararg=w_kwds) return space.call_args(space.get(new_fn, w_self), args) - api_func = slot_tp_new.api_func + slot_func = slot_tp_new elif name == 'tp_as_buffer.c_bf_getbuffer': buff_fn = w_type.getdictvalue(space, '__buffer__') if buff_fn is None: return - @cpython_api([PyObject, Py_bufferP, rffi.INT_real], + @cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, header=None, error=-1) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def buff_w(space, w_self, view, flags): @@ -647,14 +657,14 @@ return 0 # XXX remove this when it no longer crashes a translated PyPy return - api_func = buff_w.api_func + slot_func = buff_w else: # missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce # tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length # richcmpfunc(s) return - return lambda: llhelper(api_func.functype, api_func.get_wrapper(space)) + return slot_func PyWrapperFlag_KEYWORDS = 1 @@ -985,8 +995,8 @@ slotdefs = sorted(slotdefs, key=slotdef_sort_key) slotdefs_for_tp_slots = unrolling_iterable( - [(x.method_name, x.slot_name, x.slot_names, x.slot_func) - for x in slotdefs]) + [(x.method_name, x.slot_name, x.slot_names, + x.slot_func.api_func if x.slot_func else None) for x in slotdefs]) slotdefs_for_wrappers = unrolling_iterable( [(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -142,7 +142,7 @@ assert fuu2(u"abc").baz().escape() raises(TypeError, module.fooType.object_member.__get__, 1) - def 
test_multiple_inheritance(self): + def test_multiple_inheritance1(self): module = self.import_module(name='foo') obj = module.UnicodeSubtype(u'xyz') obj2 = module.UnicodeSubtype2() @@ -422,7 +422,7 @@ assert space.int_w(space.getattr(w_class, w_name)) == 1 space.delitem(w_dict, w_name) - def test_multiple_inheritance(self, space, api): + def test_multiple_inheritance2(self, space, api): w_class = space.appexec([], """(): class A(object): pass @@ -1167,3 +1167,38 @@ __metaclass__ = FooType print repr(X) X() + + def test_multiple_inheritance3(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + ''' + PyObject *obj; + PyTypeObject *Base1, *Base2, *Base12; + Base1 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0); + Base1->tp_name = "Base1"; + Base2->tp_name = "Base2"; + Base12->tp_name = "Base12"; + Base1->tp_basicsize = sizeof(PyHeapTypeObject); + Base2->tp_basicsize = sizeof(PyHeapTypeObject); + Base12->tp_basicsize = sizeof(PyHeapTypeObject); + Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; + Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_base = Base1; + Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); + Base12->tp_doc = "The Base12 type or object"; + if (PyType_Ready(Base1) < 0) return NULL; + if (PyType_Ready(Base2) < 0) return NULL; + if (PyType_Ready(Base12) < 0) return NULL; + obj = PyObject_New(PyObject, Base12); + return obj; + ''' + )]) + obj = module.new_obj() + assert 'Base12' in str(obj) + assert type(obj).__doc__ == "The Base12 type or object" + assert obj.__doc__ == "The Base12 type or object" + + diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -3,7 +3,6 @@ 
from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rstring import rsplit -from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root, DescrMismatch @@ -28,7 +27,8 @@ PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, track_reference, Py_DecRef, as_pyobj) from pypy.module.cpyext.slotdefs import ( - slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) + slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function, + llslot) from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( @@ -249,27 +249,21 @@ # coming from a parent C type. typedef = w_type.layout.typedef - for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots: + for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots: w_descr = w_type.lookup(method_name) if w_descr is None: # XXX special case iternext continue - slot_func_helper = None - - if slot_func is None and typedef is not None: - get_slot = get_slot_tp_function(space, typedef, slot_name) - if get_slot: - slot_func_helper = get_slot() - elif slot_func: - slot_func_helper = llhelper(slot_func.api_func.functype, - slot_func.api_func.get_wrapper(space)) - - if slot_func_helper is None: + if slot_apifunc is None and typedef is not None: + slot_apifunc = get_slot_tp_function(space, typedef, slot_name) + if not slot_apifunc: if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: - os.write(2, "%s defined by %s but no slot function defined!\n" % ( + os.write(2, + "%s defined by %s but no slot function defined!\n" % ( method_name, w_type.getname(space))) continue + slot_func_helper = slot_apifunc.get_llhelper(space) # XXX special case wrapper-functions and use a "specific" slot func @@ -329,6 +323,8 @@ w_obj = W_PyCWrapperObject(space, pto, 
method_name, wrapper_func, wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = w_obj + if pto.c_tp_doc: + dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) @@ -373,9 +369,8 @@ def setup_new_method_def(space): ptr = get_new_method_def(space) - ptr.c_ml_meth = rffi.cast(PyCFunction_typedef, - llhelper(tp_new_wrapper.api_func.functype, - tp_new_wrapper.api_func.get_wrapper(space))) + ptr.c_ml_meth = rffi.cast( + PyCFunction_typedef, llslot(space, tp_new_wrapper)) def add_tp_new_wrapper(space, dict_w, pto): if "__new__" in dict_w: @@ -465,13 +460,17 @@ convert_member_defs(space, dict_w, pto.c_tp_members, self) name = rffi.charp2str(pto.c_tp_name) - new_layout = (pto.c_tp_basicsize > rffi.sizeof(PyObject.TO) or - pto.c_tp_itemsize > 0) + flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE + if flag_heaptype: + minsize = rffi.sizeof(PyHeapTypeObject.TO) + else: + minsize = rffi.sizeof(PyObject.TO) + new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0) W_TypeObject.__init__(self, space, name, - bases_w or [space.w_object], dict_w, force_new_layout=new_layout) + bases_w or [space.w_object], dict_w, force_new_layout=new_layout, + is_heaptype=flag_heaptype) self.flag_cpytype = True - self.flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE # if a sequence or a mapping, then set the flag to force it if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item: self.flag_map_or_seq = 'S' @@ -494,8 +493,7 @@ def subtype_dealloc(space, obj): pto = obj.c_ob_type base = pto - this_func_ptr = llhelper(subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + this_func_ptr = llslot(space, subtype_dealloc) while base.c_tp_dealloc == this_func_ptr: base = base.c_tp_base assert base @@ -597,46 +595,31 @@ return c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) lltype.render_immortal(c_buf) - c_buf.c_bf_getsegcount = 
llhelper(bf_segcount.api_func.functype, - bf_segcount.api_func.get_wrapper(space)) + c_buf.c_bf_getsegcount = llslot(space, bf_segcount) if space.is_w(w_type, space.w_bytes): # Special case: str doesn't support get_raw_address(), so we have a # custom get*buffer that instead gives the address of the char* in the # PyBytesObject*! - c_buf.c_bf_getreadbuffer = llhelper( - str_getreadbuffer.api_func.functype, - str_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper( - str_getcharbuffer.api_func.functype, - str_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, str_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, str_getcharbuffer) elif space.is_w(w_type, space.w_unicode): # Special case: unicode doesn't support get_raw_address(), so we have a # custom get*buffer that instead gives the address of the char* in the # PyUnicodeObject*! - c_buf.c_bf_getreadbuffer = llhelper( - unicode_getreadbuffer.api_func.functype, - unicode_getreadbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, unicode_getreadbuffer) elif space.is_w(w_type, space.w_buffer): # Special case: we store a permanent address on the cpyext wrapper, # so we'll reuse that. 
# Note: we could instead store a permanent address on the buffer object, # and use get_raw_address() - c_buf.c_bf_getreadbuffer = llhelper( - buf_getreadbuffer.api_func.functype, - buf_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper( - buf_getcharbuffer.api_func.functype, - buf_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, buf_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, buf_getcharbuffer) else: # use get_raw_address() - c_buf.c_bf_getreadbuffer = llhelper(bf_getreadbuffer.api_func.functype, - bf_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper(bf_getcharbuffer.api_func.functype, - bf_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getreadbuffer = llslot(space, bf_getreadbuffer) + c_buf.c_bf_getcharbuffer = llslot(space, bf_getcharbuffer) if bufspec == 'read-write': - c_buf.c_bf_getwritebuffer = llhelper( - bf_getwritebuffer.api_func.functype, - bf_getwritebuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getwritebuffer = llslot(space, bf_getwritebuffer) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER @@ -697,12 +680,10 @@ # dealloc if space.gettypeobject(w_type.layout.typedef) is w_type: # only for the exact type, like 'space.w_tuple' or 'space.w_list' - pto.c_tp_dealloc = typedescr.get_dealloc(space) + pto.c_tp_dealloc = typedescr.get_dealloc().get_llhelper(space) else: # for all subtypes, use subtype_dealloc() - pto.c_tp_dealloc = llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + pto.c_tp_dealloc = llslot(space, subtype_dealloc) if space.is_w(w_type, space.w_bytes): pto.c_tp_itemsize = 1 elif space.is_w(w_type, space.w_tuple): @@ -710,10 +691,8 @@ # buffer protocol setup_buffer_procs(space, w_type, pto) - pto.c_tp_free = llhelper(PyObject_Free.api_func.functype, - PyObject_Free.api_func.get_wrapper(space)) - pto.c_tp_alloc = 
llhelper(PyType_GenericAlloc.api_func.functype, - PyType_GenericAlloc.api_func.get_wrapper(space)) + pto.c_tp_free = llslot(space, PyObject_Free) + pto.c_tp_alloc = llslot(space, PyType_GenericAlloc) builder = space.fromcache(StaticObjectBuilder) if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0 and builder.cpyext_type_init is None): @@ -904,15 +883,11 @@ if not pto.c_tp_setattro: from pypy.module.cpyext.object import PyObject_GenericSetAttr - pto.c_tp_setattro = llhelper( - PyObject_GenericSetAttr.api_func.functype, - PyObject_GenericSetAttr.api_func.get_wrapper(space)) + pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr) if not pto.c_tp_getattro: from pypy.module.cpyext.object import PyObject_GenericGetAttr - pto.c_tp_getattro = llhelper( - PyObject_GenericGetAttr.api_func.functype, - PyObject_GenericGetAttr.api_func.get_wrapper(space)) + pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr) if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -14,7 +14,28 @@ cache.clear() cache = space.fromcache(MapAttrCache) cache.clear() + rgc.collect() + + # if we are running in gc.disable() mode but gc.collect() is called, + # we should still call the finalizers now. We do this as an attempt + # to get closer to CPython's behavior: in Py3.5 some tests + # specifically rely on that. This is similar to how, in CPython, an + # explicit gc.collect() will invoke finalizers from cycles and fully + # ignore the gc.disable() mode. + temp_reenable = not space.user_del_action.enabled_at_app_level + if temp_reenable: + enable_finalizers(space) + try: + # fetch the pending finalizers from the queue, where they are + # likely to have been added by rgc.collect() above, and actually + # run them now. This forces them to run before this function + # returns, and also always in the enable_finalizers() mode. 
+ space.user_del_action._run_finalizers() + finally: + if temp_reenable: + disable_finalizers(space) + return space.newint(0) def enable(space): diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -70,6 +70,19 @@ gc.enable() assert gc.isenabled() + def test_gc_collect_overrides_gc_disable(self): + import gc + deleted = [] + class X(object): + def __del__(self): + deleted.append(1) + assert gc.isenabled() + gc.disable() + X() + gc.collect() + assert deleted == [1] + gc.enable() + class AppTestGcDumpHeap(object): pytestmark = py.test.mark.xfail(run=False) diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -374,17 +374,7 @@ def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_vectorized(1, 1) - - def define_sum(): - return """ - a = |30| - sum(a) - """ - def test_sum(self): - result = self.run("sum") - assert result == sum(range(30)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_int(): return """ @@ -408,7 +398,7 @@ def test_sum_multi(self): result = self.run("sum_multi") assert result == sum(range(30)) + sum(range(60)) - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_sum_float_to_int16(): return """ @@ -490,7 +480,7 @@ assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(2, 1) + self.check_vectorized(2, 0) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -501,7 +491,7 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 - self.check_vectorized(3, 1) + self.check_vectorized(3, 0) def define_prod(): return """ @@ -518,12 +508,10 @@ def test_prod(self): result = self.run("prod") 
assert int(result) == 576 - self.check_vectorized(1, 1) def test_prod_zero(self): result = self.run("prod_zero") assert int(result) == 0 - self.check_vectorized(1, 1) def define_max(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -75,8 +75,6 @@ arith_comb = [ ('sum','int', 1742, 1742, 1), - ('sum','float', 2581, 2581, 1), - ('prod','float', 1, 3178, 1), ('prod','int', 1, 3178, 1), ('any','int', 1, 2239, 1), ('any','int', 0, 4912, 0), diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -494,3 +494,15 @@ def test_negative_array_size(self): ffi = FFI() py.test.raises(ValueError, ffi.cast, "int[-5]", 0) + + def test_cannot_instantiate_manually(self): + ffi = FFI() + ct = type(ffi.typeof("void *")) + py.test.raises(TypeError, ct) + py.test.raises(TypeError, ct, ffi.NULL) + for cd in [type(ffi.cast("void *", 0)), + type(ffi.new("char[]", 3)), + type(ffi.gc(ffi.NULL, lambda x: None))]: + py.test.raises(TypeError, cd) + py.test.raises(TypeError, cd, ffi.NULL) + py.test.raises(TypeError, cd, ffi.typeof("void *")) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -361,7 +361,8 @@ retries += 1 assert retries <= 5 import gc; gc.collect() - assert seen == [40, 40, raw1, raw2] + assert (seen == [40, 40, raw1, raw2] or + seen == [40, 40, raw2, raw1]) assert repr(seen[2]) == "" assert repr(seen[3]) == "" diff --git a/pypy/module/test_lib_pypy/test_os_wait.py 
b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -34,3 +34,7 @@ assert os.WEXITSTATUS(status) == exit_status assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) + +def test_errors(): + py.test.raises(OSError, _pypy_wait.wait3, -999) + py.test.raises(OSError, _pypy_wait.wait4, -999, -999) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -162,7 +162,8 @@ @dont_look_inside def __init__(self, space, name, bases_w, dict_w, - overridetypedef=None, force_new_layout=False): + overridetypedef=None, force_new_layout=False, + is_heaptype=True): self.space = space self.name = name self.bases_w = bases_w @@ -172,7 +173,7 @@ self.weakrefable = False self.w_doc = space.w_None self.weak_subclasses = [] - self.flag_heaptype = False + self.flag_heaptype = is_heaptype self.flag_cpytype = False self.flag_abstract = False self.flag_sequence_bug_compat = False @@ -740,7 +741,7 @@ dict_w[key] = space.getitem(w_dict, w_key) w_type = space.allocate_instance(W_TypeObject, w_typetype) W_TypeObject.__init__(w_type, space, name, bases_w or [space.w_object], - dict_w) + dict_w, is_heaptype=True) w_type.ready() return w_type @@ -1136,7 +1137,6 @@ if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w) - w_self.flag_heaptype = True for w_base in w_self.bases_w: if not isinstance(w_base, W_TypeObject): continue @@ -1161,7 +1161,6 @@ w_doc = w_self.space.newtext_or_none(instancetypedef.doc) w_self.w_doc = w_doc ensure_common_attributes(w_self) - w_self.flag_heaptype = instancetypedef.heaptype # # usually 'instancetypedef' is new, i.e. 
not seen in any base, # but not always (see Exception class) @@ -1335,7 +1334,8 @@ else: overridetypedef = typedef w_type = W_TypeObject(space, typedef.name, bases_w, dict_w, - overridetypedef=overridetypedef) + overridetypedef=overridetypedef, + is_heaptype=overridetypedef.heaptype) if typedef is not overridetypedef: w_type.w_doc = space.newtext_or_none(typedef.doc) if hasattr(typedef, 'flag_sequence_bug_compat'): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1128,7 +1128,7 @@ value = sum(value) elif info.accum_operation == '*': def prod(acc, x): return acc * x - value = reduce(prod, value, 1) + value = reduce(prod, value, 1.0) else: raise NotImplementedError("accum operator in fail guard") values[i] = value diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1066,7 +1066,6 @@ prepare_cond_call_value_r = prepare_cond_call_value_i - def notimplemented(self, op): msg = '[PPC/regalloc] %s not implemented\n' % op.getopname() if we_are_translated(): diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -396,6 +396,7 @@ # * gcmap is pushed # * the old value of these regs must already be stored in the jitframe # * on exit, all registers are restored from the jitframe + # * the result of the call is moved to register r1 mc = InstrBuilder() self.mc = mc @@ -427,6 +428,9 @@ self._reload_frame_if_necessary(mc) self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller + + mc.LGR(r.SCRATCH2, r.RES) + self._pop_core_regs_from_jitframe(mc, saved_regs) if supports_floats: self._pop_fp_regs_from_jitframe(mc) diff --git a/rpython/jit/backend/zarch/opassembler.py 
b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -374,10 +374,12 @@ _COND_CALL_SAVE_REGS = [r.r11, r.r2, r.r3, r.r4, r.r5] def emit_cond_call(self, op, arglocs, regalloc): + resloc = arglocs[0] + arglocs = arglocs[1:] + fcond = self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond.value != c.cond_none.value - fcond = c.negate(fcond) jmp_adr = self.mc.get_relative_pos() self.mc.reserve_cond_jump() # patched later to a relative branch @@ -411,6 +413,8 @@ self.mc.BASR(r.r14, r.r14) # restoring the registers saved above, and doing pop_gcmap(), is left # to the cond_call_slowpath helper. We never have any result value. + if resloc is not None: + self.mc.LGR(resloc, r.SCRATCH2) relative_target = self.mc.currpos() - jmp_adr pmc = OverwritingBuilder(self.mc, jmp_adr, 1) pmc.BRCL(fcond, l.imm(relative_target)) @@ -419,6 +423,9 @@ # guard_no_exception too self.previous_cond_call_jcond = jmp_adr, fcond + emit_cond_call_value_i = emit_cond_call + emit_cond_call_value_r = emit_cond_call + class AllocOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1107,14 +1107,32 @@ def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) - locs = [] + locs = [None] + self.assembler.guard_success_cc = c.negate( + self.assembler.guard_success_cc) # support between 0 and 4 integer arguments assert 2 <= op.numargs() <= 2 + 4 for i in range(1, op.numargs()): loc = self.loc(op.getarg(i)) assert loc.type != FLOAT locs.append(loc) - return locs + return locs # [None, function, arg0, ..., argn] + + def prepare_cond_call_value_i(self, op): + x = self.ensure_reg(op.getarg(0)) + self.load_condition_into_cc(op.getarg(0)) + self.rm.force_allocate_reg(op, selected_reg=x) # spilled if survives + # ^^^ if arg0!=0, we 
jump over the next block of code (the call) + locs = [x] + # support between 0 and 4 integer arguments + assert 2 <= op.numargs() <= 2 + 4 + for i in range(1, op.numargs()): + loc = self.loc(op.getarg(i)) + assert loc.type != FLOAT + locs.append(loc) + return locs # [res, function, args...] + + prepare_cond_call_value_r = prepare_cond_call_value_i def prepare_cond_call_gc_wb(self, op): arglocs = [self.ensure_reg(op.getarg(0))] diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -593,6 +593,8 @@ log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): + # NB. the operation 'raw_malloc' is not supported; this is for + # the operation 'malloc'/'malloc_varsize' with {flavor: 'gc'} d = op.args[1].value.copy() d.pop('flavor') add_memory_pressure = d.pop('add_memory_pressure', False) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -142,10 +142,14 @@ assert len(lst) == len(args_v), ( "not supported so far: 'greens' variables contain Void") # a crash here means that you have to reorder the variable named in - # the JitDriver. Indeed, greens and reds must both be sorted: first - # all INTs, followed by all REFs, followed by all FLOATs. + # the JitDriver. lst2 = sort_vars(lst) - assert lst == lst2 + assert lst == lst2, ("You have to reorder the variables named in " + "the JitDriver (both the 'greens' and 'reds' independently). 
" + "They must be sorted like this: first all the integer-like, " + "then all the pointer-like, and finally the floats.\n" + "Got: %r\n" + "Expected: %r" % (lst, lst2)) return lst # return (_sort(greens_v, True), _sort(reds_v, False)) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -978,10 +978,7 @@ self.right is other.right class AccumPack(Pack): - SUPPORTED = { rop.FLOAT_ADD: '+', - rop.INT_ADD: '+', - rop.FLOAT_MUL: '*', - } + SUPPORTED = { rop.INT_ADD: '+', } def __init__(self, nodes, operator, position): Pack.__init__(self, nodes) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py @@ -197,7 +197,7 @@ f13 = float_add(f12, f11) """) savings = self.savings(loop1) - assert savings == 2 + assert savings == -2 @py.test.mark.parametrize("bytes,s", [(4,0),(8,0)]) def test_sum_float_to_int(self, bytes, s): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1162,32 +1162,32 @@ vopt = self.vectorize(loop,1) self.assert_equal(loop, self.parse_loop(opt)) - def test_accumulate_basic(self): - trace = """ - [p0, i0, f0] - f1 = raw_load_f(p0, i0, descr=floatarraydescr) - f2 = float_add(f0, f1) - i1 = int_add(i0, 8) - i2 = int_lt(i1, 100) - guard_true(i2) [p0, i0, f2] - jump(p0, i1, f2) - """ - trace_opt = """ - [p0, i0, f0] - v6[0xf64] = vec_f() - v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) - v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) - label(p0, i0, v2[2xf64]) - i1 = int_add(i0, 16) - i2 = int_lt(i1, 
100) - guard_true(i2) [p0, i0, v2[2xf64]] - v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) - v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64]) - jump(p0, i1, v3[2xf64]) - """ - loop = self.parse_loop(trace) - opt = self.vectorize(loop) - self.assert_equal(loop, self.parse_loop(trace_opt)) + #def test_accumulate_basic(self): + # trace = """ + # [p0, i0, f0] + # f1 = raw_load_f(p0, i0, descr=floatarraydescr) + # f2 = float_add(f0, f1) + # i1 = int_add(i0, 8) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, f2] + # jump(p0, i1, f2) + # """ + # trace_opt = """ + # [p0, i0, f0] + # v6[0xf64] = vec_f() + # v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64]) + # v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1) + # label(p0, i0, v2[2xf64]) + # i1 = int_add(i0, 16) + # i2 = int_lt(i1, 100) + # guard_true(i2) [p0, i0, v2[2xf64]] + # v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr) + # v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64]) + # jump(p0, i1, v3[2xf64]) + # """ + # loop = self.parse_loop(trace) + # opt = self.vectorize(loop) + # self.assert_equal(loop, self.parse_loop(trace_opt)) def test_element_f45_in_guard_failargs(self): trace = self.parse_loop(""" diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -842,11 +842,16 @@ oplist.append(vecop) opnum = rop.VEC_INT_XOR if datatype == FLOAT: - opnum = rop.VEC_FLOAT_XOR + # see PRECISION loss below + raise NotImplementedError vecop = VecOperation(opnum, [vecop, vecop], vecop, count) oplist.append(vecop) elif pack.reduce_init() == 1: + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied. 
giving an + # inprecision result, thus this is disabled now + raise NotImplementedError # multiply is only supported by floats vecop = OpHelpers.create_vec_expand(ConstFloat(1.0), bytesize, signed, count) diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -414,7 +414,9 @@ lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed) small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False, allow_infinity=False) test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE) - test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) + # PRECISION loss, because the numbers are accumulated (associative, commutative properties must hold) + # you can end up a small number and a huge number that is finally multiplied losing precision + # test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE) def test_constant_expand(self): diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -427,6 +427,13 @@ return result mh._ll_malloc_fixedsize = _ll_malloc_fixedsize + def _ll_malloc_fixedsize_zero(size): + result = mh.allocate(size, zero=True) + if not result: + raise MemoryError() + return result + mh._ll_malloc_fixedsize_zero = _ll_malloc_fixedsize_zero + def _ll_compute_size(length, size, itemsize): try: varsize = ovfcheck(itemsize * length) @@ -453,10 +460,9 @@ def _ll_malloc_varsize_no_length_zero(length, size, itemsize): tot_size = _ll_compute_size(length, size, itemsize) - result = mh.allocate(tot_size) + result = mh.allocate(tot_size, zero=True) if not result: raise MemoryError() - llmemory.raw_memclear(result, tot_size) return result mh.ll_malloc_varsize_no_length_zero = _ll_malloc_varsize_no_length_zero @@ 
-470,17 +476,16 @@ mh = mallocHelpers() mh.allocate = llmemory.raw_malloc ll_raw_malloc_fixedsize = mh._ll_malloc_fixedsize + ll_raw_malloc_fixedsize_zero = mh._ll_malloc_fixedsize_zero ll_raw_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length ll_raw_malloc_varsize = mh.ll_malloc_varsize ll_raw_malloc_varsize_no_length_zero = mh.ll_malloc_varsize_no_length_zero - stack_mh = mallocHelpers() - stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size) - ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize - if self.translator: self.raw_malloc_fixedsize_ptr = self.inittime_helper( ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address) + self.raw_malloc_fixedsize_zero_ptr = self.inittime_helper( + ll_raw_malloc_fixedsize_zero, [lltype.Signed], llmemory.Address) self.raw_malloc_varsize_no_length_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False) self.raw_malloc_varsize_ptr = self.inittime_helper( @@ -488,9 +493,6 @@ self.raw_malloc_varsize_no_length_zero_ptr = self.inittime_helper( ll_raw_malloc_varsize_no_length_zero, [lltype.Signed]*3, llmemory.Address, inline=False) - self.stack_malloc_fixedsize_ptr = self.inittime_helper( - ll_stack_malloc_fixedsize, [lltype.Signed], llmemory.Address) - def gct_malloc(self, hop, add_flags=None): TYPE = hop.spaceop.result.concretetype.TO assert not TYPE._is_varsize() @@ -503,21 +505,16 @@ hop.cast_result(v_raw) def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size): - v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr, c_size], + if flags.get('zero'): + ll_func = self.raw_malloc_fixedsize_zero_ptr + else: + ll_func = self.raw_malloc_fixedsize_ptr + v_raw = hop.genop("direct_call", [ll_func, c_size], resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) if flags.get('track_allocation', True): hop.genop("track_alloc_start", [v_raw]) return v_raw - def gct_fv_stack_malloc(self, hop, flags, 
TYPE, c_size): - v_raw = hop.genop("direct_call", [self.stack_malloc_fixedsize_ptr, c_size], - resulttype=llmemory.Address) - if flags.get('zero'): - hop.genop("raw_memclear", [v_raw, c_size]) - return v_raw - def gct_malloc_varsize(self, hop, add_flags=None): flags = hop.spaceop.args[1].value if add_flags: diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1778,25 +1778,23 @@ finally: lltype.free(l_utsbuf, flavor='raw') -# These are actually macros on some/most systems -c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT) -c_major = external('major', [rffi.INT], rffi.INT) -c_minor = external('minor', [rffi.INT], rffi.INT) +if sys.platform != 'win32': + # These are actually macros on some/most systems + c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True) + c_major = external('major', [rffi.INT], rffi.INT, macro=True) + c_minor = external('minor', [rffi.INT], rffi.INT, macro=True) - at replace_os_function('makedev') - at jit.dont_look_inside -def makedev(maj, min): - return c_makedev(maj, min) + @replace_os_function('makedev') + def makedev(maj, min): + return c_makedev(maj, min) - at replace_os_function('major') - at jit.dont_look_inside -def major(dev): - return c_major(dev) + @replace_os_function('major') + def major(dev): + return c_major(dev) - at replace_os_function('minor') - at jit.dont_look_inside -def minor(dev): - return c_minor(dev) + @replace_os_function('minor') + def minor(dev): + return c_minor(dev) #___________________________________________________________________ diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -398,7 +398,7 @@ baseofs = offsetof(_c.sockaddr_un, 'c_sun_path') self.setdata(sun, baseofs + len(path)) rffi.setintfield(sun, 'c_sun_family', AF_UNIX) - if _c.linux and path.startswith('\x00'): + if _c.linux and path[0] == '\x00': # Linux abstract namespace 
extension if len(path) > sizeof(_c.sockaddr_un.c_sun_path): raise RSocketError("AF_UNIX path too long") diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -327,6 +327,16 @@ def unicode_encode_utf_8(s, size, errors, errorhandler=None, allow_surrogates=allow_surrogate_by_default): + # In this function, allow_surrogates can be: + # + # * True: surrogates are always allowed. A valid surrogate pair + # is replaced with the non-BMP unicode char it stands for, + # which is then encoded as 4 bytes. + # + # * False: surrogates are always forbidden. + # + # See also unicode_encode_utf8sp(). + # if errorhandler is None: errorhandler = default_unicode_error_encode return unicode_encode_utf_8_impl(s, size, errors, errorhandler, @@ -391,6 +401,33 @@ _encodeUCS4(result, ch) return result.build() +def unicode_encode_utf8sp(s, size): + # Surrogate-preserving utf-8 encoding. Any surrogate character + # turns into its 3-bytes encoding, whether it is paired or not. + # This should always be reversible, and the reverse is the regular + # str_decode_utf_8() with allow_surrogates=True. 
+ assert(size >= 0) + result = StringBuilder(size) + pos = 0 + while pos < size: + ch = ord(s[pos]) + pos += 1 + if ch < 0x80: + # Encode ASCII + result.append(chr(ch)) + elif ch < 0x0800: + # Encode Latin-1 + result.append(chr((0xc0 | (ch >> 6)))) + result.append(chr((0x80 | (ch & 0x3f)))) + elif ch < 0x10000: + # Encode UCS2 Unicode ordinals, and surrogates + result.append((chr((0xe0 | (ch >> 12))))) + result.append((chr((0x80 | ((ch >> 6) & 0x3f))))) + result.append((chr((0x80 | (ch & 0x3f))))) + else: + _encodeUCS4(result, ch) + return result.build() + # ____________________________________________________________ # utf-16 diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -281,6 +281,12 @@ def test_isatty(self): assert rposix.isatty(-1) is False + @py.test.mark.skipif("not hasattr(rposix, 'makedev')") + def test_makedev(self): + dev = rposix.makedev(24, 7) + assert rposix.major(dev) == 24 + assert rposix.minor(dev) == 7 + @py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -812,6 +812,21 @@ py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) assert encoder(u'u\u1234', 2, 'replace') == 'u?' 
+ def test_encode_utf8sp(self): + # for the following test, go to lengths to avoid CPython's optimizer + # and .pyc file storage, which collapse the two surrogates into one + c = u"\udc00" + for input, expected in [ + (u"", ""), + (u"abc", "abc"), + (u"\u1234", "\xe1\x88\xb4"), + (u"\ud800", "\xed\xa0\x80"), + (u"\udc00", "\xed\xb0\x80"), + (u"\ud800" + c, "\xed\xa0\x80\xed\xb0\x80"), + ]: + got = runicode.unicode_encode_utf8sp(input, len(input)) + assert got == expected + class TestTranslation(object): def setup_class(cls): diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -997,11 +997,14 @@ # __________________________________________________________ # operations on addresses - def op_raw_malloc(self, size): + def op_raw_malloc(self, size, zero): + assert lltype.typeOf(size) == lltype.Signed + return llmemory.raw_malloc(size, zero=zero) + + def op_boehm_malloc(self, size): assert lltype.typeOf(size) == lltype.Signed return llmemory.raw_malloc(size) - - op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc + op_boehm_malloc_atomic = op_boehm_malloc def op_boehm_register_finalizer(self, p, finalizer): pass @@ -1069,9 +1072,6 @@ assert offset.TYPE == ARGTYPE getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value - def op_stack_malloc(self, size): # mmh - raise NotImplementedError("backend only") - def op_track_alloc_start(self, addr): # we don't do tracking at this level checkadr(addr) diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -506,13 +506,17 @@ llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address, sandboxsafe=True, _nowrapper=True) +llimpl_calloc = rffi.llexternal('calloc', [lltype.Signed, lltype.Signed], + llmemory.Address, + sandboxsafe=True, _nowrapper=True) llimpl_free = rffi.llexternal('free', 
[llmemory.Address], lltype.Void, sandboxsafe=True, _nowrapper=True) def llimpl_arena_malloc(nbytes, zero): - addr = llimpl_malloc(nbytes) - if bool(addr): - llimpl_arena_reset(addr, nbytes, zero) + if zero: + addr = llimpl_calloc(nbytes, 1) + else: + addr = llimpl_malloc(nbytes) return addr llimpl_arena_malloc._always_inline_ = True register_external(arena_malloc, [int, int], llmemory.Address, diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -7,6 +7,7 @@ import weakref from rpython.annotator.bookkeeper import analyzer_for from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool +from rpython.annotator.model import SomeBool from rpython.rlib.objectmodel import Symbolic, specialize from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lltype import SomePtr @@ -936,14 +937,15 @@ # ____________________________________________________________ -def raw_malloc(size): +def raw_malloc(size, zero=False): if not isinstance(size, AddressOffset): raise NotImplementedError(size) - return size._raw_malloc([], zero=False) + return size._raw_malloc([], zero=zero) @analyzer_for(raw_malloc) -def ann_raw_malloc(s_size): +def ann_raw_malloc(s_size, s_zero=None): assert isinstance(s_size, SomeInteger) # XXX add noneg...? 
+ assert s_zero is None or isinstance(s_zero, SomeBool) return SomeAddress() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -396,7 +396,6 @@ 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), - 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), 'adr_add': LLOp(canfold=True), diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -574,10 +574,14 @@ # memory addresses @typer_for(llmemory.raw_malloc) -def rtype_raw_malloc(hop): - v_size, = hop.inputargs(lltype.Signed) +def rtype_raw_malloc(hop, i_zero=None): + v_size = hop.inputarg(lltype.Signed, arg=0) + v_zero, = parse_kwds(hop, (i_zero, None)) + if v_zero is None: + v_zero = hop.inputconst(lltype.Bool, False) hop.exception_cannot_occur() - return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address) + return hop.genop('raw_malloc', [v_size, v_zero], + resulttype=llmemory.Address) @typer_for(llmemory.raw_malloc_usage) def rtype_raw_malloc_usage(hop): diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -372,19 +372,6 @@ result = interpret(getids, [i, j]) assert result -def test_stack_malloc(): - py.test.skip("stack-flavored mallocs no longer supported") - class A(object): - pass - def f(): - a = A() - a.i = 1 - return a.i - interp, graph = get_interpreter(f, []) - graph.startblock.operations[0].args[1] = inputconst(Void, {'flavor': "stack"}) - result = interp.eval_graph(graph, []) - assert result == 1 - def test_invalid_stack_access(): py.test.skip("stack-flavored mallocs no longer supported") class A(object): diff --git 
a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -608,16 +608,6 @@ return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \ % (self.expr(op.args[0]), self.expr(op.args[1])) - def OP_RAW_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult) - - def OP_STACK_MALLOC(self, op): - eresult = self.expr(op.result) - esize = self.expr(op.args[0]) - return "OP_STACK_MALLOC(%s, %s, void *);" % (esize, eresult) - def OP_DIRECT_FIELDPTR(self, op): return self.OP_GETFIELD(op, ampersand='&') diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -8,11 +8,14 @@ #define OP_STACK_CURRENT(r) r = (Signed)&r -#define OP_RAW_MALLOC(size, r, restype) { \ - r = (restype) malloc(size); \ - if (r != NULL) { \ - COUNT_MALLOC; \ - } \ +#define OP_RAW_MALLOC(size, zero, result) { \ + if (zero) \ + result = calloc(size, 1); \ + else \ + result = malloc(size); \ + if (result != NULL) { \ + COUNT_MALLOC; \ + } \ } #define OP_RAW_FREE(p, r) free(p); COUNT_FREE; @@ -26,10 +29,6 @@ #define alloca _alloca #endif -#define OP_STACK_MALLOC(size,r,restype) \ - r = (restype) alloca(size); \ - if (r != NULL) memset((void*) r, 0, size); - #define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size); #define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size); diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -32,7 +32,29 @@ assert res == 42 res = fc(1) assert res == 1 - + +def test_memory_access_zero(): + def f(): + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=False) + addr.signed[1] = 10000 + i + blocks.append(addr) + for addr in 
blocks: + raw_free(addr) + result = 0 + blocks = [] + for i in range(1000): + addr = raw_malloc(16, zero=True) + result |= addr.signed[1] + blocks.append(addr) + for addr in blocks: + raw_free(addr) + return result + fc = compile(f, []) + res = fc() + assert res == 0 + def test_memory_float(): S = lltype.GcStruct("S", ("x", lltype.Float), ("y", lltype.Float)) offset = FieldOffset(S, 'x') @@ -155,18 +177,6 @@ fn = compile(f, [int]) assert fn(1) == 2 -def test_flavored_malloc_stack(): - class A(object): - _alloc_flavor_ = "stack" - def __init__(self, val): - self.val = val - def f(x): - a = A(x + 1) - result = a.val - return result - fn = compile(f, [int]) - assert fn(1) == 2 - def test_gcref(): if sys.platform == 'darwin': py.test.skip("'boehm' may crash") From pypy.commits at gmail.com Wed Dec 14 11:46:41 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 14 Dec 2016 08:46:41 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: a few tweaks Message-ID: <58517771.85452e0a.7242c.e0ba@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89059:83148ff60b21 Date: 2016-12-14 17:37 +0100 http://bitbucket.org/pypy/pypy/changeset/83148ff60b21/ Log: a few tweaks diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -388,7 +388,7 @@ if space.isinstance_w(w_s, space.w_unicode): raise oefmt(space.w_TypeError, "Expected utf8-encoded str, got unicode") - s = space.str_w(w_s) + s = space.bytes_w(w_s) decoder = JSONDecoder(space, s) try: w_res = decoder.decode_any(0) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -314,7 +314,7 @@ return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): args = 
[_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) + return rffi.cast(rffi.CHAR, space.text_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -181,10 +181,10 @@ def convert_argument(self, space, w_obj, address, call_local): w_tc = space.findattr(w_obj, space.newtext('typecode')) - if w_tc is not None and space.str_w(w_tc) != self.typecode: + if w_tc is not None and space.text_w(w_tc) != self.typecode: raise oefmt(space.w_TypeError, "expected %s pointer type, but received %s", - self.typecode, space.str_w(w_tc)) + self.typecode, space.text_w(w_tc)) x = rffi.cast(rffi.VOIDPP, address) try: x[0] = rffi.cast(rffi.VOIDP, get_rawbuffer(space, w_obj)) @@ -362,7 +362,7 @@ class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) - arg = space.str_w(w_obj) + arg = space.text_w(w_obj) x[0] = rffi.cast(rffi.LONG, rffi.str2charp(arg)) ba = rffi.cast(rffi.CCHARP, address) ba[capi.c_function_arg_typeoffset(space)] = 'o' @@ -555,7 +555,7 @@ arg = InstanceConverter._unwrap_object(self, space, w_obj) return capi.c_stdstring2stdstring(space, arg) else: - return capi.c_charp2stdstring(space, space.str_w(w_obj)) + return capi.c_charp2stdstring(space, space.text_w(w_obj)) def to_memory(self, space, w_obj, w_value, offset): try: @@ -838,7 +838,7 @@ arg = InstanceConverter._unwrap_object(self, space, w_obj) return capi.backend.c_TString2TString(space, arg) else: - return capi.backend.c_charp2TString(space, space.str_w(w_obj)) + return 
capi.backend.c_charp2TString(space, space.text_w(w_obj)) def free_argument(self, space, arg, call_local): capi.c_destruct(space, self.cppclass, rffi.cast(capi.C_OBJECT, rffi.cast(rffi.VOIDPP, arg)[0])) diff --git a/pypy/module/cppyy/ffitypes.py b/pypy/module/cppyy/ffitypes.py --- a/pypy/module/cppyy/ffitypes.py +++ b/pypy/module/cppyy/ffitypes.py @@ -48,7 +48,7 @@ value = rffi.cast(rffi.CHAR, space.c_int_w(w_value)) else: - value = space.str_w(w_value) + value = space.bytes_w(w_value) if len(value) != 1: raise oefmt(space.w_ValueError, diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -429,9 +429,9 @@ assert lltype.typeOf(cppthis) == capi.C_OBJECT for i in range(len(args_w)): try: - s = self.space.str_w(args_w[i]) + s = self.space.text_w(args_w[i]) except OperationError: - s = self.space.str_w(self.space.getattr(args_w[i], self.space.newtext('__name__'))) + s = self.space.text_w(self.space.getattr(args_w[i], self.space.newtext('__name__'))) s = capi.c_resolve_name(self.space, s) if s != self.templ_args[i]: raise oefmt(self.space.w_TypeError, @@ -1009,7 +1009,7 @@ @unwrap_spec(args_w='args_w') def __call__(self, args_w): # TODO: this is broken but unused (see pythonify.py) - fullname = "".join([self.name, '<', self.space.str_w(args_w[0]), '>']) + fullname = "".join([self.name, '<', self.space.text_w(args_w[0]), '>']) return scope_byname(self.space, fullname) W_CPPTemplateType.typedef = TypeDef( @@ -1286,9 +1286,9 @@ rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.newtext("_cpp_proxy")) if not w_cppclass: - w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) + w_cppclass = scope_byname(space, space.text_w(w_pycppclass)) if not w_cppclass: raise oefmt(space.w_TypeError, - "no such class: %s", space.str_w(w_pycppclass)) + "no such class: %s", space.text_w(w_pycppclass)) cppclass = 
space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -580,18 +580,16 @@ len_result = len(result) result_w = [None] * len_result for i in range(len_result): - if type(result[i]) is unicode: - result_w[i] = space.newunicode(result[i]) - else: - w_bytes = space.newtext(result[i]) - try: - result_w[i] = space.call_method(w_bytes, - "decode", w_fs_encoding) - except OperationError as e: - # fall back to the original byte string - if e.async(space): - raise - result_w[i] = w_bytes + res = result[i] + w_bytes = space.newtext(res) + try: + result_w[i] = space.call_method(w_bytes, + "decode", w_fs_encoding) + except OperationError as e: + # fall back to the original byte string + if e.async(space): + raise + result_w[i] = w_bytes return space.newlist(result_w) else: dirname = space.str0_w(w_dirname) From pypy.commits at gmail.com Wed Dec 14 11:46:43 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 14 Dec 2016 08:46:43 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: merge default Message-ID: <58517773.17052e0a.7f223.d370@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89060:8e5b0b6e8166 Date: 2016-12-14 17:42 +0100 http://bitbucket.org/pypy/pypy/changeset/8e5b0b6e8166/ Log: merge default diff too long, truncating to 2000 out of 11227 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,135 +1,36 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection 
system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++. +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a backend, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. 
-In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html - Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. 
+This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build) and Python2.7 (for LLVM). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ pypy-c -m pip install PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ - -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. 
- -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. - -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. -Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling. 
Basic bindings example diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -689,10 +689,10 @@ return space.newtuple([builtin_code, space.newtuple([space.newtext(self.identifier)])]) - def find(space, indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function return Function.find(space, identifier).code - find = staticmethod(find) def signature(self): return self.sig diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml -I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import 
cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( "cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, 
cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + "cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, 
random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return _c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = 
_c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG -C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ -1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import 
rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath, translator_c_dir] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - 
use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. 
(char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - space.setattr(w_self, space.newtext('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = 
tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, 
cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.newlong(i)) - activate_branch(space, w_b) - space.call_method(w_branch, "SetStatus", space.newint(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.newtext('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it 
for reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.newint(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.newlong(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data - w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.newlong(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.newtext('_'+attr), cdata) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, 
w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.newtext("*"), space.newint(0)) - - def iter_w(self): - return self - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = interp2app(f).spacebind(space) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.newtext(m1), - space.getattr(w_pycppclass, space.newtext(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.newtext("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - 
_method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.newtext("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.newtext("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.newtext("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, 
lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return 
rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... +# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise 
OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem 
import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), 'is_enum' : ([c_ccharp], c_int), # type/class reflection information @@ -216,8 +249,14 @@ 'strtoull' : ([c_ccharp], c_ullong), 'free' : ([c_voidp], c_void), - 'charp2stdstring' : ([c_ccharp], c_object), + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + } # size/offset are backend-specific but fixed after load @@ -277,87 +316,99 @@ ptr = w_cdata.unsafe_escaping_ptr() return rffi.cast(rffi.VOIDP, ptr) +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + def c_load_dictionary(name): return libffi.CDLL(name) # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] + args = [_ArgH(cppscope.handle), _ArgL(iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): - return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) -def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + args = [_ArgH(cppclass.handle), _ArgH(cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_ArgH(cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'deallocate', [_ArgH(cppclass.handle), _ArgH(cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'destruct', 
[_ArgH(cppclass.handle), _ArgH(cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.CHAR, space.text_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), 
_ArgP(cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return call_capi(space, 'call_s', args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'call_s', + [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), + _ArgP(rffi.cast(rffi.VOIDP, length))]) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return _cdata_to_ccharp(space, w_cstr), cstr_len def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def 
c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), _ArgH(cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) -def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHPTRGETTER_PTR, - _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) +def c_get_function_address(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return rffi.cast(C_FUNC_PTR, + _cdata_to_ptr(space, call_capi(space, 'get_function_address', args))) # handling of function argument buffer --------------------------------------- def c_allocate_function_args(space, size): - return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) + return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_ArgL(size)])) def c_deallocate_function_args(space, cargs): - call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) + call_capi(space, 'deallocate_function_args', [_ArgP(cargs)]) def c_function_arg_sizeof(space): state = space.fromcache(State) return state.c_sizeof_farg @@ -367,30 +418,34 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_ArgH(scope)])) +def c_is_template(space, name): + return space.bool_w(call_capi(space, 'is_template', [_ArgS(name)])) +def c_is_abstract(space, cpptype): + return space.bool_w(call_capi(space, 'is_abstract', [_ArgH(cpptype)])) def c_is_enum(space, name): - return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) + return space.bool_w(call_capi(space, 'is_enum', [_ArgS(name)])) # type/class reflection 
information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_ArgH(cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_ArgH(cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_ArgH(handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_ArgH(cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] + args = [_ArgH(cppclass.handle), _ArgL(base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_ArgH(derived.handle), _ArgH(base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + args = [_ArgH(derived_h), _ArgH(base_h), _ArgH(address), _ArgL(direction)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -401,13 +456,13 @@ # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(h=cppscope.handle)] + args = [_ArgH(cppscope.handle)] return 
space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] + args = [_ArgH(cppscope.handle), _ArgL(imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -423,91 +478,91 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, 
cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(h=cppscope.handle) - arg2 = _Arg(l=index) + arg1 = _ArgH(cppscope.handle) + arg2 = _ArgL(index) args = [c_resolve_name(space, charp2str_free(space, - call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) + call_capi(space, 'method_template_arg_name', [arg1, arg2, _ArgL(iarg)])) ) for iarg in range(nargs)] return args def c_get_method(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] + args = [_ArgH(nss.handle), _ArgH(lc.handle), _ArgH(rc.handle), _ArgS(op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = 
[_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_ArgH(cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers 
--------------------------------------------------------------- def c_strtoll(space, svalue): - return space.r_longlong_w(call_capi(space, 'strtoll', [_Arg(s=svalue)])) + return space.r_longlong_w(call_capi(space, 'strtoll', [_ArgS(svalue)])) def c_strtoull(space, svalue): - return space.r_ulonglong_w(call_capi(space, 'strtoull', [_Arg(s=svalue)])) + return space.r_ulonglong_w(call_capi(space, 'strtoull', [_ArgS(svalue)])) def c_free(space, voidp): - call_capi(space, 'free', [_Arg(vp=voidp)]) + call_capi(space, 'free', [_ArgP(voidp)]) def charp2str_free(space, cdata): charp = rffi.cast(rffi.CCHARP, _cdata_to_ptr(space, cdata)) @@ -515,15 +570,60 @@ c_free(space, rffi.cast(rffi.VOIDP, charp)) return pystr -def c_charp2stdstring(space, svalue): - return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) +def c_charp2stdstring(space, svalue, sz): + return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', + [_ArgS(svalue), _ArgH(rffi.cast(rffi.ULONG, sz))])) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'stdstring2charp', + [_ArgH(cppstr), _ArgP(rffi.cast(rffi.VOIDP, sz))]) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(_cdata_to_ccharp(space, w_cstr), cstr_len) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_ArgH(cppobject)])) -# loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) + +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) +def c_stdvector_valuesize(space, pystr): + return 
_cdata_to_size_t(space, call_capi(space, 'stdvector_valuesize', [_ArgS(pystr)])) + + +# TODO: factor these out ... +# pythonizations +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") diff --git a/pypy/module/cppyy/capi/reflex_capi.py b/pypy/module/cppyy/capi/reflex_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/reflex_capi.py +++ /dev/null @@ -1,63 +0,0 @@ -import py, os - -from rpython.rlib import libffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -# require local translator path to pickup common defs -from rpython.translator import cdir -translator_c_dir = py.path.local(cdir) - -import commands -(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") - -if os.environ.get("ROOTSYS"): - if config_stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], 
"include")] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - if config_stat == 0: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() - else: - rootincpath = [] - rootlibpath = [] - -def identify(): - return 'Reflex' - -ts_reflect = False -ts_call = 'auto' -ts_memory = 'auto' -ts_helper = 'auto' - -std_string_name = 'std::basic_string' - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("reflexcwrapper.cxx")], - include_dirs=[incpath, translator_c_dir] + rootincpath, - includes=["reflexcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Reflex"], - use_cpp_linker=True, -) - -def c_load_dictionary(name): - return libffi.CDLL(name) - - -# Reflex-specific pythonizations -def register_pythonizations(space): - "NOT_RPYTHON" - pass - -def pythonize(space, name, w_pycppclass): - pass diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -3,8 +3,8 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import r_singlefloat -from rpython.rlib import jit_libffi, rfloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib import rfloat from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -81,11 +81,11 @@ class TypeConverter(object): - _immutable_fields_ = ['libffitype', 'uses_local', 'name'] + _immutable_fields_ = ['cffi_name', 'uses_local', 'name'] - libffitype = lltype.nullptr(jit_libffi.FFI_TYPE_P.TO) + cffi_name = None uses_local = False - name = "" + name = "" def __init__(self, space, extra): pass @@ -103,6 +103,10 @@ raise oefmt(space.w_TypeError, "no 
converter available for '%s'", self.name) + def cffi_type(self, space): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -143,9 +147,7 @@ class ArrayTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): if array_size <= 0: @@ -153,6 +155,10 @@ else: self.size = array_size + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def from_memory(self, space, w_obj, w_pycppclass, offset): # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) @@ -172,13 +178,15 @@ class PtrTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): self.size = sys.maxint + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): w_tc = space.findattr(w_obj, space.newtext('typecode')) if w_tc is not None and space.text_w(w_tc) != self.typecode: @@ -241,6 +249,10 @@ uses_local = True + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument_libffi(self, space, w_obj, address, call_local): assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP) # see interp_cppyy.py obj = self._unwrap_object(space, w_obj) @@ -255,6 +267,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = self.typecode class FloatTypeConverterMixin(NumericTypeConverterMixin): _mixin_ = 
True @@ -267,13 +281,15 @@ class VoidConverter(TypeConverter): - _immutable_fields_ = ['libffitype', 'name'] - - libffitype = jit_libffi.types.void + _immutable_fields_ = ['name'] def __init__(self, space, name): self.name = name + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_void + def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -282,6 +298,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) @@ -305,6 +323,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.CCHARP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) @@ -331,13 +351,15 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) rffiptr = rffi.cast(self.c_ptrtype, address) - return space.newfloat(float(rffiptr[0])) + return self._wrap_object(space, rffiptr[0]) class ConstFloatRefConverter(FloatConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'f' - libffitype = jit_libffi.types.pointer - typecode = 'F' + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -353,11 +375,22 @@ self.default = rffi.cast(self.c_type, 0.) 
class ConstDoubleRefConverter(ConstRefNumericTypeConverterMixin, DoubleConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'd' - libffitype = jit_libffi.types.pointer - typecode = 'D' +class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default'] + def __init__(self, space, default): + if default: + fval = float(rfloat.rstring_to_float(default)) + else: + fval = float(0.) + self.default = r_longfloat(fval) + +class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['typecode'] + typecode = 'g' class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): @@ -377,10 +410,6 @@ class VoidPtrConverter(TypeConverter): - _immutable_fields_ = ['libffitype'] - - libffitype = jit_libffi.types.pointer - def _unwrap_object(self, space, w_obj): try: obj = get_rawbuffer(space, w_obj) @@ -393,6 +422,10 @@ obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) @@ -422,9 +455,10 @@ address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) class VoidPtrPtrConverter(TypeConverter): - _immutable_fields_ = ['uses_local'] + _immutable_fields_ = ['uses_local', 'typecode'] uses_local = True + typecode = 'a' def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) @@ -435,7 +469,7 @@ except TypeError: r[0] = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) x[0] = rffi.cast(rffi.VOIDP, call_local) - ba[capi.c_function_arg_typeoffset(space)] = 'a' + ba[capi.c_function_arg_typeoffset(space)] = self.typecode def finalize_call(self, space, w_obj, call_local): r = 
rffi.cast(rffi.VOIDPP, call_local) @@ -445,13 +479,13 @@ pass # no set on buffer/array/None class VoidPtrRefConverter(VoidPtrPtrConverter): - _immutable_fields_ = ['uses_local'] + _immutable_fields_ = ['uses_local', 'typecode'] uses_local = True + typecode = 'V' class InstanceRefConverter(TypeConverter): - _immutable_fields_ = ['libffitype', 'cppclass'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['typecode', 'cppclass'] + typecode = 'V' def __init__(self, space, cppclass): from pypy.module.cppyy.interp_cppyy import W_CPPClass @@ -469,12 +503,16 @@ raise oefmt(space.w_TypeError, "cannot pass %T as %s", w_obj, self.cppclass.name) + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) x[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_obj)) address = rffi.cast(capi.C_OBJECT, address) ba = rffi.cast(rffi.CCHARP, address) - ba[capi.c_function_arg_typeoffset(space)] = 'o' + ba[capi.c_function_arg_typeoffset(space)] = self.typecode def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) @@ -496,6 +534,7 @@ class InstancePtrConverter(InstanceRefConverter): + typecode = 'o' def _unwrap_object(self, space, w_obj): try: @@ -509,8 +548,7 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) from pypy.module.cppyy import interp_cppyy - return interp_cppyy.wrap_cppobject(space, address, self.cppclass, - do_cast=False, is_ref=True) + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, do_cast=False) def to_memory(self, space, w_obj, w_value, offset): address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) @@ -541,6 +579,11 @@ r = rffi.cast(rffi.VOIDPP, call_local) w_obj._rawobject = rffi.cast(capi.C_OBJECT, r[0]) + def from_memory(self, 
space, w_obj, w_pycppclass, offset): + address = rffi.cast(capi.C_OBJECT, self._get_raw_address(space, w_obj, offset)) + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.wrap_cppobject(space, address, self.cppclass, + do_cast=False, is_ref=True) class StdStringConverter(InstanceConverter): From pypy.commits at gmail.com Wed Dec 14 12:26:54 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 14 Dec 2016 09:26:54 -0800 (PST) Subject: [pypy-commit] pypy default: Simplify the unwrapper_raise/unwrapper_catch mess a little Message-ID: <585180de.46052e0a.af613.d9a5@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89061:c36941f291c3 Date: 2016-12-14 17:26 +0000 http://bitbucket.org/pypy/pypy/changeset/c36941f291c3/ Log: Simplify the unwrapper_raise/unwrapper_catch mess a little diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -379,103 +379,97 @@ if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) - def make_unwrapper(catch_exception): - # ZZZ is this whole logic really needed??? It seems to be only - # for RPython code calling PyXxx() functions directly. 
I would - # think that usually directly calling the function is clean - # enough now - names = api_function.argnames - types_names_enum_ui = unrolling_iterable(enumerate( - zip(api_function.argtypes, - [tp_name.startswith("w_") for tp_name in names]))) + names = api_function.argnames + types_names_enum_ui = unrolling_iterable(enumerate( + zip(api_function.argtypes, + [tp_name.startswith("w_") for tp_name in names]))) - @specialize.ll() - def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj - from pypy.module.cpyext.pyobject import from_ref, as_pyobj - newargs = () - keepalives = () - assert len(args) == len(api_function.argtypes) - for i, (ARG, is_wrapped) in types_names_enum_ui: - input_arg = args[i] - if is_PyObject(ARG) and not is_wrapped: - # build a 'PyObject *' (not holding a reference) - if not is_pyobj(input_arg): - keepalives += (input_arg,) - arg = rffi.cast(ARG, as_pyobj(space, input_arg)) - else: - arg = rffi.cast(ARG, input_arg) - elif ARG == rffi.VOIDP and not is_wrapped: - # unlike is_PyObject case above, we allow any kind of - # argument -- just, if it's an object, we assume the - # caller meant for it to become a PyObject*. 
- if input_arg is None or isinstance(input_arg, W_Root): - keepalives += (input_arg,) - arg = rffi.cast(ARG, as_pyobj(space, input_arg)) - else: - arg = rffi.cast(ARG, input_arg) - elif (is_PyObject(ARG) or ARG == rffi.VOIDP) and is_wrapped: - # build a W_Root, possibly from a 'PyObject *' - if is_pyobj(input_arg): - arg = from_ref(space, input_arg) - else: - arg = input_arg + @specialize.ll() + def unwrapper(space, *args): + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj + from pypy.module.cpyext.pyobject import from_ref, as_pyobj + newargs = () + keepalives = () + assert len(args) == len(api_function.argtypes) + for i, (ARG, is_wrapped) in types_names_enum_ui: + input_arg = args[i] + if is_PyObject(ARG) and not is_wrapped: + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif ARG == rffi.VOIDP and not is_wrapped: + # unlike is_PyObject case above, we allow any kind of + # argument -- just, if it's an object, we assume the + # caller meant for it to become a PyObject*. 
+ if input_arg is None or isinstance(input_arg, W_Root): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif (is_PyObject(ARG) or ARG == rffi.VOIDP) and is_wrapped: + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_ref(space, input_arg) + else: + arg = input_arg - ## ZZZ: for is_pyobj: - ## try: - ## arg = from_ref(space, - ## rffi.cast(PyObject, input_arg)) - ## except TypeError, e: - ## err = oefmt(space.w_TypeError, - ## "could not cast arg to PyObject") - ## if not catch_exception: - ## raise err - ## state = space.fromcache(State) - ## state.set_exception(err) - ## if is_PyObject(restype): - ## return None - ## else: - ## return api_function.error_value - else: - # arg is not declared as PyObject, no magic - arg = input_arg - newargs += (arg, ) - if not catch_exception: - try: - res = func(space, *newargs) - finally: - keepalive_until_here(*keepalives) + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = oefmt(space.w_TypeError, + ## "could not cast arg to PyObject") + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # non-rpython variant - assert not we_are_translated() - try: - res = func(space, *newargs) - except OperationError as e: - if not hasattr(api_function, "error_value"): - raise - state = space.fromcache(State) - state.set_exception(e) - if is_PyObject(restype): - return None - else: - return api_function.error_value - # 'keepalives' is alive here (it's not rpython) - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer, ( - 'got %r not integer' % (res,)) - return res - unwrapper.func = func - unwrapper.api_func = api_function - return unwrapper + # arg is not 
declared as PyObject, no magic + arg = input_arg + newargs += (arg, ) + try: + return func(space, *newargs) + finally: + keepalive_until_here(*keepalives) - unwrapper_catch = make_unwrapper(True) - unwrapper_raise = make_unwrapper(False) + unwrapper.func = func + unwrapper.api_func = api_function + + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now + def unwrapper_catch(space, *args): + try: + res = unwrapper(space, *args) + except OperationError as e: + if not hasattr(api_function, "error_value"): + raise + state = space.fromcache(State) + state.set_exception(e) + if is_PyObject(restype): + return None + else: + return api_function.error_value + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res + if header is not None: if header == DEFAULT_HEADER: FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function - INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests - return unwrapper_raise # used in 'normal' RPython code. + INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests + return unwrapper # used in 'normal' RPython code. 
return decorate def cpython_struct(name, fields, forward=None, level=1): From pypy.commits at gmail.com Wed Dec 14 12:44:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 14 Dec 2016 09:44:24 -0800 (PST) Subject: [pypy-commit] pypy default: Don't register things manually with INTERPLEVEL_API, since they're never called as api.XXX Message-ID: <585184f8.8dcd190a.d236e.de4d@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89062:faa5d87bf2a6 Date: 2016-12-14 17:43 +0000 http://bitbucket.org/pypy/pypy/changeset/faa5d87bf2a6/ Log: Don't register things manually with INTERPLEVEL_API, since they're never called as api.XXX diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,7 +7,7 @@ from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject, - INTERPLEVEL_API, PyVarObject) + PyVarObject) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject @@ -245,12 +245,10 @@ else: return lltype.nullptr(PyObject.TO) as_pyobj._always_inline_ = 'try' -INTERPLEVEL_API['as_pyobj'] = as_pyobj def pyobj_has_w_obj(pyobj): w_obj = rawrefcount.to_obj(W_Root, pyobj) return w_obj is not None and w_obj is not w_marker_deallocating -INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) def is_pyobj(x): @@ -260,7 +258,6 @@ return True else: raise TypeError(repr(type(x))) -INTERPLEVEL_API['is_pyobj'] = staticmethod(is_pyobj) class Entry(ExtRegistryEntry): _about_ = is_pyobj @@ -286,7 +283,6 @@ if not is_pyobj(obj): keepalive_until_here(obj) return pyobj -INTERPLEVEL_API['make_ref'] = make_ref @specialize.ll() @@ -307,13 +303,11 @@ assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY keepalive_until_here(w_obj) return w_obj -INTERPLEVEL_API['get_w_obj_and_decref'] = 
get_w_obj_and_decref @specialize.ll() def incref(space, obj): make_ref(space, obj) -INTERPLEVEL_API['incref'] = incref @specialize.ll() def decref(space, obj): @@ -326,7 +320,6 @@ _Py_Dealloc(space, obj) else: get_w_obj_and_decref(space, obj) -INTERPLEVEL_API['decref'] = decref @cpython_api([PyObject], lltype.Void) From pypy.commits at gmail.com Wed Dec 14 14:22:20 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 14 Dec 2016 11:22:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <58519bec.a013190a.9a9d0.b0bc@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89063:8454bbf43352 Date: 2016-12-14 18:13 +0000 http://bitbucket.org/pypy/pypy/changeset/8454bbf43352/ Log: hg merge default diff too long, truncating to 2000 out of 11892 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -78,3 +78,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,135 +1,36 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++. +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. 
+Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a backend, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. 
-Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html - Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build) and Python2.7 (for LLVM). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. 
the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ pypy-c -m pip install PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ - -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. - -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. 
-Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling. Basic bindings example diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -426,6 +426,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -257,16 +257,15 @@ def descr_function_repr(self): return self.getrepr(self.space, u'function %s' % self.qualname) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r and %r" % ( identifier, previous, self)) @@ -274,10 +273,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ 
b/pypy/interpreter/gateway.py @@ -724,10 +724,10 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code - find = staticmethod(find) + return Function.find(space, identifier).code def signature(self): return self.sig diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -73,7 +73,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) @@ -82,7 +82,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml 
-I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( 
"cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def 
c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + "cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return 
_c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG 
-C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ -1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with 
rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class 
State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. (char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # 
w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - space.setattr(w_self, space.wrap('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. 
remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.wrap(i)) - activate_branch(space, w_b) - 
space.call_method(w_branch, "SetStatus", space.wrap(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.wrap('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it for reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data 
- w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.wrap('_'+attr), space.wrap(cdata)) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) - - def iter_w(self): - return self.space.wrap(self) - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. 
Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.wrap("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.wrap("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, 
but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of 
libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... 
+# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = 
capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, 
objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), 'is_enum' : ([c_ccharp], c_int), # type/class reflection information @@ -216,8 +249,14 @@ 'strtoull' : ([c_ccharp], c_ullong), 'free' : ([c_voidp], c_void), - 'charp2stdstring' : ([c_ccharp], c_object), + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + } # size/offset are backend-specific but fixed after load @@ -277,87 +316,99 @@ ptr = w_cdata.unsafe_escaping_ptr() return rffi.cast(rffi.VOIDP, ptr) +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + def c_load_dictionary(name): return libffi.CDLL(name) # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] + args = [_ArgH(cppscope.handle), _ArgL(iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): - return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) -def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + args = [_ArgH(cppclass.handle), _ArgH(cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_ArgH(cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'deallocate', [_ArgH(cppclass.handle), _ArgH(cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'destruct', 
[_ArgH(cppclass.handle), _ArgH(cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), 
_ArgP(cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return call_capi(space, 'call_s', args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'call_s', + [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), + _ArgP(rffi.cast(rffi.VOIDP, length))]) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return _cdata_to_ccharp(space, w_cstr), cstr_len def c_constructor(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_cobject(space, call_capi(space, 'constructor', args)) def 
c_call_o(space, cppmethod, cppobject, nargs, cargs, cppclass): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs), _Arg(h=cppclass.handle)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs), _ArgH(cppclass.handle)] return _cdata_to_cobject(space, call_capi(space, 'call_o', args)) -def c_get_methptr_getter(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] - return rffi.cast(C_METHPTRGETTER_PTR, - _cdata_to_ptr(space, call_capi(space, 'get_methptr_getter', args))) +def c_get_function_address(space, cppscope, index): + args = [_ArgH(cppscope.handle), _ArgL(index)] + return rffi.cast(C_FUNC_PTR, + _cdata_to_ptr(space, call_capi(space, 'get_function_address', args))) # handling of function argument buffer --------------------------------------- def c_allocate_function_args(space, size): - return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_Arg(l=size)])) + return _cdata_to_ptr(space, call_capi(space, 'allocate_function_args', [_ArgL(size)])) def c_deallocate_function_args(space, cargs): - call_capi(space, 'deallocate_function_args', [_Arg(vp=cargs)]) + call_capi(space, 'deallocate_function_args', [_ArgP(cargs)]) def c_function_arg_sizeof(space): state = space.fromcache(State) return state.c_sizeof_farg @@ -367,30 +418,34 @@ # scope reflection information ----------------------------------------------- def c_is_namespace(space, scope): - return space.bool_w(call_capi(space, 'is_namespace', [_Arg(h=scope)])) + return space.bool_w(call_capi(space, 'is_namespace', [_ArgH(scope)])) +def c_is_template(space, name): + return space.bool_w(call_capi(space, 'is_template', [_ArgS(name)])) +def c_is_abstract(space, cpptype): + return space.bool_w(call_capi(space, 'is_abstract', [_ArgH(cpptype)])) def c_is_enum(space, name): - return space.bool_w(call_capi(space, 'is_enum', [_Arg(s=name)])) + return space.bool_w(call_capi(space, 'is_enum', [_ArgS(name)])) # type/class reflection 
information ------------------------------------------ def c_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'final_name', [_ArgH(cpptype)])) def c_scoped_final_name(space, cpptype): - return charp2str_free(space, call_capi(space, 'scoped_final_name', [_Arg(h=cpptype)])) + return charp2str_free(space, call_capi(space, 'scoped_final_name', [_ArgH(cpptype)])) def c_has_complex_hierarchy(space, handle): - return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_Arg(h=handle)])) + return space.bool_w(call_capi(space, 'has_complex_hierarchy', [_ArgH(handle)])) def c_num_bases(space, cppclass): - return space.int_w(call_capi(space, 'num_bases', [_Arg(h=cppclass.handle)])) + return space.int_w(call_capi(space, 'num_bases', [_ArgH(cppclass.handle)])) def c_base_name(space, cppclass, base_index): - args = [_Arg(h=cppclass.handle), _Arg(l=base_index)] + args = [_ArgH(cppclass.handle), _ArgL(base_index)] return charp2str_free(space, call_capi(space, 'base_name', args)) def c_is_subtype(space, derived, base): jit.promote(base) if derived == base: return bool(1) - return space.bool_w(call_capi(space, 'is_subtype', [_Arg(h=derived.handle), _Arg(h=base.handle)])) + return space.bool_w(call_capi(space, 'is_subtype', [_ArgH(derived.handle), _ArgH(base.handle)])) def _c_base_offset(space, derived_h, base_h, address, direction): - args = [_Arg(h=derived_h), _Arg(h=base_h), _Arg(h=address), _Arg(l=direction)] + args = [_ArgH(derived_h), _ArgH(base_h), _ArgH(address), _ArgL(direction)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'base_offset', args)) def c_base_offset(space, derived, base, address, direction): if derived == base: @@ -401,13 +456,13 @@ # method/function reflection information ------------------------------------- def c_num_methods(space, cppscope): - args = [_Arg(h=cppscope.handle)] + args = [_ArgH(cppscope.handle)] return 
space.int_w(call_capi(space, 'num_methods', args)) def c_method_index_at(space, cppscope, imethod): - args = [_Arg(h=cppscope.handle), _Arg(l=imethod)] + args = [_ArgH(cppscope.handle), _ArgL(imethod)] return space.int_w(call_capi(space, 'method_index_at', args)) def c_method_indices_from_name(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] indices = rffi.cast(C_INDEX_ARRAY, _cdata_to_ptr(space, call_capi(space, 'method_indices_from_name', args))) if not indices: @@ -423,91 +478,91 @@ return py_indices def c_method_name(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_name', args)) def c_method_result_type(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_result_type', args)) def c_method_num_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_args', args)) def c_method_req_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_req_args', args)) def c_method_arg_type(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_type', args)) def c_method_arg_default(space, cppscope, index, arg_index): - args = [_Arg(h=cppscope.handle), _Arg(l=index), _Arg(l=arg_index)] + args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)] return charp2str_free(space, call_capi(space, 'method_arg_default', args)) def c_method_signature(space, 
cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return charp2str_free(space, call_capi(space, 'method_signature', args)) def c_method_is_template(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'method_is_template', args)) def _c_method_num_template_args(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return space.int_w(call_capi(space, 'method_num_template_args', args)) def c_template_args(space, cppscope, index): nargs = _c_method_num_template_args(space, cppscope, index) - arg1 = _Arg(h=cppscope.handle) - arg2 = _Arg(l=index) + arg1 = _ArgH(cppscope.handle) + arg2 = _ArgL(index) args = [c_resolve_name(space, charp2str_free(space, - call_capi(space, 'method_template_arg_name', [arg1, arg2, _Arg(l=iarg)])) + call_capi(space, 'method_template_arg_name', [arg1, arg2, _ArgL(iarg)])) ) for iarg in range(nargs)] return args def c_get_method(space, cppscope, index): - args = [_Arg(h=cppscope.handle), _Arg(l=index)] + args = [_ArgH(cppscope.handle), _ArgL(index)] return rffi.cast(C_METHOD, space.uint_w(call_capi(space, 'get_method', args))) def c_get_global_operator(space, nss, lc, rc, op): if nss is not None: - args = [_Arg(h=nss.handle), _Arg(h=lc.handle), _Arg(h=rc.handle), _Arg(s=op)] + args = [_ArgH(nss.handle), _ArgH(lc.handle), _ArgH(rc.handle), _ArgS(op)] return rffi.cast(WLAVC_INDEX, space.int_w(call_capi(space, 'get_global_operator', args))) return rffi.cast(WLAVC_INDEX, -1) # method properties ---------------------------------------------------------- def c_is_constructor(space, cppclass, index): - args = [_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_constructor', args)) def c_is_staticmethod(space, cppclass, index): - args = 
[_Arg(h=cppclass.handle), _Arg(l=index)] + args = [_ArgH(cppclass.handle), _ArgL(index)] return space.bool_w(call_capi(space, 'is_staticmethod', args)) # data member reflection information ----------------------------------------- def c_num_datamembers(space, cppscope): - return space.int_w(call_capi(space, 'num_datamembers', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_datamembers', [_ArgH(cppscope.handle)])) def c_datamember_name(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_name', args)) def c_datamember_type(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return charp2str_free(space, call_capi(space, 'datamember_type', args)) def c_datamember_offset(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return _cdata_to_ptrdiff_t(space, call_capi(space, 'datamember_offset', args)) def c_datamember_index(space, cppscope, name): - args = [_Arg(h=cppscope.handle), _Arg(s=name)] + args = [_ArgH(cppscope.handle), _ArgS(name)] return space.int_w(call_capi(space, 'datamember_index', args)) # data member properties ----------------------------------------------------- def c_is_publicdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_publicdata', args)) def c_is_staticdata(space, cppscope, datamember_index): - args = [_Arg(h=cppscope.handle), _Arg(l=datamember_index)] + args = [_ArgH(cppscope.handle), _ArgL(datamember_index)] return space.bool_w(call_capi(space, 'is_staticdata', args)) # misc helpers 
--------------------------------------------------------------- def c_strtoll(space, svalue): - return space.r_longlong_w(call_capi(space, 'strtoll', [_Arg(s=svalue)])) + return space.r_longlong_w(call_capi(space, 'strtoll', [_ArgS(svalue)])) def c_strtoull(space, svalue): - return space.r_ulonglong_w(call_capi(space, 'strtoull', [_Arg(s=svalue)])) + return space.r_ulonglong_w(call_capi(space, 'strtoull', [_ArgS(svalue)])) def c_free(space, voidp): - call_capi(space, 'free', [_Arg(vp=voidp)]) + call_capi(space, 'free', [_ArgP(voidp)]) def charp2str_free(space, cdata): charp = rffi.cast(rffi.CCHARP, _cdata_to_ptr(space, cdata)) @@ -515,15 +570,60 @@ c_free(space, rffi.cast(rffi.VOIDP, charp)) return pystr -def c_charp2stdstring(space, svalue): - return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', [_Arg(s=svalue)])) +def c_charp2stdstring(space, svalue, sz): + return _cdata_to_cobject(space, call_capi(space, 'charp2stdstring', + [_ArgS(svalue), _ArgH(rffi.cast(rffi.ULONG, sz))])) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + w_cstr = call_capi(space, 'stdstring2charp', + [_ArgH(cppstr), _ArgP(rffi.cast(rffi.VOIDP, sz))]) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(_cdata_to_ccharp(space, w_cstr), cstr_len) def c_stdstring2stdstring(space, cppobject): - return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_Arg(h=cppobject)])) + return _cdata_to_cobject(space, call_capi(space, 'stdstring2stdstring', [_ArgH(cppobject)])) -# loadable-capi-specific pythonizations (none, as the capi isn't known until runtime) +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) + +def c_stdvector_valuetype(space, pystr): + return charp2str_free(space, call_capi(space, 'stdvector_valuetype', [_ArgS(pystr)])) +def c_stdvector_valuesize(space, pystr): + return 
_cdata_to_size_t(space, call_capi(space, 'stdvector_valuesize', [_ArgS(pystr)])) + + +# TODO: factor these out ... +# pythonizations +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") diff --git a/pypy/module/cppyy/capi/reflex_capi.py b/pypy/module/cppyy/capi/reflex_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/reflex_capi.py +++ /dev/null @@ -1,59 +0,0 @@ -import py, os - -from rpython.rlib import libffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -import commands -(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") - -if os.environ.get("ROOTSYS"): - if config_stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include")] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = 
[incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - if config_stat == 0: - rootincpath = [incdir] - rootlibpath = commands.getoutput("root-config --libdir").split() - else: - rootincpath = [] - rootlibpath = [] - -def identify(): - return 'Reflex' - -ts_reflect = False -ts_call = 'auto' -ts_memory = 'auto' -ts_helper = 'auto' - -std_string_name = 'std::basic_string' - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("reflexcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["reflexcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Reflex"], - use_cpp_linker=True, -) - -def c_load_dictionary(name): - return libffi.CDLL(name) - - -# Reflex-specific pythonizations -def register_pythonizations(space): - "NOT_RPYTHON" - pass - -def pythonize(space, name, w_pycppclass): - pass diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -3,8 +3,8 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rarithmetic import r_singlefloat -from rpython.rlib import jit_libffi, rfloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib import rfloat from pypy.module._rawffi.interp_rawffi import letter2tp from pypy.module._rawffi.array import W_Array, W_ArrayInstance @@ -81,11 +81,11 @@ class TypeConverter(object): - _immutable_fields_ = ['libffitype', 'uses_local', 'name'] + _immutable_fields_ = ['cffi_name', 'uses_local', 'name'] - libffitype = lltype.nullptr(jit_libffi.FFI_TYPE_P.TO) + cffi_name = None uses_local = False - name = "" + name = "" def __init__(self, space, extra): pass @@ -103,6 +103,10 @@ raise oefmt(space.w_TypeError, "no converter available for '%s'", self.name) + def cffi_type(self, space): + from pypy.module.cppyy.interp_cppyy import FastCallNotPossible + raise FastCallNotPossible + 
def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -143,9 +147,7 @@ class ArrayTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): if array_size <= 0: @@ -153,6 +155,10 @@ else: self.size = array_size + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def from_memory(self, space, w_obj, w_pycppclass, offset): # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) @@ -172,13 +178,15 @@ class PtrTypeConverterMixin(object): _mixin_ = True - _immutable_fields_ = ['libffitype', 'size'] - - libffitype = jit_libffi.types.pointer + _immutable_fields_ = ['size'] def __init__(self, space, array_size): self.size = sys.maxint + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): w_tc = space.findattr(w_obj, space.wrap('typecode')) if w_tc is not None and space.str_w(w_tc) != self.typecode: @@ -241,6 +249,10 @@ uses_local = True + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument_libffi(self, space, w_obj, address, call_local): assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP) # see interp_cppyy.py obj = self._unwrap_object(space, w_obj) @@ -255,6 +267,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = self.typecode class FloatTypeConverterMixin(NumericTypeConverterMixin): _mixin_ = True @@ -267,13 +281,15 @@ class VoidConverter(TypeConverter): - _immutable_fields_ = ['libffitype', 'name'] - - libffitype = jit_libffi.types.void + _immutable_fields_ = 
['name'] def __init__(self, space, name): self.name = name + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_void + def convert_argument(self, space, w_obj, address, call_local): self._is_abstract(space) @@ -282,6 +298,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(rffi.LONGP, address) @@ -305,6 +323,8 @@ def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.CCHARP, address) x[0] = self._unwrap_object(space, w_obj) + ba = rffi.cast(rffi.CCHARP, address) + ba[capi.c_function_arg_typeoffset(space)] = 'b' def convert_argument_libffi(self, space, w_obj, address, call_local): x = rffi.cast(self.c_ptrtype, address) @@ -331,13 +351,15 @@ def from_memory(self, space, w_obj, w_pycppclass, offset): address = self._get_raw_address(space, w_obj, offset) rffiptr = rffi.cast(self.c_ptrtype, address) - return space.wrap(float(rffiptr[0])) + return self._wrap_object(space, rffiptr[0]) class ConstFloatRefConverter(FloatConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'f' - libffitype = jit_libffi.types.pointer - typecode = 'F' + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp def convert_argument_libffi(self, space, w_obj, address, call_local): from pypy.module.cppyy.interp_cppyy import FastCallNotPossible @@ -353,11 +375,22 @@ self.default = rffi.cast(self.c_type, 0.) 
class ConstDoubleRefConverter(ConstRefNumericTypeConverterMixin, DoubleConverter): - _immutable_fields_ = ['libffitype', 'typecode'] + _immutable_fields_ = ['typecode'] + typecode = 'd' - libffitype = jit_libffi.types.pointer - typecode = 'D' +class LongDoubleConverter(ffitypes.typeid(rffi.LONGDOUBLE), FloatTypeConverterMixin, TypeConverter): + _immutable_fields_ = ['default'] + def __init__(self, space, default): + if default: + fval = float(rfloat.rstring_to_float(default)) + else: + fval = float(0.) + self.default = r_longfloat(fval) + +class ConstLongDoubleRefConverter(ConstRefNumericTypeConverterMixin, LongDoubleConverter): + _immutable_fields_ = ['typecode'] + typecode = 'g' class CStringConverter(TypeConverter): def convert_argument(self, space, w_obj, address, call_local): @@ -377,10 +410,6 @@ class VoidPtrConverter(TypeConverter): - _immutable_fields_ = ['libffitype'] - - libffitype = jit_libffi.types.pointer - def _unwrap_object(self, space, w_obj): try: obj = get_rawbuffer(space, w_obj) @@ -393,6 +422,10 @@ obj = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) return obj + def cffi_type(self, space): + state = space.fromcache(ffitypes.State) + return state.c_voidp + def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) @@ -422,9 +455,10 @@ address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) class VoidPtrPtrConverter(TypeConverter): - _immutable_fields_ = ['uses_local'] + _immutable_fields_ = ['uses_local', 'typecode'] uses_local = True + typecode = 'a' def convert_argument(self, space, w_obj, address, call_local): x = rffi.cast(rffi.VOIDPP, address) @@ -435,7 +469,7 @@ except TypeError: r[0] = rffi.cast(rffi.VOIDP, get_rawobject(space, w_obj)) x[0] = rffi.cast(rffi.VOIDP, call_local) - ba[capi.c_function_arg_typeoffset(space)] = 'a' + ba[capi.c_function_arg_typeoffset(space)] = self.typecode def finalize_call(self, space, w_obj, call_local): From 
pypy.commits at gmail.com Wed Dec 14 15:10:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 14 Dec 2016 12:10:57 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Update typeobjectdefs.py to match object.h Message-ID: <5851a751.50dd190a.4409c.deb9@mx.google.com> Author: Ronan Lamy Branch: py3.5 Changeset: r89064:6821bcbfa71b Date: 2016-12-14 20:10 +0000 http://bitbucket.org/pypy/pypy/changeset/6821bcbfa71b/ Log: Update typeobjectdefs.py to match object.h diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -34,12 +34,8 @@ ternaryfunc = P(FT([PyO, PyO, PyO], PyO)) inquiry = P(FT([PyO], rffi.INT_real)) lenfunc = P(FT([PyO], Py_ssize_t)) -intargfunc = P(FT([PyO, rffi.INT_real], PyO)) -intintargfunc = P(FT([PyO, rffi.INT_real, rffi.INT], PyO)) ssizeargfunc = P(FT([PyO, Py_ssize_t], PyO)) ssizessizeargfunc = P(FT([PyO, Py_ssize_t, Py_ssize_t], PyO)) -intobjargproc = P(FT([PyO, rffi.INT_real, PyO], rffi.INT)) -intintobjargproc = P(FT([PyO, rffi.INT_real, rffi.INT, PyO], rffi.INT)) ssizeobjargproc = P(FT([PyO, Py_ssize_t, PyO], rffi.INT_real)) ssizessizeobjargproc = P(FT([PyO, Py_ssize_t, Py_ssize_t, PyO], rffi.INT_real)) objobjargproc = P(FT([PyO, PyO, PyO], rffi.INT_real)) @@ -70,7 +66,6 @@ ("nb_add", binaryfunc), ("nb_subtract", binaryfunc), ("nb_multiply", binaryfunc), - ("nb_divide", binaryfunc), ("nb_remainder", binaryfunc), ("nb_divmod", binaryfunc), ("nb_power", ternaryfunc), @@ -85,12 +80,11 @@ ("nb_xor", binaryfunc), ("nb_or", binaryfunc), ("nb_int", unaryfunc), - ("nb_long", unaryfunc), + ("nb_reserved", rffi.VOIDP), ("nb_float", unaryfunc), ("nb_inplace_add", binaryfunc), ("nb_inplace_subtract", binaryfunc), ("nb_inplace_multiply", binaryfunc), - ("nb_inplace_divide", binaryfunc), ("nb_inplace_remainder", binaryfunc), ("nb_inplace_power", ternaryfunc), ("nb_inplace_lshift", binaryfunc), @@ -105,6 +99,8 @@ 
("nb_inplace_true_divide", binaryfunc), ("nb_index", unaryfunc), + ("nb_matrix_multiply", binaryfunc), + ("nb_inplace_matrix_multiply", binaryfunc), )) PySequenceMethods = cpython_struct("PySequenceMethods", ( @@ -124,6 +120,12 @@ ("mp_ass_subscript", objobjargproc), )) +PyAsyncMethods = cpython_struct("PyAsyncMethods", ( + ("am_await", unaryfunc), + ("am_aiter", unaryfunc), + ("am_anext", unaryfunc), +)) + PyBufferProcs = cpython_struct("PyBufferProcs", ( ("bf_getbuffer", getbufferproc), ("bf_releasebuffer", releasebufferproc), @@ -155,7 +157,7 @@ ("tp_print", printfunc), #U ("tp_getattr", getattrfunc), #U ("tp_setattr", setattrfunc), #U - ("tp_compare", cmpfunc), #N + ("tp_as_async", Ptr(PyAsyncMethods)), #N ("tp_repr", reprfunc), #N # Method suites for standard classes From pypy.commits at gmail.com Thu Dec 15 08:16:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 05:16:04 -0800 (PST) Subject: [pypy-commit] pypy default: PyWeakref_Check*() variants Message-ID: <58529794.6a18190a.f9301.2152@mx.google.com> Author: Armin Rigo Branch: Changeset: r89065:04c5ad075bde Date: 2016-12-15 14:15 +0100 http://bitbucket.org/pypy/pypy/changeset/04c5ad075bde/ Log: PyWeakref_Check*() variants diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py --- a/pypy/module/cpyext/test/test_weakref.py +++ b/pypy/module/cpyext/test/test_weakref.py @@ -56,3 +56,30 @@ ) ]) module.test_macro_cast() + + def test_weakref_check(self): + module = self.import_extension('foo', [ + ("test_weakref_cast", "METH_O", + """ + return Py_BuildValue("iiii", + (int)PyWeakref_Check(args), + (int)PyWeakref_CheckRef(args), + (int)PyWeakref_CheckRefExact(args), + (int)PyWeakref_CheckProxy(args)); + """ + ) + ]) + import weakref + def foo(): pass + class Bar(object): + pass + bar = Bar() + assert module.test_weakref_cast([]) == (0, 0, 0, 0) + assert module.test_weakref_cast(weakref.ref(foo)) == (1, 1, 1, 0) + assert 
module.test_weakref_cast(weakref.ref(bar)) == (1, 1, 1, 0) + assert module.test_weakref_cast(weakref.proxy(foo)) == (1, 0, 0, 1) + assert module.test_weakref_cast(weakref.proxy(bar)) == (1, 0, 0, 1) + class X(weakref.ref): + pass + assert module.test_weakref_cast(X(foo)) == (1, 1, 0, 0) + assert module.test_weakref_cast(X(bar)) == (1, 1, 0, 0) diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py --- a/pypy/module/cpyext/weakrefobject.py +++ b/pypy/module/cpyext/weakrefobject.py @@ -1,6 +1,7 @@ from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyobject import PyObject, CANNOT_FAIL from pypy.module._weakref.interp__weakref import W_Weakref, proxy +from pypy.module._weakref.interp__weakref import W_Proxy, W_CallableProxy from rpython.rtyper.lltypesystem import rffi @cpython_api([PyObject, PyObject], PyObject) @@ -54,3 +55,34 @@ PyWeakref_GetObject() and Py_INCREF().) """ return space.call_function(w_ref) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRef(space, w_obj): + """Return true if ob is a reference object. + """ + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return (space.is_w(w_obj_type, w_type) or + space.issubtype_w(w_obj_type, w_type)) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRefExact(space, w_obj): + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return space.is_w(w_obj_type, w_type) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckProxy(space, w_obj): + """Return true if ob is a proxy object. 
+ """ + w_obj_type = space.type(w_obj) + w_type1 = space.gettypeobject(W_Proxy.typedef) + w_type2 = space.gettypeobject(W_CallableProxy.typedef) + return space.is_w(w_obj_type, w_type1) or space.is_w(w_obj_type, w_type2) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_Check(space, w_obj): + """Return true if ob is either a reference or proxy object. + """ + return (PyWeakref_CheckRef(space, w_obj) or + PyWeakref_CheckProxy(space, w_obj)) From pypy.commits at gmail.com Thu Dec 15 08:23:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 05:23:38 -0800 (PST) Subject: [pypy-commit] pypy default: Uh, should be part of 04c5ad075bde Message-ID: <5852995a.1a082e0a.4c771.2248@mx.google.com> Author: Armin Rigo Branch: Changeset: r89066:4d3b8f90d4f2 Date: 2016-12-15 14:21 +0100 http://bitbucket.org/pypy/pypy/changeset/4d3b8f90d4f2/ Log: Uh, should be part of 04c5ad075bde diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -2210,21 +2210,3 @@ it causes an exception to immediately be thrown; this is used for the throw() methods of generator objects.""" raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_Check(space, ob): - """Return true if ob is either a reference or proxy object. - """ - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckRef(space, ob): - """Return true if ob is a reference object. - """ - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckProxy(space, ob): - """Return true if ob is a proxy object. 
- """ - raise NotImplementedError From pypy.commits at gmail.com Thu Dec 15 08:32:10 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 05:32:10 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Skip a check on pypy Message-ID: <58529b5a.879e190a.31dab.229f@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89067:c143364bcd26 Date: 2016-12-15 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/c143364bcd26/ Log: Skip a check on pypy diff --git a/lib-python/3/test/test_generators.py b/lib-python/3/test/test_generators.py --- a/lib-python/3/test/test_generators.py +++ b/lib-python/3/test/test_generators.py @@ -47,7 +47,8 @@ g = gen() next(g) g.send(g) - self.assertGreater(sys.getrefcount(g), 2) + if hasattr(sys, 'getrefcount'): + self.assertGreater(sys.getrefcount(g), 2) self.assertFalse(finalized) del g support.gc_collect() From pypy.commits at gmail.com Thu Dec 15 08:57:17 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 05:57:17 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix test Message-ID: <5852a13d.8a4d2e0a.39835.2352@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89068:3c82fb676089 Date: 2016-12-15 14:33 +0100 http://bitbucket.org/pypy/pypy/changeset/3c82fb676089/ Log: fix test diff --git a/lib-python/3/test/test_extcall.py b/lib-python/3/test/test_extcall.py --- a/lib-python/3/test/test_extcall.py +++ b/lib-python/3/test/test_extcall.py @@ -49,14 +49,14 @@ >>> f(1, 2, 3, **{'a':4, 'b':5}) (1, 2, 3) {'a': 4, 'b': 5} - >>> f(1, 2, **{'a': -1, 'b': 5}, **{'a': 4, 'c': 6}) + >>> f(1, 2, **{'a': -1, 'b': 5}, **{'a': 4, 'c': 6}) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: f() got multiple values for keyword argument 'a' - >>> f(1, 2, **{'a': -1, 'b': 5}, a=4, c=6) + TypeError: ...got multiple values for keyword argument 'a' + >>> f(1, 2, **{'a': -1, 'b': 5}, a=4, c=6) #doctest: +ELLIPSIS Traceback (most recent call last): ... 
- TypeError: f() got multiple values for keyword argument 'a' + TypeError: ...got multiple values for keyword argument 'a' >>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7}) (1, 2, 3, 4, 5) {'a': 6, 'b': 7} >>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9}) @@ -217,7 +217,7 @@ >>> f(**{1:2}) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: ...keywords must be strings + TypeError: ...keywords must be strings... >>> h(**{'e': 2}) Traceback (most recent call last): From pypy.commits at gmail.com Thu Dec 15 08:57:19 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 05:57:19 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Follow-up for 0146779efba0: don't mask the original TypeError in Message-ID: <5852a13f.4391190a.fcd8e.2555@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89069:76dcc5502332 Date: 2016-12-15 14:55 +0100 http://bitbucket.org/pypy/pypy/changeset/76dcc5502332/ Log: Follow-up for 0146779efba0: don't mask the original TypeError in more general cases (but only on Python 3.x) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -92,7 +92,7 @@ args_w = space.fixedview(w_stararg) except OperationError as e: if (e.match(space, space.w_TypeError) and - not space.is_generator(w_stararg)): + not space.is_iterable(w_stararg)): raise oefmt(space.w_TypeError, "argument after * must be an iterable, not %T", w_stararg) diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -878,6 +878,21 @@ else: assert False, "Expected TypeError" + def test_call_iter_dont_eat_typeerror(self): + # same as test_cpython_issue4806, not only for generators + # (only for 3.x, on CPython 2.7 this case still eats the + # TypeError and replaces it with "argument after * ...") + class X: + def __iter__(self): + raise 
TypeError("myerror") + def f(): + pass + e = raises(TypeError, "f(*42)") + assert str(e.value).endswith( + "argument after * must be an iterable, not int") + e = raises(TypeError, "f(*X())") + assert str(e.value) == "myerror" + def test_keyword_arg_after_keywords_dict(self): """ def f(x, y): diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -272,6 +272,15 @@ raise oefmt(space.w_ValueError, "__len__() should return >= 0") return result + def is_iterable(space, w_obj): + w_descr = space.lookup(w_obj, '__iter__') + if w_descr is None: + if space.type(w_obj).flag_map_or_seq != 'M': + w_descr = space.lookup(w_obj, '__getitem__') + if w_descr is None: + return False + return True + def iter(space, w_obj): w_descr = space.lookup(w_obj, '__iter__') if w_descr is None: From pypy.commits at gmail.com Thu Dec 15 09:06:59 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 06:06:59 -0800 (PST) Subject: [pypy-commit] pypy py3.5: adapt test to pypy's exact behavior Message-ID: <5852a383.11482e0a.45d8e.245f@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89070:8cd625f42c0d Date: 2016-12-15 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/8cd625f42c0d/ Log: adapt test to pypy's exact behavior diff --git a/lib-python/3/test/test_grammar.py b/lib-python/3/test/test_grammar.py --- a/lib-python/3/test/test_grammar.py +++ b/lib-python/3/test/test_grammar.py @@ -1,7 +1,7 @@ # Python test set -- part 1, grammar. # This just tests whether the parser accepts them all. 
-from test.support import check_syntax_error +from test.support import check_syntax_error, check_impl_detail import inspect import unittest import sys @@ -419,8 +419,16 @@ with self.assertRaisesRegex(SyntaxError, custom_msg): exec(source) source = source.replace("foo", "(foo.)") + # PyPy's parser also detects the same "Missing parentheses" + # if there are some parentheses later in the line + # (above, the cases that contain '{1:'). + # CPython gives up in this case. + if check_impl_detail(pypy=True) and '{1:' in source: + expected = custom_msg + else: + expected = "invalid syntax" with self.subTest(source=source): - with self.assertRaisesRegex(SyntaxError, "invalid syntax"): + with self.assertRaisesRegex(SyntaxError, expected): exec(source) def test_del_stmt(self): From pypy.commits at gmail.com Thu Dec 15 09:38:54 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 06:38:54 -0800 (PST) Subject: [pypy-commit] pypy py3.5: mark some tests as impl details Message-ID: <5852aafe.511c190a.bae2.282f@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89071:01d17dcb5fe6 Date: 2016-12-15 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/01d17dcb5fe6/ Log: mark some tests as impl details diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -1775,8 +1775,9 @@ for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', b'x', 42, None: A = type('A', (), {'__doc__': doc}) self.assertEqual(A.__doc__, doc) - with self.assertRaises(UnicodeEncodeError): - type('A', (), {'__doc__': 'x\udcdcy'}) + if check_impl_detail(): # CPython encodes __doc__ into tp_doc + with self.assertRaises(UnicodeEncodeError): + type('A', (), {'__doc__': 'x\udcdcy'}) A = type('A', (), {}) self.assertEqual(A.__doc__, None) @@ -1807,8 +1808,9 @@ def test_bad_slots(self): with self.assertRaises(TypeError): type('A', (), {'__slots__': b'x'}) - with self.assertRaises(TypeError): - 
type('A', (int,), {'__slots__': 'x'}) + if check_impl_detail(): # 'int' is variable-sized on CPython 3.x + with self.assertRaises(TypeError): + type('A', (int,), {'__slots__': 'x'}) with self.assertRaises(TypeError): type('A', (), {'__slots__': ''}) with self.assertRaises(TypeError): From pypy.commits at gmail.com Thu Dec 15 09:38:57 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 06:38:57 -0800 (PST) Subject: [pypy-commit] pypy py3.5: "Fix" these tests by changing 'str_or_None' to mean that bytes are not Message-ID: <5852ab01.99012e0a.4d48d.288a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89072:7d75f981d293 Date: 2016-12-15 15:35 +0100 http://bitbucket.org/pypy/pypy/changeset/7d75f981d293/ Log: "Fix" these tests by changing 'str_or_None' to mean that bytes are not acceptable diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1567,7 +1567,8 @@ return self.buffer_w(w_obj, flags).as_str() def str_or_None_w(self, w_obj): - return None if self.is_none(w_obj) else self.str_w(w_obj) + # FIXME: XXX for now, inconsistent with str_w() + return None if self.is_none(w_obj) else self.identifier_w(w_obj) def str_w(self, w_obj): """ diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -38,6 +38,9 @@ raises(ValueError, bytearray, [65, -3]) raises(TypeError, bytearray, [65.0]) raises(ValueError, bytearray, -1) + assert bytearray('abc', 'ascii') == b'abc' + raises(TypeError, bytearray, 'abc', b'ascii') + raises(UnicodeEncodeError, bytearray, '\x80', 'ascii') def test_init_override(self): class subclass(bytearray): diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ 
b/pypy/objspace/std/test/test_bytesobject.py @@ -108,6 +108,8 @@ assert bytes([42]) == b'*' assert bytes([0xFC]) == b'\xFC' assert bytes([42, 0xCC]) == b'*\xCC' + raises(TypeError, bytes, 'abc', b'ascii') + raises(UnicodeEncodeError, bytes, '\x80', 'ascii') def test_constructor_list_of_objs(self): class X: From pypy.commits at gmail.com Thu Dec 15 10:06:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 07:06:04 -0800 (PST) Subject: [pypy-commit] pypy py3.5: bytes(x) should return x.__bytes__() even if that is a subclass of 'bytes' Message-ID: <5852b15c.0a4a2e0a.d87cb.297c@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89073:4a3854e5e543 Date: 2016-12-15 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/4a3854e5e543/ Log: bytes(x) should return x.__bytes__() even if that is a subclass of 'bytes' diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -544,8 +544,16 @@ w_item = space.getitem(w_source, space.wrap(0)) value = getbytevalue(space, w_item) return W_BytesObject(value) - # - value = newbytesdata_w(space, w_source, encoding, errors) + else: + # special-case 'bytes(X)' if X has a __bytes__() method: + # we must return the result unmodified even if it is a + # subclass of bytes + w_result = invoke_bytes_method(space, w_source) + if w_result is not None: + return w_result + value = newbytesdata_w_tail(space, w_source) + else: + value = newbytesdata_w(space, w_source, encoding, errors) w_obj = space.allocate_instance(W_BytesObject, w_stringtype) W_BytesObject.__init__(w_obj, value) return w_obj @@ -699,6 +707,16 @@ raise oefmt(space.w_ValueError, "byte must be in range(0, 256)") return chr(value) +def invoke_bytes_method(space, w_source): + w_bytes_method = space.lookup(w_source, "__bytes__") + if w_bytes_method is not None: + w_bytes = space.get_and_call_function(w_bytes_method, w_source) + if not 
space.isinstance_w(w_bytes, space.w_bytes): + raise oefmt(space.w_TypeError, + "__bytes__ returned non-bytes (type '%T')", w_bytes) + return w_bytes + return None + def newbytesdata_w(space, w_source, encoding, errors): # None value if w_source is None: @@ -725,16 +743,18 @@ raise oefmt(space.w_TypeError, "string argument without an encoding") # Fast-path for bytes - if space.isinstance_w(w_source, space.w_str): + if space.type(w_source) is space.w_bytes: return space.bytes_w(w_source) # Some other object with a __bytes__ special method (could be str subclass) - w_bytes_method = space.lookup(w_source, "__bytes__") - if w_bytes_method is not None: - w_bytes = space.get_and_call_function(w_bytes_method, w_source) - if not space.isinstance_w(w_bytes, space.w_bytes): - raise oefmt(space.w_TypeError, - "__bytes__ returned non-bytes (type '%T')", w_bytes) - return space.bytes_w(w_bytes) + w_result = invoke_bytes_method(space, w_source) + if w_result is not None: + return space.bytes_w(w_result) + + return newbytesdata_w_tail(space, w_source) + +def newbytesdata_w_tail(space, w_source): + # converts rare case of bytes constructor arguments: we don't have + # any encodings/errors, and the argument does not have __bytes__() if space.isinstance_w(w_source, space.w_unicode): raise oefmt(space.w_TypeError, "string argument without an encoding") diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -969,3 +969,30 @@ def test_constructor_typeerror(self): raises(TypeError, bytes, b'', 'ascii') raises(TypeError, bytes, '') + + def test_constructor_subclass(self): + class Sub(bytes): + pass + class X: + def __bytes__(self): + return Sub(b'foo') + assert type(bytes(X())) is Sub + + def test_constructor_subclass_2(self): + class Sub(bytes): + pass + class X(bytes): + def __bytes__(self): + return Sub(b'foo') + assert type(bytes(X())) is 
Sub + + def test_constructor_subclass_3(self): + class Sub(bytes): + pass + class X(bytes): + def __bytes__(self): + return Sub(b'foo') + class Sub1(bytes): + pass + assert type(Sub1(X())) is Sub1 + assert Sub1(X()) == b'foo' From pypy.commits at gmail.com Thu Dec 15 10:21:47 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 07:21:47 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Don't look up dynamically '_frozen_importlib.__import__()', it might Message-ID: <5852b50b.411b190a.11eef.2b23@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89074:f73566c74279 Date: 2016-12-15 16:21 +0100 http://bitbucket.org/pypy/pypy/changeset/f73566c74279/ Log: Don't look up dynamically '_frozen_importlib.__import__()', it might have been changed or removed. In CPython 3.5, they hard-coded the logic in C for performance anyway diff --git a/pypy/module/_frozen_importlib/interp_import.py b/pypy/module/_frozen_importlib/interp_import.py --- a/pypy/module/_frozen_importlib/interp_import.py +++ b/pypy/module/_frozen_importlib/interp_import.py @@ -1,11 +1,19 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError +from pypy.interpreter.baseobjspace import SpaceCache + + +class FrozenCache(SpaceCache): + def __init__(self, space): + mod = space.getbuiltinmodule('_frozen_importlib') + self.w_frozen_import = mod.get('__import__') + assert self.w_frozen_import is not None + def import_with_frames_removed(space, __args__): try: return space.call_args( - space.getbuiltinmodule('_frozen_importlib').getdictvalue( - space, '__import__'), __args__) + space.fromcache(FrozenCache).w_frozen_import, __args__) except OperationError as e: e.remove_traceback_module_frames('') raise From pypy.commits at gmail.com Thu Dec 15 10:47:11 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 15 Dec 2016 07:47:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Remove the functions' Stackless-style __reduce__ method: otherwise, Message-ID: 
<5852baff.84472e0a.ced74.2cdb@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89075:e8b1e5c2b023 Date: 2016-12-15 16:45 +0100 http://bitbucket.org/pypy/pypy/changeset/e8b1e5c2b023/ Log: Remove the functions' Stackless-style __reduce__ method: otherwise, some tests fail because local functions are reduced using this method (but not global functions). This is probably very confusing. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -280,6 +280,7 @@ find = staticmethod(find) def descr_function__reduce__(self, space): + XXX # This is not used any more from pypy.interpreter.gateway import BuiltinCode from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') @@ -325,6 +326,7 @@ return nt([new_inst, nt(tup_base), nt(tup_state)]) def descr_function__setstate__(self, space, w_args): + XXX # This is not used any more args_w = space.unpackiterable(w_args) try: (w_name, w_qualname, w_doc, w_code, w_func_globals, w_closure, diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -136,6 +136,7 @@ def test_pickle_non_top_reachable_func(self): self.skip_on_cpython() + skip("this behavior was disabled to follow CPython more closely") def func(): return 42 global a @@ -199,6 +200,7 @@ assert a == result def test_pickle_method(self): + skip("this behavior was disabled to follow CPython more closely") class myclass(object): def f(self): return 42 @@ -221,6 +223,7 @@ del sys.modules['mod'] def test_pickle_staticmethod(self): + skip("this behavior was disabled to follow CPython more closely") self.skip_on_cpython() class myclass(object): def f(): @@ -233,6 +236,7 @@ assert method() == result() def test_pickle_classmethod(self): + skip("this behavior was disabled to follow 
CPython more closely") class myclass(object): def f(cls): return cls diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -672,8 +672,8 @@ descrmismatch='__call__'), __get__ = interp2app(descr_function_get), __repr__ = interp2app(Function.descr_function_repr, descrmismatch='__repr__'), - __reduce__ = interp2app(Function.descr_function__reduce__), - __setstate__ = interp2app(Function.descr_function__setstate__), + #__reduce__ = interp2app(Function.descr_function__reduce__), + #__setstate__ = interp2app(Function.descr_function__setstate__), __code__ = getset_func_code, __doc__ = getset_func_doc, __name__ = getset_func_name, From pypy.commits at gmail.com Thu Dec 15 11:58:06 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 15 Dec 2016 08:58:06 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: A branch to use a cffi-style C parser to create rffi objects in cpyext. Message-ID: <5852cb9e.11482e0a.45d8e.2fa4@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89076:df792feb93b7 Date: 2016-12-15 16:51 +0000 http://bitbucket.org/pypy/pypy/changeset/df792feb93b7/ Log: A branch to use a cffi-style C parser to create rffi objects in cpyext. 
From pypy.commits at gmail.com Thu Dec 15 11:58:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 15 Dec 2016 08:58:09 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Import a stripped-down copy of cffi/cparser.py Message-ID: <5852cba1.1a0e2e0a.b350c.3172@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89077:22dd82d0da49 Date: 2016-12-15 16:57 +0000 http://bitbucket.org/pypy/pypy/changeset/22dd82d0da49/ Log: Import a stripped-down copy of cffi/cparser.py diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/cparser.py @@ -0,0 +1,641 @@ +from cffi import api, model +from cffi.commontypes import COMMON_TYPES, resolve_common_type +import pycparser +import weakref, re + +_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", + re.DOTALL | re.MULTILINE) +_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" + r"\b((?:[^\n\\]|\\.)*?)$", + re.DOTALL | re.MULTILINE) +_r_words = re.compile(r"\w+|\S") +_parser_cache = None +_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") +_r_star_const_space = re.compile( # matches "* const " + r"[*]\s*((const|volatile|restrict)\b\s*)+") + +def _get_parser(): + global _parser_cache + if _parser_cache is None: + _parser_cache = pycparser.CParser() + return _parser_cache + +def _preprocess(csource): + # Remove comments. NOTE: this only work because the cdef() section + # should not contain any string literal! 
+ csource = _r_comment.sub(' ', csource) + # Remove the "#define FOO x" lines + macros = {} + for match in _r_define.finditer(csource): + macroname, macrovalue = match.groups() + macrovalue = macrovalue.replace('\\\n', '').strip() + macros[macroname] = macrovalue + csource = _r_define.sub('', csource) + # + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) + return csource, macros + +def _common_type_names(csource): + # Look in the source for what looks like usages of types from the + # list of common types. A "usage" is approximated here as the + # appearance of the word, minus a "definition" of the type, which + # is the last word in a "typedef" statement. Approximative only + # but should be fine for all the common types. 
+ look_for_words = set(COMMON_TYPES) + look_for_words.add(';') + look_for_words.add(',') + look_for_words.add('(') + look_for_words.add(')') + look_for_words.add('typedef') + words_used = set() + is_typedef = False + paren = 0 + previous_word = '' + for word in _r_words.findall(csource): + if word in look_for_words: + if word == ';': + if is_typedef: + words_used.discard(previous_word) + look_for_words.discard(previous_word) + is_typedef = False + elif word == 'typedef': + is_typedef = True + paren = 0 + elif word == '(': + paren += 1 + elif word == ')': + paren -= 1 + elif word == ',': + if is_typedef and paren == 0: + words_used.discard(previous_word) + look_for_words.discard(previous_word) + else: # word in COMMON_TYPES + words_used.add(word) + previous_word = word + return words_used + + +class Parser(object): + + def __init__(self): + self._declarations = {} + self._included_declarations = set() + self._anonymous_counter = 0 + self._structnode2type = weakref.WeakKeyDictionary() + self._options = {} + self._int_constants = {} + self._recomplete = [] + + def _parse(self, csource): + csource, macros = _preprocess(csource) + # XXX: for more efficiency we would need to poke into the + # internals of CParser... 
the following registers the + # typedefs, because their presence or absence influences the + # parsing itself (but what they are typedef'ed to plays no role) + ctn = _common_type_names(csource) + typenames = [] + for name in sorted(self._declarations): + if name.startswith('typedef '): + name = name[8:] + typenames.append(name) + ctn.discard(name) + typenames += sorted(ctn) + # + csourcelines = ['typedef int %s;' % typename for typename in typenames] + csourcelines.append('typedef int __dotdotdot__;') + csourcelines.append(csource) + csource = '\n'.join(csourcelines) + try: + ast = _get_parser().parse(csource) + except pycparser.c_parser.ParseError as e: + self.convert_pycparser_error(e, csource) + # csource will be used to find buggy source text + return ast, macros, csource + + def _convert_pycparser_error(self, e, csource): + # xxx look for ":NUM:" at the start of str(e) and try to interpret + # it as a line number + line = None + msg = str(e) + if msg.startswith(':') and ':' in msg[1:]: + linenum = msg[1:msg.find(':',1)] + if linenum.isdigit(): + linenum = int(linenum, 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) + if line: + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) + else: + msg = 'parse error\n%s' % (msg,) + raise api.CDefError(msg) + + def parse(self, csource, override=False, packed=False, dllexport=False): + prev_options = self._options + try: + self._options = {'override': override, + 'packed': packed, + 'dllexport': dllexport} + self._internal_parse(csource) + finally: + self._options = prev_options + + def _internal_parse(self, csource): + ast, macros, csource = self._parse(csource) + # add the macros + self._process_macros(macros) + # find the first "__dotdotdot__" and use that as a separator + # between the repeated typedefs and the real 
csource + iterator = iter(ast.ext) + for decl in iterator: + if decl.name == '__dotdotdot__': + break + # + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + quals = 0 + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name, partial_length_ok=True) + self._declare('typedef ' + decl.name, realtype, quals=quals) + elif decl.__class__.__name__ == 'Pragma': + pass # skip pragma, only in pycparser 2.15 + else: + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _add_integer_constant(self, name, int_str): + int_str = int_str.lower().rstrip("ul") + neg = int_str.startswith('-') + if neg: + int_str = int_str[1:] + # "010" is not valid oct in py3 + if (int_str.startswith("0") and int_str != '0' + and not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + pyvalue = int(int_str, 0) + if neg: + pyvalue = -pyvalue + self._add_constants(name, pyvalue) + self._declare('macro ' + name, pyvalue) + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + if _r_int_literal.match(value): + self._add_integer_constant(key, value) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise api.CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... 
(literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) + + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._options.get('dllexport'): + tag = 'dllexport_python ' + else: + tag = 'function ' + self._declare(tag + decl.name, tp) + + def _parse_decl(self, decl): + node = decl.type + if isinstance(node, pycparser.c_ast.FuncDecl): + tp, quals = self._get_type_and_quals(node, name=decl.name) + assert isinstance(tp, model.RawFunctionType) + self._declare_function(tp, quals, decl) + else: + if isinstance(node, pycparser.c_ast.Struct): + self._get_struct_union_enum_type('struct', node) + elif isinstance(node, pycparser.c_ast.Union): + self._get_struct_union_enum_type('union', node) + elif isinstance(node, pycparser.c_ast.Enum): + self._get_struct_union_enum_type('enum', node) + elif not decl.name: + raise api.CDefError("construct does not declare any variable", + decl) + # + if decl.name: + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) + if tp.is_raw_function: + self._declare_function(tp, quals, decl) + elif (tp.is_integer_type() and + hasattr(decl, 'init') and + hasattr(decl.init, 'value') and + _r_int_literal.match(decl.init.value)): + self._add_integer_constant(decl.name, decl.init.value) + elif (tp.is_integer_type() and + isinstance(decl.init, pycparser.c_ast.UnaryOp) and + decl.init.op == '-' and + hasattr(decl.init.expr, 'value') and + _r_int_literal.match(decl.init.expr.value)): + self._add_integer_constant(decl.name, + '-' + decl.init.expr.value) + else: + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + self._declare('variable ' + decl.name, tp, quals=quals) + + def parse_type(self, cdecl): + return self.parse_type_and_quals(cdecl)[0] + + def parse_type_and_quals(self, cdecl): + ast, macros = self._parse('void 
__dummy(\n%s\n);' % cdecl)[:2] + assert not macros + exprnode = ast.ext[-1].type.args.params[0] + if isinstance(exprnode, pycparser.c_ast.ID): + raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) + return self._get_type_and_quals(exprnode.type) + + def _declare(self, name, obj, included=False, quals=0): + if name in self._declarations: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: + return + self._declarations[name] = (obj, quals) + if included: + self._included_declarations.add(obj) + + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'volatile' in type.quals: + quals |= model.Q_VOLATILE + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): + if isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + if (isinstance(type, model.StructOrUnionOrEnum) and + type.name.startswith('$') and type.name[1:].isdigit() and + type.forcename is None and declname is not None): + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) + + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): + # first, dereference typedefs, if we have it already parsed, we're good + if (isinstance(typenode, pycparser.c_ast.TypeDecl) and + isinstance(typenode.type, pycparser.c_ast.IdentifierType) and + len(typenode.type.names) == 1 and + ('typedef ' + typenode.type.names[0]) in self._declarations): + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.ArrayDecl): + # array type + if typenode.dim is None: + length = None + else: + length = self._parse_constant( + typenode.dim, partial_length_ok=partial_length_ok) + tp, quals = 
self._get_type_and_quals(typenode.type, + partial_length_ok=partial_length_ok) + return model.ArrayType(tp, length), quals + # + if isinstance(typenode, pycparser.c_ast.PtrDecl): + # pointer type + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) + type = typenode.type + if isinstance(type, pycparser.c_ast.IdentifierType): + # assume a primitive type. get it from .names, but reduce + # synonyms to a single chosen combination + names = list(type.names) + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names + ident = ' '.join(names) + if ident == 'void': + return model.void_type, quals + tp0, quals0 = resolve_common_type(self, ident) + return tp0, (quals | quals0) + # + if isinstance(type, pycparser.c_ast.Struct): + # 'struct foobar' + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Union): + # 'union foobar' + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Enum): + # 'enum foobar' + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.FuncDecl): + # a function type + 
return self._parse_function_type(typenode, name), 0 + # + # nested anonymous structs or unions end up here + if isinstance(typenode, pycparser.c_ast.Struct): + return self._get_struct_union_enum_type('struct', typenode, name, + nested=True), 0 + if isinstance(typenode, pycparser.c_ast.Union): + return self._get_struct_union_enum_type('union', typenode, name, + nested=True), 0 + # + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) + + def _parse_function_type(self, typenode, funcname=None): + params = list(getattr(typenode.args, 'params', [])) + for i, arg in enumerate(params): + if not hasattr(arg, 'type'): + raise api.CDefError("%s arg %d: unknown type '%s'" + " (if you meant to use the old C syntax of giving" + " untyped arguments, it is not supported)" + % (funcname or 'in expression', i + 1, + getattr(arg, 'name', '?'))) + ellipsis = ( + len(params) > 0 and + isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and + isinstance(params[-1].type.type, + pycparser.c_ast.IdentifierType) and + params[-1].type.type.names == ['__dotdotdot__']) + if ellipsis: + params.pop() + if not params: + raise api.CDefError( + "%s: a function with only '(...)' as argument" + " is not correct C" % (funcname or 'in expression')) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) + for argdeclnode in params] + if not ellipsis and args == [model.void_type]: + args = [] + result, quals = self._get_type_and_quals(typenode.type) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. 
+ abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) + + def _as_func_arg(self, type, quals): + if isinstance(type, model.ArrayType): + return model.PointerType(type.item, quals) + elif isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + else: + return type + + def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): + # First, a level of caching on the exact 'type' node of the AST. + # This is obscure, but needed because pycparser "unrolls" declarations + # such as "typedef struct { } foo_t, *foo_p" and we end up with + # an AST that is not a tree, but a DAG, with the "type" node of the + # two branches foo_t and foo_p of the trees being the same node. + # It's a bit silly but detecting "DAG-ness" in the AST tree seems + # to be the only way to distinguish this case from two independent + # structs. See test_struct_with_two_usages. + try: + return self._structnode2type[type] + except KeyError: + pass + # + # Note that this must handle parsing "struct foo" any number of + # times and always return the same StructType object. Additionally, + # one of these times (not necessarily the first), the fields of + # the struct can be specified with "struct foo { ...fields... }". + # If no name is given, then we have to create a new anonymous struct + # with no caching; in this case, the fields are either specified + # right now or never. + # + force_name = name + name = type.name + # + # get the type or create it if needed + if name is None: + # 'force_name' is used to guess a more readable name for + # anonymous structs, for the common case "typedef struct { } foo". 
+ if force_name is not None: + explicit_name = '$%s' % force_name + else: + self._anonymous_counter += 1 + explicit_name = '$%d' % self._anonymous_counter + tp = None + else: + explicit_name = name + key = '%s %s' % (kind, name) + tp, _ = self._declarations.get(key, (None, None)) + # + if tp is None: + if kind == 'struct': + tp = model.StructType(explicit_name, None, None, None) + elif kind == 'union': + tp = model.UnionType(explicit_name, None, None, None) + elif kind == 'enum': + tp = self._build_enum_type(explicit_name, type.values) + else: + raise AssertionError("kind = %r" % (kind,)) + if name is not None: + self._declare(key, tp) + else: + if kind == 'enum' and type.values is not None: + raise NotImplementedError( + "enum %s: the '{}' declaration should appear on the first " + "time the enum is mentioned, not later" % explicit_name) + if not tp.forcename: + tp.force_the_name(force_name) + if tp.forcename and '$' in tp.name: + self._declare('anonymous %s' % tp.forcename, tp) + # + self._structnode2type[type] = tp + # + # enums: done here + if kind == 'enum': + return tp + # + # is there a 'type.decls'? If yes, then this is the place in the + # C sources that declare the fields. If no, then just return the + # existing type, possibly still incomplete. 
+ if type.decls is None: + return tp + # + if tp.fldnames is not None: + raise api.CDefError("duplicate declaration of struct %s" % name) + fldnames = [] + fldtypes = [] + fldbitsize = [] + fldquals = [] + for decl in type.decls: + if decl.bitsize is None: + bitsize = -1 + else: + bitsize = self._parse_constant(decl.bitsize) + self._partial_length = False + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) + if self._partial_length: + self._make_partial(tp, nested) + if isinstance(type, model.StructType) and type.partial: + self._make_partial(tp, nested) + fldnames.append(decl.name or '') + fldtypes.append(type) + fldbitsize.append(bitsize) + fldquals.append(fqual) + tp.fldnames = tuple(fldnames) + tp.fldtypes = tuple(fldtypes) + tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) + if fldbitsize != [-1] * len(fldbitsize): + if isinstance(tp, model.StructType) and tp.partial: + raise NotImplementedError("%s: using both bitfields and '...;'" + % (tp,)) + tp.packed = self._options.get('packed') + if tp.completed: # must be re-completed: it is not opaque any more + tp.completed = 0 + self._recomplete.append(tp) + return tp + + def _make_partial(self, tp, nested): + if not isinstance(tp, model.StructOrUnion): + raise api.CDefError("%s cannot be partial" % (tp,)) + if not tp.has_c_name() and not nested: + raise NotImplementedError("%s is partial but has no C name" %(tp,)) + tp.partial = True + + def _parse_constant(self, exprnode, partial_length_ok=False): + # for now, limited to expressions that are an immediate number + # or positive/negative number + if isinstance(exprnode, pycparser.c_ast.Constant): + s = exprnode.value + if s.startswith('0'): + if s.startswith('0x') or s.startswith('0X'): + return int(s, 16) + return int(s, 8) + elif '1' <= s[0] <= '9': + return int(s, 10) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise api.CDefError("invalid 
constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '-'): + return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] + # + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name == '__dotdotdotarray__'): + if partial_length_ok: + self._partial_length = True + return '...' + raise api.FFIError(":%d: unsupported '[...]' here, cannot derive " + "the actual array length in this context" + % exprnode.coord.line) + # + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) + + def _build_enum_type(self, explicit_name, decls): + if decls is not None: + partial = False + enumerators = [] + enumvalues = [] + nextenumvalue = 0 + for enum in decls.enumerators: + if enum.value is not None: + nextenumvalue = self._parse_constant(enum.value) + enumerators.append(enum.name) + enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) + nextenumvalue += 1 + enumerators = tuple(enumerators) + enumvalues = tuple(enumvalues) + tp = model.EnumType(explicit_name, enumerators, enumvalues) + tp.partial = partial + else: # opaque enum + tp = model.EnumType(explicit_name, (), ()) + return tp + + def include(self, other): + for name, (tp, quals) in other._declarations.items(): + if name.startswith('anonymous $enum_$'): + continue # fix for test_anonymous_enum_include + kind = name.split(' ', 1)[0] + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/pypy/module/cpyext/test/test_cparser.py 
b/pypy/module/cpyext/test/test_cparser.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_cparser.py @@ -0,0 +1,15 @@ +from pypy.module.cpyext.cparser import Parser + +def test_simple(): + decl = """ + typedef intptr_t Py_ssize_t; + + typedef struct { + Py_ssize_t ob_refcnt; + Py_ssize_t ob_pypy_link; + struct _typeobject *ob_type; + double ob_fval; + } PyFloatObject; + """ + ctx = Parser() + ctx.parse(decl) From pypy.commits at gmail.com Thu Dec 15 17:08:36 2016 From: pypy.commits at gmail.com (wlav) Date: Thu, 15 Dec 2016 14:08:36 -0800 (PST) Subject: [pypy-commit] pypy default: disable tests if compiler does not support C++11 Message-ID: <58531464.6a18190a.f9301.3f3d@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r89078:3831332c4c41 Date: 2016-12-15 13:59 -0800 http://bitbucket.org/pypy/pypy/changeset/3831332c4c41/ Log: disable tests if compiler does not support C++11 diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,7 +1,13 @@ -import py +import py, sys @py.test.mark.tryfirst def pytest_runtest_setup(item): + if 'linux' in sys.platform: + # tests require minimally std=c++11 + cc_info = py.process.cmdexec('gcc -v --help') + if not '-std=c++11' in cc_info: + py.test.skip('skipping tests because gcc does not support C++11') + if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: From pypy.commits at gmail.com Thu Dec 15 17:08:38 2016 From: pypy.commits at gmail.com (wlav) Date: Thu, 15 Dec 2016 14:08:38 -0800 (PST) Subject: [pypy-commit] pypy default: update release notes and cppyy documentation Message-ID: <58531466.411b190a.11eef.4152@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r89079:985fae67dded Date: 2016-12-15 14:00 -0800 http://bitbucket.org/pypy/pypy/changeset/985fae67dded/ Log: update release notes and cppyy documentation
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -3,16 +3,17 @@ The cppyy module delivers dynamic Python-C++ bindings. It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++. +handling all of modern C++ (11, 14, etc.). It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ reflection and interactivity. Reflection information is extracted from C++ header files. Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a backend, installable through pip, to interface with Cling. +it requires a `backend`_, installable through pip, to interface with Cling. .. _Cling: https://root.cern.ch/cling .. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ +.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation @@ -22,25 +23,39 @@ module, which is no longer supported. Both the tooling and user-facing Python codes are very backwards compatible, however. -Further dependencies are cmake (for general build) and Python2.7 (for LLVM). +Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). Assuming you have a recent enough version of PyPy installed, use pip to complete the installation of cppyy:: - $ pypy-c -m pip install PyPy-cppyy-backend + $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. The building process may take quite some time as it includes a customized -version of LLVM as part of Cling. +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. + +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). 
+If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. -First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. +Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -54,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -110,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o 
libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -212,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -271,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -567,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. 
+For example:: $ cat MyTemplate.h #include @@ -587,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. -The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. -One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. 
-Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -665,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. 
The latter makes for easier code writing if the classes passed to the @@ -676,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. -The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. -Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. 
-If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. -The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. 
-In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. +Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -772,5 +669,4 @@ .. 
toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. - -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. 
-However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -45,3 +45,14 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). 
From pypy.commits at gmail.com Thu Dec 15 17:18:26 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 15 Dec 2016 14:18:26 -0800 (PST) Subject: [pypy-commit] pypy default: hack at test to pass on cpython3.5, still fails on py3.5 Message-ID: <585316b2.51ce190a.9ba6d.403f@mx.google.com> Author: Matti Picus Branch: Changeset: r89081:f1a5f1fd4df2 Date: 2016-12-16 00:16 +0200 http://bitbucket.org/pypy/pypy/changeset/f1a5f1fd4df2/ Log: hack at test to pass on cpython3.5, still fails on py3.5 diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1183,9 +1183,19 @@ Base1->tp_basicsize = sizeof(PyHeapTypeObject); Base2->tp_basicsize = sizeof(PyHeapTypeObject); Base12->tp_basicsize = sizeof(PyHeapTypeObject); + #ifndef PYPY_VERSION /* PyHeapTypeObject has no ht_qualname on PyPy */ + #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 + { + PyObject * dummyname = PyBytes_FromString("dummy name"); + ((PyHeapTypeObject*)Base1)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base2)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base12)->ht_qualname = dummyname; + } + #endif + #endif Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; Base12->tp_base = Base1; Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); Base12->tp_doc = "The Base12 type or object"; From pypy.commits at gmail.com Thu Dec 15 17:18:24 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 15 Dec 2016 14:18:24 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hack at test to pass on cpython3.5, still fails on py3.5 Message-ID: <585316b0.87d4190a.cb49b.3dee@mx.google.com> Author: Matti Picus Branch: py3.5 Changeset: r89080:f145455a15e5 Date: 
2016-12-16 00:11 +0200 http://bitbucket.org/pypy/pypy/changeset/f145455a15e5/ Log: hack at test to pass on cpython3.5, still fails on py3.5 diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1148,9 +1148,19 @@ Base1->tp_basicsize = sizeof(PyHeapTypeObject); Base2->tp_basicsize = sizeof(PyHeapTypeObject); Base12->tp_basicsize = sizeof(PyHeapTypeObject); + #ifndef PYPY_VERSION /* PyHeapTypeObject has no ht_qualname on PyPy */ + #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 + { + PyObject * dummyname = PyBytes_FromString("dummy name"); + ((PyHeapTypeObject*)Base1)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base2)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base12)->ht_qualname = dummyname; + } + #endif + #endif Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; Base12->tp_base = Base1; Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); Base12->tp_doc = "The Base12 type or object"; From pypy.commits at gmail.com Thu Dec 15 18:07:29 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 15 Dec 2016 15:07:29 -0800 (PST) Subject: [pypy-commit] pypy default: tweak doc display and expose contributors.rst, now an umlaut-a is shown as "?", Message-ID: <58532231.c54d2e0a.af1d5.42f6@mx.google.com> Author: Matti Picus Branch: Changeset: r89082:7c90b50b2832 Date: 2016-12-16 01:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7c90b50b2832/ Log: tweak doc display and expose contributors.rst, now an umlaut-a is shown as "?", diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors 
+------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -61,8 +61,8 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Reflex and cppyy +---------------- The builtin :doc:`cppyy ` module uses reflection information, provided by `Reflex`_ (which needs to be `installed separately`_), of C/C++ code to @@ -81,6 +81,9 @@ .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 .. _Reflex: https://root.cern.ch/how/how-use-reflex +.. toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions From pypy.commits at gmail.com Thu Dec 15 20:11:59 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 15 Dec 2016 17:11:59 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Convert primitive types to rffi Message-ID: <58533f5f.50cc190a.b3da4.47e5@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89083:b4125bc7dfba Date: 2016-12-16 01:08 +0000 http://bitbucket.org/pypy/pypy/changeset/b4125bc7dfba/ Log: Convert primitive types to rffi diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -2,6 +2,8 @@ from cffi.commontypes import COMMON_TYPES, resolve_common_type import pycparser import weakref, re +from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.tool import rfficache _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", re.DOTALL | re.MULTILINE) @@ -639,3 +641,22 @@ self._declare(name, tp, 
included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) + +CNAME_TO_LLTYPE = { + 'char': rffi.CHAR, + 'double': rffi.DOUBLE, 'long double': rffi.LONGDOUBLE, + 'float': rffi.FLOAT} + +def add_inttypes(): + for name in rffi.TYPES: + if name.startswith('unsigned'): + rname = 'u' + name[9:] + else: + rname = name + rname = rname.replace(' ', '').upper() + CNAME_TO_LLTYPE[name] = rfficache.platform.types[rname] + +add_inttypes() + +def cname_to_lltype(name): + return CNAME_TO_LLTYPE[name] diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -1,4 +1,5 @@ -from pypy.module.cpyext.cparser import Parser +from rpython.rtyper.lltypesystem import rffi +from pypy.module.cpyext.cparser import Parser, cname_to_lltype def test_simple(): decl = """ @@ -7,9 +8,11 @@ typedef struct { Py_ssize_t ob_refcnt; Py_ssize_t ob_pypy_link; - struct _typeobject *ob_type; double ob_fval; } PyFloatObject; """ ctx = Parser() ctx.parse(decl) + obj = ctx._declarations['typedef PyFloatObject'][0] + assert [cname_to_lltype(tp.name) for tp in obj.fldtypes] == [ + rffi.INTPTR_T, rffi.INTPTR_T, rffi.DOUBLE] From pypy.commits at gmail.com Thu Dec 15 21:30:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 15 Dec 2016 18:30:15 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Start defining the parsing API Message-ID: <585351b7.e2202e0a.3c72d.49fc@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89084:9d38728d62cc Date: 2016-12-16 02:29 +0000 http://bitbucket.org/pypy/pypy/changeset/9d38728d62cc/ Log: Start defining the parsing API diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -660,3 +660,24 @@ def cname_to_lltype(name): return CNAME_TO_LLTYPE[name] + +class ParsedSource(object): + def __init__(self, 
source, definitions): + self.source = source + self.definitions = definitions + + +def parse_source(source): + ctx = Parser() + ctx.parse(source) + defs = {} + for name, (obj, quals) in ctx._declarations.iteritems(): + if not name.startswith('typedef '): + continue + name = name[8:] + if isinstance(obj, model.PrimitiveType): + assert obj.name not in defs + defs[name] = cname_to_lltype(obj.name) + else: + pass + return ParsedSource(source, defs) diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -1,9 +1,9 @@ from rpython.rtyper.lltypesystem import rffi -from pypy.module.cpyext.cparser import Parser, cname_to_lltype +from pypy.module.cpyext.cparser import Parser, cname_to_lltype, parse_source -def test_simple(): +def test_stuff(): decl = """ - typedef intptr_t Py_ssize_t; + typedef ssize_t Py_ssize_t; typedef struct { Py_ssize_t ob_refcnt; @@ -15,4 +15,9 @@ ctx.parse(decl) obj = ctx._declarations['typedef PyFloatObject'][0] assert [cname_to_lltype(tp.name) for tp in obj.fldtypes] == [ - rffi.INTPTR_T, rffi.INTPTR_T, rffi.DOUBLE] + rffi.SSIZE_T, rffi.SSIZE_T, rffi.DOUBLE] + +def test_simple(): + decl = "typedef ssize_t Py_ssize_t;" + hdr = parse_source(decl) + assert hdr.definitions == {'Py_ssize_t': rffi.SSIZE_T} From pypy.commits at gmail.com Thu Dec 15 22:14:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 15 Dec 2016 19:14:27 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: progress Message-ID: <58535c13.50dd190a.df21f.4b7b@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89085:483201b600d4 Date: 2016-12-16 03:13 +0000 http://bitbucket.org/pypy/pypy/changeset/483201b600d4/ Log: progress diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -666,6 +666,16 @@ self.source = source self.definitions = 
definitions +def cffi_to_lltype(obj): + from pypy.module.cpyext.api import cpython_struct + if isinstance(obj, model.PrimitiveType): + return cname_to_lltype(obj.name) + elif isinstance(obj, model.StructType): + fields = zip( + obj.fldnames, + [cffi_to_lltype(field) for field in obj.fldtypes]) + return cpython_struct(obj.name, fields) + def parse_source(source): ctx = Parser() @@ -675,9 +685,6 @@ if not name.startswith('typedef '): continue name = name[8:] - if isinstance(obj, model.PrimitiveType): - assert obj.name not in defs - defs[name] = cname_to_lltype(obj.name) - else: - pass + assert name not in defs + defs[name] = cffi_to_lltype(obj) return ParsedSource(source, defs) diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -16,6 +16,7 @@ obj = ctx._declarations['typedef PyFloatObject'][0] assert [cname_to_lltype(tp.name) for tp in obj.fldtypes] == [ rffi.SSIZE_T, rffi.SSIZE_T, rffi.DOUBLE] + res = parse_source(decl) def test_simple(): decl = "typedef ssize_t Py_ssize_t;" From pypy.commits at gmail.com Thu Dec 15 23:57:15 2016 From: pypy.commits at gmail.com (wlav) Date: Thu, 15 Dec 2016 20:57:15 -0800 (PST) Subject: [pypy-commit] pypy default: replace mention of Reflex by Cling Message-ID: <5853742b.511c190a.bae2.5032@mx.google.com> Author: Wim Lavrijsen Branch: Changeset: r89086:075ce4f71525 Date: 2016-12-15 20:48 -0800 http://bitbucket.org/pypy/pypy/changeset/075ce4f71525/ Log: replace mention of Reflex by Cling diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through cppyy_ using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. 
_libffi: http://sourceware.org/libffi/ -Reflex and cppyy ----------------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -78,8 +78,8 @@ :doc:`Full details ` are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling .. toctree:: From pypy.commits at gmail.com Fri Dec 16 02:41:14 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 15 Dec 2016 23:41:14 -0800 (PST) Subject: [pypy-commit] pypy default: tweak cppyy links Message-ID: <58539a9a.d4c4190a.97876.5570@mx.google.com> Author: Matti Picus Branch: Changeset: r89087:7fe5525b6ff6 Date: 2016-12-16 09:32 +0200 http://bitbucket.org/pypy/pypy/changeset/7fe5525b6ff6/ Log: tweak cppyy links diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through cppyy_ using Cling. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -76,7 +76,7 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. .. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend .. 
_Cling: https://root.cern.ch/cling From pypy.commits at gmail.com Fri Dec 16 03:29:24 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 00:29:24 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5853a5e4.84c3190a.7e2df.5934@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r834:33beccdd20eb Date: 2016-12-16 09:29 +0100 http://bitbucket.org/pypy/pypy.org/changeset/33beccdd20eb/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66436 of $105000 (63.3%) + $66464 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Fri Dec 16 03:52:43 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 00:52:43 -0800 (PST) Subject: [pypy-commit] pypy default: utf-8 Message-ID: <5853ab5b.87a3190a.d58ec.5883@mx.google.com> Author: Armin Rigo Branch: Changeset: r89088:58b122a955e6 Date: 2016-12-16 09:51 +0100 http://bitbucket.org/pypy/pypy/changeset/58b122a955e6/ Log: utf-8 diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -313,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker From pypy.commits at gmail.com Fri Dec 16 04:43:30 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 01:43:30 -0800 (PST) Subject: [pypy-commit] pypy default: Linux: try to implement os.urandom() on top of the syscall getrandom() Message-ID: <5853b742.c14d190a.8706c.5ca7@mx.google.com> Author: Armin Rigo Branch: Changeset: r89089:82ef21124af8 Date: 2016-12-16 10:42 +0100 http://bitbucket.org/pypy/pypy/changeset/82ef21124af8/ Log: Linux: try to implement os.urandom() on top of the syscall getrandom() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1331,8 +1331,9 @@ Return a string of n random bytes suitable for cryptographic use. 
""" context = get(space).random_context + signal_checker = space.getexecutioncontext().checksignals try: - return space.wrap(rurandom.urandom(context, n)) + return space.wrap(rurandom.urandom(context, n, signal_checker)) except OSError as e: raise wrap_oserror(space, e) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -7,12 +7,12 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.objectmodel import not_rpython +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform if sys.platform == 'win32': from rpython.rlib import rwin32 - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.rtyper.tool import rffi_platform eci = ExternalCompilationInfo( includes = ['windows.h', 'wincrypt.h'], @@ -56,7 +56,7 @@ return lltype.malloc(rffi.CArray(HCRYPTPROV), 1, immortal=True, zero=True) - def urandom(context, n): + def urandom(context, n, signal_checker=None): provider = context[0] if not provider: # This handle is never explicitly released. The operating @@ -80,11 +80,71 @@ def init_urandom(): return None - def urandom(context, n): + SYS_getrandom = None + + if sys.platform.startswith('linux'): + eci = ExternalCompilationInfo(includes=['sys/syscall.h']) + class CConfig: + _compilation_info_ = eci + SYS_getrandom = rffi_platform.DefinedConstantInteger( + 'SYS_getrandom') + globals().update(rffi_platform.configure(CConfig)) + + if SYS_getrandom is not None: + from rpython.rlib.rposix import get_saved_errno, handle_posix_error + import errno + + eci = eci.merge(ExternalCompilationInfo(includes=['linux/random.h'])) + class CConfig: + _compilation_info_ = eci + GRND_NONBLOCK = rffi_platform.ConstantInteger('GRND_NONBLOCK') + globals().update(rffi_platform.configure(CConfig)) + + # On Linux, use the syscall() function because the GNU libc doesn't + # expose the Linux getrandom() syscall yet. 
+ syscall = rffi.llexternal( + 'syscall', + [lltype.Signed, rffi.CCHARP, rffi.LONG, rffi.INT], + lltype.Signed, + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) + + class Works: + status = True + getrandom_works = Works() + + def _getrandom(n, result, signal_checker): + if not getrandom_works.status: + return n + while n > 0: + with rffi.scoped_alloc_buffer(n) as buf: + got = syscall(SYS_getrandom, buf.raw, n, GRND_NONBLOCK) + if got >= 0: + s = buf.str(got) + result.append(s) + n -= len(s) + continue + err = get_saved_errno() + if (err == errno.ENOSYS or err == errno.EPERM or + err == errno.EAGAIN): # see CPython 3.5 + getrandom_works.status = False + return n + if err == errno.EINTR: + if signal_checker is not None: + signal_checker() + continue + handle_posix_error("getrandom", got) + raise AssertionError("unreachable") + return n + + def urandom(context, n, signal_checker=None): "Read n bytes from /dev/urandom." - result = '' - if n == 0: - return result + result = [] + if SYS_getrandom is not None: + n = _getrandom(n, result, signal_checker) + if n <= 0: + return ''.join(result) + # XXX should somehow cache the file descriptor. It's a mess. # CPython has a 99% solution and hopes for the remaining 1% # not to occur. 
For now, we just don't cache the file @@ -98,8 +158,8 @@ if e.errno != errno.EINTR: raise data = '' - result += data + result.append(data) n -= len(data) finally: os.close(fd) - return result + return ''.join(result) diff --git a/rpython/rlib/test/test_rurandom.py b/rpython/rlib/test/test_rurandom.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rurandom.py @@ -0,0 +1,12 @@ +from rpython.rlib import rurandom + +def test_rurandom(): + context = rurandom.init_urandom() + s = rurandom.urandom(context, 5000) + assert type(s) is str and len(s) == 5000 + for x in [1, 11, 111, 222]: + assert s.count(chr(x)) >= 1 + +def test_rurandom_no_syscall(monkeypatch): + monkeypatch.setattr(rurandom, 'SYS_getrandom', None) + test_rurandom() From pypy.commits at gmail.com Fri Dec 16 04:44:42 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 01:44:42 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <5853b78a.0c472e0a.1e0e9.5fa4@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89090:97c916c45ec7 Date: 2016-12-16 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/97c916c45ec7/ Log: hg merge default diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors +------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz @@ -307,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -3,16 +3,17 @@ The cppyy module delivers dynamic Python-C++ bindings. It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++. +handling all of modern C++ (11, 14, etc.). It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ reflection and interactivity. 
Reflection information is extracted from C++ header files. Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a backend, installable through pip, to interface with Cling. +it requires a `backend`_, installable through pip, to interface with Cling. .. _Cling: https://root.cern.ch/cling .. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ +.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation @@ -22,25 +23,39 @@ module, which is no longer supported. Both the tooling and user-facing Python codes are very backwards compatible, however. -Further dependencies are cmake (for general build) and Python2.7 (for LLVM). +Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). Assuming you have a recent enough version of PyPy installed, use pip to complete the installation of cppyy:: - $ pypy-c -m pip install PyPy-cppyy-backend + $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. The building process may take quite some time as it includes a customized -version of LLVM as part of Cling. +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. + +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). +If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. 
-First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. +Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -54,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -110,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -212,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - 
$ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -271,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -567,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. +For example:: $ cat MyTemplate.h #include @@ -587,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. 
-The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. -One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. -Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. 
+ $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -665,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. The latter makes for easier code writing if the classes passed to the @@ -676,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. 
-The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. -Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. -If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. 
-The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. -In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. 
+Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -772,5 +669,4 @@ .. toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. 
- -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. -However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -76,11 +76,14 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling +.. 
toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,14 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). 
diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,7 +1,13 @@ -import py +import py, sys @py.test.mark.tryfirst def pytest_runtest_setup(item): + if 'linux' in sys.platform: + # tests require minimally std=c++11 + cc_info = py.process.cmdexec('gcc -v --help') + if not '-std=c++11' in cc_info: + py.test.skip('skipping tests because gcc does not support C++11') + if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -2158,18 +2158,3 @@ it causes an exception to immediately be thrown; this is used for the throw() methods of generator objects.""" raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_Check(space, ob): - """Return true if ob is either a reference or proxy object.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckRef(space, ob): - """Return true if ob is a reference object.""" - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckProxy(space, ob): - """Return true if ob is a proxy object.""" - raise NotImplementedError diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py --- a/pypy/module/cpyext/test/test_weakref.py +++ b/pypy/module/cpyext/test/test_weakref.py @@ -56,3 +56,30 @@ ) ]) module.test_macro_cast() + + def test_weakref_check(self): + module = self.import_extension('foo', [ + ("test_weakref_cast", "METH_O", + """ + return Py_BuildValue("iiii", + (int)PyWeakref_Check(args), + (int)PyWeakref_CheckRef(args), + (int)PyWeakref_CheckRefExact(args), + (int)PyWeakref_CheckProxy(args)); + """ + ) 
+ ]) + import weakref + def foo(): pass + class Bar(object): + pass + bar = Bar() + assert module.test_weakref_cast([]) == (0, 0, 0, 0) + assert module.test_weakref_cast(weakref.ref(foo)) == (1, 1, 1, 0) + assert module.test_weakref_cast(weakref.ref(bar)) == (1, 1, 1, 0) + assert module.test_weakref_cast(weakref.proxy(foo)) == (1, 0, 0, 1) + assert module.test_weakref_cast(weakref.proxy(bar)) == (1, 0, 0, 1) + class X(weakref.ref): + pass + assert module.test_weakref_cast(X(foo)) == (1, 1, 0, 0) + assert module.test_weakref_cast(X(bar)) == (1, 1, 0, 0) diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py --- a/pypy/module/cpyext/weakrefobject.py +++ b/pypy/module/cpyext/weakrefobject.py @@ -1,6 +1,7 @@ from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyobject import PyObject, CANNOT_FAIL from pypy.module._weakref.interp__weakref import W_Weakref, proxy +from pypy.module._weakref.interp__weakref import W_Proxy, W_CallableProxy from rpython.rtyper.lltypesystem import rffi @cpython_api([PyObject, PyObject], PyObject) @@ -54,3 +55,34 @@ PyWeakref_GetObject() and Py_INCREF().) """ return space.call_function(w_ref) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRef(space, w_obj): + """Return true if ob is a reference object. + """ + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return (space.is_w(w_obj_type, w_type) or + space.issubtype_w(w_obj_type, w_type)) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRefExact(space, w_obj): + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return space.is_w(w_obj_type, w_type) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckProxy(space, w_obj): + """Return true if ob is a proxy object. 
+ """ + w_obj_type = space.type(w_obj) + w_type1 = space.gettypeobject(W_Proxy.typedef) + w_type2 = space.gettypeobject(W_CallableProxy.typedef) + return space.is_w(w_obj_type, w_type1) or space.is_w(w_obj_type, w_type2) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_Check(space, w_obj): + """Return true if ob is either a reference or proxy object. + """ + return (PyWeakref_CheckRef(space, w_obj) or + PyWeakref_CheckProxy(space, w_obj)) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -2099,8 +2099,9 @@ Return a string of 'size' random bytes suitable for cryptographic use. """ context = get(space).random_context + signal_checker = space.getexecutioncontext().checksignals try: - return space.newbytes(rurandom.urandom(context, size)) + return space.newbytes(rurandom.urandom(context, n, signal_checker)) except OSError as e: # 'rurandom' should catch and retry internally if it gets EINTR # (at least in os.read(), which is probably enough in practice) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -7,12 +7,12 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.objectmodel import not_rpython +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform if sys.platform == 'win32': from rpython.rlib import rwin32 - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.rtyper.tool import rffi_platform eci = ExternalCompilationInfo( includes = ['windows.h', 'wincrypt.h'], @@ -56,7 +56,7 @@ return lltype.malloc(rffi.CArray(HCRYPTPROV), 1, immortal=True, zero=True) - def urandom(context, n): + def urandom(context, n, signal_checker=None): provider = context[0] if not provider: # This handle is never explicitly released. 
The operating @@ -80,11 +80,71 @@ def init_urandom(): return None - def urandom(context, n): + SYS_getrandom = None + + if sys.platform.startswith('linux'): + eci = ExternalCompilationInfo(includes=['sys/syscall.h']) + class CConfig: + _compilation_info_ = eci + SYS_getrandom = rffi_platform.DefinedConstantInteger( + 'SYS_getrandom') + globals().update(rffi_platform.configure(CConfig)) + + if SYS_getrandom is not None: + from rpython.rlib.rposix import get_saved_errno, handle_posix_error + import errno + + eci = eci.merge(ExternalCompilationInfo(includes=['linux/random.h'])) + class CConfig: + _compilation_info_ = eci + GRND_NONBLOCK = rffi_platform.ConstantInteger('GRND_NONBLOCK') + globals().update(rffi_platform.configure(CConfig)) + + # On Linux, use the syscall() function because the GNU libc doesn't + # expose the Linux getrandom() syscall yet. + syscall = rffi.llexternal( + 'syscall', + [lltype.Signed, rffi.CCHARP, rffi.LONG, rffi.INT], + lltype.Signed, + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) + + class Works: + status = True + getrandom_works = Works() + + def _getrandom(n, result, signal_checker): + if not getrandom_works.status: + return n + while n > 0: + with rffi.scoped_alloc_buffer(n) as buf: + got = syscall(SYS_getrandom, buf.raw, n, GRND_NONBLOCK) + if got >= 0: + s = buf.str(got) + result.append(s) + n -= len(s) + continue + err = get_saved_errno() + if (err == errno.ENOSYS or err == errno.EPERM or + err == errno.EAGAIN): # see CPython 3.5 + getrandom_works.status = False + return n + if err == errno.EINTR: + if signal_checker is not None: + signal_checker() + continue + handle_posix_error("getrandom", got) + raise AssertionError("unreachable") + return n + + def urandom(context, n, signal_checker=None): "Read n bytes from /dev/urandom." 
- result = '' - if n == 0: - return result + result = [] + if SYS_getrandom is not None: + n = _getrandom(n, result, signal_checker) + if n <= 0: + return ''.join(result) + # XXX should somehow cache the file descriptor. It's a mess. # CPython has a 99% solution and hopes for the remaining 1% # not to occur. For now, we just don't cache the file @@ -98,8 +158,8 @@ if e.errno != errno.EINTR: raise data = '' - result += data + result.append(data) n -= len(data) finally: os.close(fd) - return result + return ''.join(result) diff --git a/rpython/rlib/test/test_rurandom.py b/rpython/rlib/test/test_rurandom.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rurandom.py @@ -0,0 +1,12 @@ +from rpython.rlib import rurandom + +def test_rurandom(): + context = rurandom.init_urandom() + s = rurandom.urandom(context, 5000) + assert type(s) is str and len(s) == 5000 + for x in [1, 11, 111, 222]: + assert s.count(chr(x)) >= 1 + +def test_rurandom_no_syscall(monkeypatch): + monkeypatch.setattr(rurandom, 'SYS_getrandom', None) + test_rurandom() From pypy.commits at gmail.com Fri Dec 16 05:19:19 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 02:19:19 -0800 (PST) Subject: [pypy-commit] pypy py3.5: A more proper fix than 7d75f981d293: introduce and use the unwrap_spec Message-ID: <5853bfa7.46052e0a.14c27.601d@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89091:98017da1387f Date: 2016-12-16 11:18 +0100 http://bitbucket.org/pypy/pypy/changeset/98017da1387f/ Log: A more proper fix than 7d75f981d293: introduce and use the unwrap_spec 'text_or_None' (doesn't break randomly other things for now) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1567,7 +1567,9 @@ return self.buffer_w(w_obj, flags).as_str() def str_or_None_w(self, w_obj): - # FIXME: XXX for now, inconsistent with str_w() + return None if self.is_none(w_obj) else 
self.str_w(w_obj) + + def text_or_None_w(self, w_obj): return None if self.is_none(w_obj) else self.identifier_w(w_obj) def str_w(self, w_obj): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -156,6 +156,9 @@ def visit_str_or_None(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_text_or_None(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_str0(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -296,6 +299,9 @@ def visit_str_or_None(self, typ): self.run_args.append("space.str_or_None_w(%s)" % (self.scopenext(),)) + def visit_text_or_None(self, typ): + self.run_args.append("space.text_or_None_w(%s)" % (self.scopenext(),)) + def visit_str0(self, typ): self.run_args.append("space.str0_w(%s)" % (self.scopenext(),)) @@ -455,6 +461,9 @@ def visit_str_or_None(self, typ): self.unwrap.append("space.str_or_None_w(%s)" % (self.nextarg(),)) + def visit_text_or_None(self, typ): + self.unwrap.append("space.text_or_None_w(%s)" % (self.nextarg(),)) + def visit_str0(self, typ): self.unwrap.append("space.str0_w(%s)" % (self.nextarg(),)) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -206,7 +206,7 @@ # we ignore w_type and always return a bytearray return new_bytearray(space, space.w_bytearray, data) - @unwrap_spec(encoding='str_or_None', errors='str_or_None') + @unwrap_spec(encoding='text_or_None', errors='text_or_None') def descr_init(self, space, w_source=None, encoding=None, errors=None): assert isinstance(self, W_BytearrayObject) data = [c for c in newbytesdata_w(space, w_source, encoding, errors)] diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -526,7 +526,7 @@ return 
space.newlist_bytes(lst) @staticmethod - @unwrap_spec(encoding='str_or_None', errors='str_or_None') + @unwrap_spec(encoding='text_or_None', errors='text_or_None') def descr_new(space, w_stringtype, w_source=None, encoding=None, errors=None): if (w_source and space.is_w(w_stringtype, space.w_bytes) From pypy.commits at gmail.com Fri Dec 16 06:12:28 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 16 Dec 2016 03:12:28 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: remove some new wraps Message-ID: <5853cc1c.2a12190a.9e1d3.65c9@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89092:342d364c012a Date: 2016-12-16 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/342d364c012a/ Log: remove some new wraps diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -601,7 +601,7 @@ from pypy.module.cppyy import interp_cppyy cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) - return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + return space.newtext(c_stdstring2charp(space, cppstr._rawobject)) # setup pythonizations for later use at run-time _pythonizations = {} @@ -616,14 +616,14 @@ ] for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) + _pythonizations[f.__name__] = interp2app(f).spacebind(space) def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) + space.setattr(w_pycppclass, space.newtext(m1), + space.getattr(w_pycppclass, space.newtext(m2))) def pythonize(space, name, w_pycppclass): if name == "string": - space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + space.setattr(w_pycppclass, space.newtext("c_str"), _pythonizations["stdstring_c_str"]) _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") 
_method_alias(space, w_pycppclass, "__str__", "c_str") From pypy.commits at gmail.com Fri Dec 16 06:12:30 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 16 Dec 2016 03:12:30 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: merge default Message-ID: <5853cc1e.d4c4190a.97876.62ba@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89093:b799e1db856f Date: 2016-12-16 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/b799e1db856f/ Log: merge default diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors +------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz @@ -307,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -3,16 +3,17 @@ The cppyy module delivers dynamic Python-C++ bindings. It is designed for automation, high performance, scale, interactivity, and -handling all of modern C++. +handling all of modern C++ (11, 14, etc.). It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ reflection and interactivity. Reflection information is extracted from C++ header files. Cppyy itself is built into PyPy (an alternative exists for CPython), but -it requires a backend, installable through pip, to interface with Cling. +it requires a `backend`_, installable through pip, to interface with Cling. .. _Cling: https://root.cern.ch/cling .. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ +.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation @@ -22,25 +23,39 @@ module, which is no longer supported. Both the tooling and user-facing Python codes are very backwards compatible, however. -Further dependencies are cmake (for general build) and Python2.7 (for LLVM). 
+Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). Assuming you have a recent enough version of PyPy installed, use pip to complete the installation of cppyy:: - $ pypy-c -m pip install PyPy-cppyy-backend + $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. The building process may take quite some time as it includes a customized -version of LLVM as part of Cling. +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. + +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). +If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. -First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. 
+Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -54,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -110,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -212,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -271,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared 
-I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -567,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. +For example:: $ cat MyTemplate.h #include @@ -587,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. -The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. 
-One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. -Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -665,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. 
The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. The latter makes for easier code writing if the classes passed to the @@ -676,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. -The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. 
-Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. -If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. -The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. 
-In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. +Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -772,5 +669,4 @@ .. 
toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. - -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. 
-However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -76,11 +76,14 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling +.. toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. 
toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -45,3 +45,14 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). 
diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,7 +1,13 @@ -import py +import py, sys @py.test.mark.tryfirst def pytest_runtest_setup(item): + if 'linux' in sys.platform: + # tests require minimally std=c++11 + cc_info = py.process.cmdexec('gcc -v --help') + if not '-std=c++11' in cc_info: + py.test.skip('skipping tests because gcc does not support C++11') + if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -379,103 +379,97 @@ if error is _NOT_SPECIFIED: raise ValueError("function %s has no return value for exceptions" % func) - def make_unwrapper(catch_exception): - # ZZZ is this whole logic really needed??? It seems to be only - # for RPython code calling PyXxx() functions directly. 
I would - # think that usually directly calling the function is clean - # enough now - names = api_function.argnames - types_names_enum_ui = unrolling_iterable(enumerate( - zip(api_function.argtypes, - [tp_name.startswith("w_") for tp_name in names]))) + names = api_function.argnames + types_names_enum_ui = unrolling_iterable(enumerate( + zip(api_function.argtypes, + [tp_name.startswith("w_") for tp_name in names]))) - @specialize.ll() - def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj - from pypy.module.cpyext.pyobject import from_ref, as_pyobj - newargs = () - keepalives = () - assert len(args) == len(api_function.argtypes) - for i, (ARG, is_wrapped) in types_names_enum_ui: - input_arg = args[i] - if is_PyObject(ARG) and not is_wrapped: - # build a 'PyObject *' (not holding a reference) - if not is_pyobj(input_arg): - keepalives += (input_arg,) - arg = rffi.cast(ARG, as_pyobj(space, input_arg)) - else: - arg = rffi.cast(ARG, input_arg) - elif ARG == rffi.VOIDP and not is_wrapped: - # unlike is_PyObject case above, we allow any kind of - # argument -- just, if it's an object, we assume the - # caller meant for it to become a PyObject*. 
- if input_arg is None or isinstance(input_arg, W_Root): - keepalives += (input_arg,) - arg = rffi.cast(ARG, as_pyobj(space, input_arg)) - else: - arg = rffi.cast(ARG, input_arg) - elif (is_PyObject(ARG) or ARG == rffi.VOIDP) and is_wrapped: - # build a W_Root, possibly from a 'PyObject *' - if is_pyobj(input_arg): - arg = from_ref(space, input_arg) - else: - arg = input_arg + @specialize.ll() + def unwrapper(space, *args): + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj + from pypy.module.cpyext.pyobject import from_ref, as_pyobj + newargs = () + keepalives = () + assert len(args) == len(api_function.argtypes) + for i, (ARG, is_wrapped) in types_names_enum_ui: + input_arg = args[i] + if is_PyObject(ARG) and not is_wrapped: + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif ARG == rffi.VOIDP and not is_wrapped: + # unlike is_PyObject case above, we allow any kind of + # argument -- just, if it's an object, we assume the + # caller meant for it to become a PyObject*. 
+ if input_arg is None or isinstance(input_arg, W_Root): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif (is_PyObject(ARG) or ARG == rffi.VOIDP) and is_wrapped: + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_ref(space, input_arg) + else: + arg = input_arg - ## ZZZ: for is_pyobj: - ## try: - ## arg = from_ref(space, - ## rffi.cast(PyObject, input_arg)) - ## except TypeError, e: - ## err = oefmt(space.w_TypeError, - ## "could not cast arg to PyObject") - ## if not catch_exception: - ## raise err - ## state = space.fromcache(State) - ## state.set_exception(err) - ## if is_PyObject(restype): - ## return None - ## else: - ## return api_function.error_value - else: - # arg is not declared as PyObject, no magic - arg = input_arg - newargs += (arg, ) - if not catch_exception: - try: - res = func(space, *newargs) - finally: - keepalive_until_here(*keepalives) + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = oefmt(space.w_TypeError, + ## "could not cast arg to PyObject") + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # non-rpython variant - assert not we_are_translated() - try: - res = func(space, *newargs) - except OperationError as e: - if not hasattr(api_function, "error_value"): - raise - state = space.fromcache(State) - state.set_exception(e) - if is_PyObject(restype): - return None - else: - return api_function.error_value - # 'keepalives' is alive here (it's not rpython) - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer, ( - 'got %r not integer' % (res,)) - return res - unwrapper.func = func - unwrapper.api_func = api_function - return unwrapper + # arg is not 
declared as PyObject, no magic + arg = input_arg + newargs += (arg, ) + try: + return func(space, *newargs) + finally: + keepalive_until_here(*keepalives) - unwrapper_catch = make_unwrapper(True) - unwrapper_raise = make_unwrapper(False) + unwrapper.func = func + unwrapper.api_func = api_function + + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now + def unwrapper_catch(space, *args): + try: + res = unwrapper(space, *args) + except OperationError as e: + if not hasattr(api_function, "error_value"): + raise + state = space.fromcache(State) + state.set_exception(e) + if is_PyObject(restype): + return None + else: + return api_function.error_value + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res + if header is not None: if header == DEFAULT_HEADER: FUNCTIONS[func_name] = api_function FUNCTIONS_BY_HEADER.setdefault(header, {})[func_name] = api_function - INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests - return unwrapper_raise # used in 'normal' RPython code. + INTERPLEVEL_API[func_name] = unwrapper_catch # used in tests + return unwrapper # used in 'normal' RPython code. 
return decorate def cpython_struct(name, fields, forward=None, level=1): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -7,7 +7,7 @@ from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject, - INTERPLEVEL_API, PyVarObject) + PyVarObject) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject @@ -245,12 +245,10 @@ else: return lltype.nullptr(PyObject.TO) as_pyobj._always_inline_ = 'try' -INTERPLEVEL_API['as_pyobj'] = as_pyobj def pyobj_has_w_obj(pyobj): w_obj = rawrefcount.to_obj(W_Root, pyobj) return w_obj is not None and w_obj is not w_marker_deallocating -INTERPLEVEL_API['pyobj_has_w_obj'] = staticmethod(pyobj_has_w_obj) def is_pyobj(x): @@ -260,7 +258,6 @@ return True else: raise TypeError(repr(type(x))) -INTERPLEVEL_API['is_pyobj'] = staticmethod(is_pyobj) class Entry(ExtRegistryEntry): _about_ = is_pyobj @@ -286,7 +283,6 @@ if not is_pyobj(obj): keepalive_until_here(obj) return pyobj -INTERPLEVEL_API['make_ref'] = make_ref @specialize.ll() @@ -307,13 +303,11 @@ assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY keepalive_until_here(w_obj) return w_obj -INTERPLEVEL_API['get_w_obj_and_decref'] = get_w_obj_and_decref @specialize.ll() def incref(space, obj): make_ref(space, obj) -INTERPLEVEL_API['incref'] = incref @specialize.ll() def decref(space, obj): @@ -326,7 +320,6 @@ _Py_Dealloc(space, obj) else: get_w_obj_and_decref(space, obj) -INTERPLEVEL_API['decref'] = decref @cpython_api([PyObject], lltype.Void) diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -2210,21 +2210,3 @@ it causes an exception to immediately be thrown; this is used for the throw() methods of 
generator objects.""" raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_Check(space, ob): - """Return true if ob is either a reference or proxy object. - """ - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckRef(space, ob): - """Return true if ob is a reference object. - """ - raise NotImplementedError - - at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) -def PyWeakref_CheckProxy(space, ob): - """Return true if ob is a proxy object. - """ - raise NotImplementedError diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1183,9 +1183,19 @@ Base1->tp_basicsize = sizeof(PyHeapTypeObject); Base2->tp_basicsize = sizeof(PyHeapTypeObject); Base12->tp_basicsize = sizeof(PyHeapTypeObject); + #ifndef PYPY_VERSION /* PyHeapTypeObject has no ht_qualname on PyPy */ + #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3 + { + PyObject * dummyname = PyBytes_FromString("dummy name"); + ((PyHeapTypeObject*)Base1)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base2)->ht_qualname = dummyname; + ((PyHeapTypeObject*)Base12)->ht_qualname = dummyname; + } + #endif + #endif Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - Base12->tp_flags = Py_TPFLAGS_DEFAULT; + Base12->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; Base12->tp_base = Base1; Base12->tp_bases = PyTuple_Pack(2, Base1, Base2); Base12->tp_doc = "The Base12 type or object"; diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py --- a/pypy/module/cpyext/test/test_weakref.py +++ b/pypy/module/cpyext/test/test_weakref.py @@ -56,3 +56,30 @@ ) ]) module.test_macro_cast() + + def 
test_weakref_check(self): + module = self.import_extension('foo', [ + ("test_weakref_cast", "METH_O", + """ + return Py_BuildValue("iiii", + (int)PyWeakref_Check(args), + (int)PyWeakref_CheckRef(args), + (int)PyWeakref_CheckRefExact(args), + (int)PyWeakref_CheckProxy(args)); + """ + ) + ]) + import weakref + def foo(): pass + class Bar(object): + pass + bar = Bar() + assert module.test_weakref_cast([]) == (0, 0, 0, 0) + assert module.test_weakref_cast(weakref.ref(foo)) == (1, 1, 1, 0) + assert module.test_weakref_cast(weakref.ref(bar)) == (1, 1, 1, 0) + assert module.test_weakref_cast(weakref.proxy(foo)) == (1, 0, 0, 1) + assert module.test_weakref_cast(weakref.proxy(bar)) == (1, 0, 0, 1) + class X(weakref.ref): + pass + assert module.test_weakref_cast(X(foo)) == (1, 1, 0, 0) + assert module.test_weakref_cast(X(bar)) == (1, 1, 0, 0) diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py --- a/pypy/module/cpyext/weakrefobject.py +++ b/pypy/module/cpyext/weakrefobject.py @@ -1,6 +1,7 @@ from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject +from pypy.module.cpyext.pyobject import PyObject, CANNOT_FAIL from pypy.module._weakref.interp__weakref import W_Weakref, proxy +from pypy.module._weakref.interp__weakref import W_Proxy, W_CallableProxy from rpython.rtyper.lltypesystem import rffi @cpython_api([PyObject, PyObject], PyObject) @@ -54,3 +55,34 @@ PyWeakref_GetObject() and Py_INCREF().) """ return space.call_function(w_ref) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRef(space, w_obj): + """Return true if ob is a reference object. 
+ """ + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return (space.is_w(w_obj_type, w_type) or + space.issubtype_w(w_obj_type, w_type)) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckRefExact(space, w_obj): + w_obj_type = space.type(w_obj) + w_type = space.gettypeobject(W_Weakref.typedef) + return space.is_w(w_obj_type, w_type) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_CheckProxy(space, w_obj): + """Return true if ob is a proxy object. + """ + w_obj_type = space.type(w_obj) + w_type1 = space.gettypeobject(W_Proxy.typedef) + w_type2 = space.gettypeobject(W_CallableProxy.typedef) + return space.is_w(w_obj_type, w_type1) or space.is_w(w_obj_type, w_type2) + + at cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) +def PyWeakref_Check(space, w_obj): + """Return true if ob is either a reference or proxy object. + """ + return (PyWeakref_CheckRef(space, w_obj) or + PyWeakref_CheckProxy(space, w_obj)) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1332,8 +1332,9 @@ Return a string of n random bytes suitable for cryptographic use. 
""" context = get(space).random_context + signal_checker = space.getexecutioncontext().checksignals try: - return space.newbytes(rurandom.urandom(context, n)) + return space.newbytes(rurandom.urandom(context, n, signal_checker)) except OSError as e: raise wrap_oserror(space, e) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -7,12 +7,12 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.objectmodel import not_rpython +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.tool import rffi_platform if sys.platform == 'win32': from rpython.rlib import rwin32 - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.rtyper.tool import rffi_platform eci = ExternalCompilationInfo( includes = ['windows.h', 'wincrypt.h'], @@ -56,7 +56,7 @@ return lltype.malloc(rffi.CArray(HCRYPTPROV), 1, immortal=True, zero=True) - def urandom(context, n): + def urandom(context, n, signal_checker=None): provider = context[0] if not provider: # This handle is never explicitly released. 
The operating @@ -80,11 +80,71 @@ def init_urandom(): return None - def urandom(context, n): + SYS_getrandom = None + + if sys.platform.startswith('linux'): + eci = ExternalCompilationInfo(includes=['sys/syscall.h']) + class CConfig: + _compilation_info_ = eci + SYS_getrandom = rffi_platform.DefinedConstantInteger( + 'SYS_getrandom') + globals().update(rffi_platform.configure(CConfig)) + + if SYS_getrandom is not None: + from rpython.rlib.rposix import get_saved_errno, handle_posix_error + import errno + + eci = eci.merge(ExternalCompilationInfo(includes=['linux/random.h'])) + class CConfig: + _compilation_info_ = eci + GRND_NONBLOCK = rffi_platform.ConstantInteger('GRND_NONBLOCK') + globals().update(rffi_platform.configure(CConfig)) + + # On Linux, use the syscall() function because the GNU libc doesn't + # expose the Linux getrandom() syscall yet. + syscall = rffi.llexternal( + 'syscall', + [lltype.Signed, rffi.CCHARP, rffi.LONG, rffi.INT], + lltype.Signed, + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) + + class Works: + status = True + getrandom_works = Works() + + def _getrandom(n, result, signal_checker): + if not getrandom_works.status: + return n + while n > 0: + with rffi.scoped_alloc_buffer(n) as buf: + got = syscall(SYS_getrandom, buf.raw, n, GRND_NONBLOCK) + if got >= 0: + s = buf.str(got) + result.append(s) + n -= len(s) + continue + err = get_saved_errno() + if (err == errno.ENOSYS or err == errno.EPERM or + err == errno.EAGAIN): # see CPython 3.5 + getrandom_works.status = False + return n + if err == errno.EINTR: + if signal_checker is not None: + signal_checker() + continue + handle_posix_error("getrandom", got) + raise AssertionError("unreachable") + return n + + def urandom(context, n, signal_checker=None): "Read n bytes from /dev/urandom." 
- result = '' - if n == 0: - return result + result = [] + if SYS_getrandom is not None: + n = _getrandom(n, result, signal_checker) + if n <= 0: + return ''.join(result) + # XXX should somehow cache the file descriptor. It's a mess. # CPython has a 99% solution and hopes for the remaining 1% # not to occur. For now, we just don't cache the file @@ -98,8 +158,8 @@ if e.errno != errno.EINTR: raise data = '' - result += data + result.append(data) n -= len(data) finally: os.close(fd) - return result + return ''.join(result) diff --git a/rpython/rlib/test/test_rurandom.py b/rpython/rlib/test/test_rurandom.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rurandom.py @@ -0,0 +1,12 @@ +from rpython.rlib import rurandom + +def test_rurandom(): + context = rurandom.init_urandom() + s = rurandom.urandom(context, 5000) + assert type(s) is str and len(s) == 5000 + for x in [1, 11, 111, 222]: + assert s.count(chr(x)) >= 1 + +def test_rurandom_no_syscall(monkeypatch): + monkeypatch.setattr(rurandom, 'SYS_getrandom', None) + test_rurandom() From pypy.commits at gmail.com Fri Dec 16 06:36:11 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 03:36:11 -0800 (PST) Subject: [pypy-commit] pypy py3.5: test and fix for bytes.__rmod__ Message-ID: <5853d1ab.0e1f190a.77dbe.676b@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89094:96366ab75bb6 Date: 2016-12-16 12:35 +0100 http://bitbucket.org/pypy/pypy/changeset/96366ab75bb6/ Log: test and fix for bytes.__rmod__ diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -684,6 +684,8 @@ return mod_format(space, self, w_values, fmt_type=FORMAT_BYTES) def descr_rmod(self, space, w_values): + if not isinstance(w_values, W_AbstractBytesObject): + return space.w_NotImplemented return mod_format(space, w_values, self, fmt_type=FORMAT_BYTES) @staticmethod diff --git 
a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -170,6 +170,8 @@ expected2 = int_format + ' format: an integer is required, not str' assert str(exc_info.value) in (expected1, expected2) raises(TypeError, "None % 'abc'") # __rmod__ + assert b'abc'.__rmod__('-%b-') is NotImplemented + assert b'abc'.__rmod__(b'-%b-') == b'-abc-' def test_format_bytes(self): assert b'<%s>' % b'abc' == b'' From pypy.commits at gmail.com Fri Dec 16 06:54:57 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 03:54:57 -0800 (PST) Subject: [pypy-commit] pypy py3.5: fix for tests Message-ID: <5853d611.8dcd190a.9aa2e.678a@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89095:31775eeb3581 Date: 2016-12-16 12:54 +0100 http://bitbucket.org/pypy/pypy/changeset/31775eeb3581/ Log: fix for tests diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -349,6 +349,9 @@ def is_generator(self, w_obj): return NonConstant(False) + def is_iterable(self, w_obj): + return NonConstant(False) + def lookup_in_type(self, w_type, name): return w_some_obj() From pypy.commits at gmail.com Fri Dec 16 07:56:27 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 04:56:27 -0800 (PST) Subject: [pypy-commit] pypy default: Flow space: "raise x" now explicitly asserts that x is not a None Message-ID: <5853e47b.0a4b2e0a.bbe5e.68d5@mx.google.com> Author: Armin Rigo Branch: Changeset: r89096:2fbea2e90463 Date: 2016-12-16 13:40 +0100 http://bitbucket.org/pypy/pypy/changeset/2fbea2e90463/ Log: Flow space: "raise x" now explicitly asserts that x is not a None diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -231,6 +231,12 @@ v = 
graph.getreturnvar() if v.annotation is None: self.setbinding(v, s_ImpossibleValue) + v = graph.exceptblock.inputargs[1] + if v.annotation is not None and v.annotation.can_be_none(): + raise annmodel.AnnotatorError( + "%r is found by annotation to possibly raise None, " + "but the None was not suppressed by the flow space" % + (graph,)) def validate(self): """Check that the annotation results are valid""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -484,6 +484,9 @@ def __init__(self, classdefs): self.classdefs = classdefs + def can_be_none(self): + return False + def as_SomeInstance(self): return unionof(*[SomeInstance(cdef) for cdef in self.classdefs]) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4652,6 +4652,17 @@ assert ('string formatting requires a constant string/unicode' in str(e.value)) + def test_cannot_raise_none(self): + def f(x): + s = None + if x > 5: + s = ValueError() + raise s + a = self.RPythonAnnotator() + a.build_types(f, [int]) + s_exc = a.binding(graphof(a, f).exceptblock.inputargs[1]) + assert not s_exc.can_be_none() + def g(n): return [0, 1, 2, n] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -597,6 +597,8 @@ Returns an FSException object whose w_value is an instance of w_type. 
""" + from rpython.rlib.debug import ll_assert_not_none + w_is_type = op.isinstance(w_arg1, const(type)).eval(self) if self.guessbool(w_is_type): # this is for all cases of the form (Class, something) @@ -618,6 +620,7 @@ "separate value") raise Raise(const(exc)) w_value = w_arg1 + w_value = op.simple_call(const(ll_assert_not_none), w_value).eval(self) w_type = op.type(w_value).eval(self) return FSException(w_type, w_value) diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -11,7 +11,8 @@ # Expose these here (public interface) from rpython.rtyper.debug import ( - ll_assert, FatalError, fatalerror, fatalerror_notb, debug_print_traceback) + ll_assert, FatalError, fatalerror, fatalerror_notb, debug_print_traceback, + ll_assert_not_none) class DebugLog(list): diff --git a/rpython/rtyper/debug.py b/rpython/rtyper/debug.py --- a/rpython/rtyper/debug.py +++ b/rpython/rtyper/debug.py @@ -20,6 +20,26 @@ hop.exception_cannot_occur() hop.genop('debug_assert', vlist) +def ll_assert_not_none(x): + """assert x is not None""" + assert x, "ll_assert_not_none(%r)" % (x,) + return x + +class Entry(ExtRegistryEntry): + _about_ = ll_assert_not_none + + def compute_result_annotation(self, s_x): + return s_x.nonnoneify() + + def specialize_call(self, hop): + [v0] = hop.inputargs(hop.args_r[0]) + assert isinstance(v0.concretetype, lltype.Ptr) + v1 = hop.genop('ptr_nonzero', [v0], resulttype=lltype.Bool) + hop.exception_cannot_occur() + cmsg = hop.inputconst(lltype.Void, "ll_assert_not_none failed") + hop.genop('debug_assert', [v1, cmsg]) + return v0 + class FatalError(Exception): pass From pypy.commits at gmail.com Fri Dec 16 09:01:02 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 06:01:02 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: A branch to track space-newtext in the py3.5 world Message-ID: <5853f39e.4fcb190a.8c89e.6d06@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: 
r89097:58d1d9b712b7 Date: 2016-12-16 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/58d1d9b712b7/ Log: A branch to track space-newtext in the py3.5 world From pypy.commits at gmail.com Fri Dec 16 09:01:04 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 06:01:04 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: Start Message-ID: <5853f3a0.5038190a.91e5f.6cad@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89098:e5f85b6b5bbf Date: 2016-12-16 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/e5f85b6b5bbf/ Log: Start diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -6,7 +6,7 @@ from rpython.rlib.buffer import StringBuffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id, specialize) + compute_unique_id, specialize, not_rpython) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX @@ -255,6 +255,9 @@ def identifier_w(self, space): self._typed_unwrap_error(space, "string") + def text_w(self, space): + self._typed_unwrap_error(space, "string") + def bytearray_list_of_chars_w(self, space): self._typed_unwrap_error(space, "bytearray") @@ -1570,18 +1573,20 @@ return None if self.is_none(w_obj) else self.str_w(w_obj) def text_or_None_w(self, w_obj): - return None if self.is_none(w_obj) else self.identifier_w(w_obj) + return None if self.is_none(w_obj) else self.text_w(w_obj) + @not_rpython def str_w(self, w_obj): """ - if w_obj is unicode, call identifier_w() (i.e., return the UTF-8 + if w_obj is unicode, call text_w() (i.e., return the UTF-8-nosg encoded string). Else, call bytes_w(). - Maybe we should kill str_w completely and manually substitute it with - identifier_w/bytes_w at all call sites? 
+ We should kill str_w completely and manually substitute it with + text_w/identifier_w/bytes_w at all call sites. It remains for + now for tests only. """ if self.isinstance_w(w_obj, self.w_unicode): - return w_obj.identifier_w(self) + return w_obj.text_w(self) else: return w_obj.bytes_w(self) @@ -1660,11 +1665,22 @@ raise oefmt(self.w_TypeError, "argument must be a unicode") return self.unicode_w(w_obj) + def text_w(self, w_obj): + """ + Unwrap a unicode object and return a 'utf-8-nosg' byte string + ('no surrogate'). This encoding always works and is in one-to- + one correspondance with the unicode. + """ + return w_obj.text_w(self) + def identifier_w(self, w_obj): """ Unwrap an object which is used as an identifier (i.e. names of variables, methdods, functions, classes etc.). In py3k, identifiers are unicode strings and are unwrapped as UTF-8 encoded byte strings. + This differs from space.text_w() because it raises an app-level + UnicodeEncodeError if the unicode string contains surrogates. + This corresponds exactly to 'str.encode(obj, "utf-8")' at app-level. """ return w_obj.identifier_w(self) diff --git a/pypy/module/__pypy__/interp_stderrprinter.py b/pypy/module/__pypy__/interp_stderrprinter.py --- a/pypy/module/__pypy__/interp_stderrprinter.py +++ b/pypy/module/__pypy__/interp_stderrprinter.py @@ -34,8 +34,8 @@ return space.wrap(res) def descr_write(self, space, w_data): - # Encode to UTF-8. - data = space.identifier_w(w_data) + # Encode to UTF-8-nosg. 
+ data = space.text_w(w_data) try: n = os.write(self.fd, data) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -30,6 +30,16 @@ space.w_unicode, "__new__", space.w_unicode, w_uni) assert w_new is w_uni + def test_identifier_or_text_w(self): + space = self.space + w_uni = space.wrap(u'abcd') + assert space.identifier_w(w_uni) == 'abcd' + assert space.text_w(w_uni) == 'abcd' + w_uni = space.wrap(unichr(0xd921) + unichr(0xdddd)) + space.raises_w(space.w_UnicodeEncodeError, space.identifier_w, w_uni) + assert space.text_w(w_uni) == '\xed\xa4\xa1\xed\xb7\x9d' + # ^^^ and not the 4-bytes combined character + class AppTestUnicodeStringStdOnly: def test_compares(self): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1073,7 +1073,7 @@ "__slots__ items must be strings, not '%T'", w_name) if not _isidentifier(space.unicode_w(w_name)): raise oefmt(space.w_TypeError, "__slots__ must be identifiers") - return w_name.identifier_w(space) + return w_name.text_w(space) def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): from pypy.objspace.std.listobject import StringSort diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -6,7 +6,9 @@ from rpython.rlib.rstring import StringBuilder, UnicodeBuilder from rpython.rlib.runicode import ( make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, - unicode_encode_ascii, unicode_encode_utf_8, fast_str_decode_ascii) + unicode_encode_ascii, unicode_encode_utf_8, fast_str_decode_ascii, + unicode_encode_utf8sp, unicode_encode_utf8_forbid_surrogates, + SurrogateError) from rpython.rlib import jit from pypy.interpreter import 
unicodehelper @@ -77,24 +79,35 @@ def unicode_w(self, space): return self._value - def identifier_w(self, space): + def _identifier_or_text_w(self, space, ignore_sg): try: identifier = jit.conditional_call_elidable( self._utf8, g_encode_utf8, self._value) if not jit.isconstant(self): self._utf8 = identifier - except UnicodeEncodeError: - # bah, this is just to get an official app-level - # UnicodeEncodeError + except SurrogateError: + # If 'ignore_sg' is False, this logic is here only + # to get an official app-level UnicodeEncodeError. + # If 'ignore_sg' is True, we encode instead using + # unicode_encode_utf8sp(). u = self._value - eh = unicodehelper.rpy_encode_error_handler() - try: - identifier = unicode_encode_utf_8(u, len(u), None, - errorhandler=eh) - except unicodehelper.RUnicodeEncodeError as ue: - raise wrap_encode_error(space, ue) + if ignore_sg: + identifier = unicode_encode_utf8sp(u, len(u)) + else: + eh = unicodehelper.rpy_encode_error_handler() + try: + identifier = unicode_encode_utf_8(u, len(u), None, + errorhandler=eh) + except unicodehelper.RUnicodeEncodeError as ue: + raise wrap_encode_error(space, ue) return identifier + def text_w(self, space): + return self._identifier_or_text_w(space, ignore_sg=True) + + def identifier_w(self, space): + return self._identifier_or_text_w(space, ignore_sg=False) + def listview_unicode(self): return _create_list_from_unicode(self._value) @@ -1279,7 +1292,7 @@ @jit.elidable def g_encode_utf8(value): """This is a global function because of jit.conditional_call_value""" - return value.encode('utf-8') + return unicode_encode_utf8_forbid_surrogates(value, len(value)) _repr_function, _ = make_unicode_escape_function( pass_printable=True, unicode_output=True, quotes=True, prefix='') diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -428,6 +428,37 @@ _encodeUCS4(result, ch) return result.build() +class SurrogateError(Exception): + pass + 
+def unicode_encode_utf8_forbid_surrogates(s, size): + # Strict surrogate-forbidding utf-8 encoding. Any surrogate character + # raises an interp-level SurrogateError, even on 16-bit hosts. + # --- XXX check in detail what occurs on 16-bit hosts in PyPy 3 --- + assert(size >= 0) + result = StringBuilder(size) + pos = 0 + while pos < size: + ch = ord(s[pos]) + pos += 1 + if ch < 0x80: + # Encode ASCII + result.append(chr(ch)) + elif ch < 0x0800: + # Encode Latin-1 + result.append(chr((0xc0 | (ch >> 6)))) + result.append(chr((0x80 | (ch & 0x3f)))) + elif ch < 0x10000: + if 0xD800 <= ch <= 0xDFFF: + raise SurrogateError + # Encode UCS2 Unicode ordinals + result.append((chr((0xe0 | (ch >> 12))))) + result.append((chr((0x80 | ((ch >> 6) & 0x3f))))) + result.append((chr((0x80 | (ch & 0x3f))))) + else: + _encodeUCS4(result, ch) + return result.build() + # ____________________________________________________________ # utf-16 From pypy.commits at gmail.com Fri Dec 16 09:01:07 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 16 Dec 2016 06:01:07 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: Replace identifier_w() with text_w() in some places where we don't really Message-ID: <5853f3a3.12092e0a.f1876.6ebe@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89099:cda3e3f5f13e Date: 2016-12-16 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/cda3e3f5f13e/ Log: Replace identifier_w() with text_w() in some places where we don't really want the "there-are-no-surrogates" check diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -404,7 +404,7 @@ i = 0 for w_key in keys_w: try: - key = space.identifier_w(w_key) + key = space.text_w(w_key) except OperationError as e: if e.match(space, space.w_TypeError): raise oefmt(space.w_TypeError, @@ -595,7 +595,7 @@ except IndexError: name = '?' 
else: - name = space.identifier_w(w_name) + name = space.text_w(w_name) break self.kwd_name = name diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1522,7 +1522,7 @@ if self.isinstance_w(w_obj, self.w_str): return StringBuffer(w_obj.bytes_w(self)) if self.isinstance_w(w_obj, self.w_unicode): - return StringBuffer(w_obj.identifier_w(self)) + return StringBuffer(w_obj.identifier_w(self)) # no surrogates try: return w_obj.buffer_w(self, self.BUF_SIMPLE) except BufferInterfaceNotFound: @@ -1531,7 +1531,7 @@ if self.isinstance_w(w_obj, self.w_str): return w_obj.bytes_w(self) if self.isinstance_w(w_obj, self.w_unicode): - return w_obj.identifier_w(self) + return w_obj.identifier_w(self) # no surrogates (forbidden) try: return w_obj.buffer_w(self, self.BUF_SIMPLE).as_str() except BufferInterfaceNotFound: @@ -1681,6 +1681,7 @@ This differs from space.text_w() because it raises an app-level UnicodeEncodeError if the unicode string contains surrogates. This corresponds exactly to 'str.encode(obj, "utf-8")' at app-level. + (XXX check what occurs on narrow builds or kill narrow builds!) 
""" return w_obj.identifier_w(self) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -465,7 +465,7 @@ return self.getcode().co_consts_w[index] def getname_u(self, index): - return self.space.identifier_w(self.getname_w(index)) + return self.space.text_w(self.getname_w(index)) def getname_w(self, index): return self.getcode().co_names_w[index] @@ -899,7 +899,7 @@ def LOAD_NAME(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - varname = self.space.identifier_w(w_varname) + varname = self.space.text_w(w_varname) if self.getorcreatedebug().w_locals is not self.get_w_globals(): w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, varname) @@ -929,7 +929,7 @@ @always_inline def LOAD_GLOBAL(self, nameindex, next_instr): w_varname = self.getname_w(nameindex) - w_value = self._load_global(self.space.identifier_w(w_varname)) + w_value = self._load_global(self.space.text_w(w_varname)) if w_value is None: self._load_global_failed(w_varname) self.pushvalue(w_value) @@ -1257,7 +1257,7 @@ break w_value = self.popvalue() w_key = self.popvalue() - key = self.space.identifier_w(w_key) + key = self.space.text_w(w_key) keywords[n_keywords] = key keywords_w[n_keywords] = w_value else: From pypy.commits at gmail.com Fri Dec 16 09:13:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 16 Dec 2016 06:13:03 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: fix some wrong newbytes in baseobjspace.py. thanks armin Message-ID: <5853f66f.43df190a.bb615.6eb5@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89100:d77161178f39 Date: 2016-12-16 15:12 +0100 http://bitbucket.org/pypy/pypy/changeset/d77161178f39/ Log: fix some wrong newbytes in baseobjspace.py. 
thanks armin diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1193,7 +1193,7 @@ return w_res def call_method(self, w_obj, methname, *arg_w): - w_meth = self.getattr(w_obj, self.newbytes(methname)) + w_meth = self.getattr(w_obj, self.newtext(methname)) return self.call_function(w_meth, *arg_w) def raise_key_error(self, w_key): @@ -1202,7 +1202,7 @@ def lookup(self, w_obj, name): w_type = self.type(w_obj) - w_mro = self.getattr(w_type, self.newbytes("__mro__")) + w_mro = self.getattr(w_type, self.newtext("__mro__")) for w_supertype in self.fixedview(w_mro): w_value = w_supertype.getdictvalue(self, name) if w_value is not None: @@ -1223,7 +1223,7 @@ if self.is_oldstyle_instance(w_obj): # ugly old style class special treatment, but well ... try: - self.getattr(w_obj, self.newbytes("__call__")) + self.getattr(w_obj, self.newtext("__call__")) return self.w_True except OperationError as e: if not e.match(self, self.w_AttributeError): @@ -1235,7 +1235,7 @@ def issequence_w(self, w_obj): if self.is_oldstyle_instance(w_obj): - return (self.findattr(w_obj, self.newbytes('__getitem__')) is not None) + return (self.findattr(w_obj, self.newtext('__getitem__')) is not None) flag = self.type(w_obj).flag_map_or_seq if flag == 'M': return False @@ -1246,7 +1246,7 @@ def ismapping_w(self, w_obj): if self.is_oldstyle_instance(w_obj): - return (self.findattr(w_obj, self.newbytes('__getitem__')) is not None) + return (self.findattr(w_obj, self.newtext('__getitem__')) is not None) flag = self.type(w_obj).flag_map_or_seq if flag == 'M': return True @@ -1327,7 +1327,7 @@ hidden_applevel=hidden_applevel) if not isinstance(statement, PyCode): raise TypeError('space.exec_(): expected a string, code or PyCode object') - w_key = self.newbytes('__builtins__') + w_key = self.newtext('__builtins__') if not self.contains_w(w_globals, w_key): self.setitem(w_globals, w_key, self.builtin) 
return statement.exec_code(self, w_globals, w_locals) @@ -1808,7 +1808,7 @@ if (not self.isinstance_w(w_fd, self.w_int) and not self.isinstance_w(w_fd, self.w_long)): try: - w_fileno = self.getattr(w_fd, self.newbytes("fileno")) + w_fileno = self.getattr(w_fd, self.newtext("fileno")) except OperationError as e: if e.match(self, self.w_AttributeError): raise oefmt(self.w_TypeError, From pypy.commits at gmail.com Fri Dec 16 10:53:59 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 16 Dec 2016 07:53:59 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: gave up finding the root cause of this translation problem, and instead enforce Message-ID: <58540e17.c200190a.7606c.7617@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89101:4d6b02c8898d Date: 2016-12-16 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/4d6b02c8898d/ Log: gave up finding the root cause of this translation problem, and instead enforce some W_Root types in gateway.py. (why things have to be None is another sad story) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -27,6 +27,9 @@ from rpython.rlib.rarithmetic import r_longlong, r_int, r_ulonglong, r_uint from rpython.tool.sourcetools import func_with_new_name, compile2 +from rpython.rlib.signature import signature, finishsigs +from rpython.rlib import types as sigtypes + # internal non-translatable parts: class SignatureBuilder(object): @@ -795,11 +798,17 @@ w_result = space.w_None return w_result +w_root_or_none = sigtypes.instance(W_Root, can_be_None=True) + at finishsigs class BuiltinCode1(BuiltinCode): _immutable_ = True fast_natural_arity = 1 + @signature(sigtypes.self(), sigtypes.any(), + w_root_or_none, + w_root_or_none, + returns=w_root_or_none) def fastcall_1(self, space, w_func, w1): try: w_result = self.fastfunc_1(space, w1) @@ -816,10 +825,16 @@ return w_result + at finishsigs class 
BuiltinCode2(BuiltinCode): _immutable_ = True fast_natural_arity = 2 + @signature(sigtypes.self(), sigtypes.any(), + w_root_or_none, + w_root_or_none, + w_root_or_none, + returns=w_root_or_none) def fastcall_2(self, space, w_func, w1, w2): try: w_result = self.fastfunc_2(space, w1, w2) @@ -836,10 +851,17 @@ return w_result + at finishsigs class BuiltinCode3(BuiltinCode): _immutable_ = True fast_natural_arity = 3 + @signature(sigtypes.self(), sigtypes.any(), + w_root_or_none, + w_root_or_none, + w_root_or_none, + w_root_or_none, + returns=w_root_or_none) def fastcall_3(self, space, func, w1, w2, w3): try: w_result = self.fastfunc_3(space, w1, w2, w3) @@ -855,12 +877,20 @@ w_result = space.w_None return w_result - + at finishsigs class BuiltinCode4(BuiltinCode): _immutable_ = True fast_natural_arity = 4 + @signature(sigtypes.self(), sigtypes.any(), + w_root_or_none, + w_root_or_none, + w_root_or_none, + w_root_or_none, + w_root_or_none, + returns=w_root_or_none) def fastcall_4(self, space, func, w1, w2, w3, w4): + from rpython.rlib.debug import check_annotation try: w_result = self.fastfunc_4(space, w1, w2, w3, w4) except DescrMismatch: diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -221,6 +221,36 @@ @check_annotator_fails def bad_for_body(): f(C1()) + @check_annotator_fails + def ok_for_body(): + f(None) + +def test_instance_or_none(): + class C1(object): + pass + class C2(C1): + pass + class C3(C2): + pass + @signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True)) + def f(x): + assert isinstance(x, C2) or x is None + return x + argtype, rettype = getsig(f) + assert isinstance(argtype, model.SomeInstance) + assert argtype.classdef.classdesc.pyobj == C3 + assert argtype.can_be_None + assert isinstance(rettype, model.SomeInstance) + assert rettype.classdef.classdesc.pyobj == C2 + assert rettype.can_be_None 
+ + @check_annotator_fails + def ok_for_body(): + f(C2()) + @check_annotator_fails + def bad_for_body(): + f(C1()) + def test_self(): @finishsigs diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -76,8 +76,8 @@ return model.SomeDict(dictdef) -def instance(cls): - return lambda bookkeeper: model.SomeInstance(bookkeeper.getuniqueclassdef(cls)) +def instance(cls, can_be_None=False): + return lambda bookkeeper: model.SomeInstance(bookkeeper.getuniqueclassdef(cls), can_be_None=can_be_None) class SelfTypeMarker(object): From pypy.commits at gmail.com Fri Dec 16 11:31:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 16 Dec 2016 08:31:57 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Deal with parameterless macros Message-ID: <585416fd.8259190a.ad498.74dc@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89102:11a2deff4a3d Date: 2016-12-16 16:31 +0000 http://bitbucket.org/pypy/pypy/changeset/11a2deff4a3d/ Log: Deal with parameterless macros diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -45,6 +45,10 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + + for name, value in reversed(macros.items()): + csource = re.sub(r'\b%s\b' % name, value, csource) + return csource, macros def _common_type_names(csource): @@ -219,17 +223,8 @@ value = value.strip() if _r_int_literal.match(value): self._add_integer_constant(key, value) - elif value == '...': + else: self._declare('macro ' + key, value) - else: - raise api.CDefError( - 'only supports one of the following syntax:\n' - ' #define %s ... 
(literally dot-dot-dot)\n' - ' #define %s NUMBER (with NUMBER an integer' - ' constant, decimal/hex/octal)\n' - 'got:\n' - ' #define %s %s' - % (key, key, key, value)) def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) @@ -662,9 +657,10 @@ return CNAME_TO_LLTYPE[name] class ParsedSource(object): - def __init__(self, source, definitions): + def __init__(self, source, definitions, macros): self.source = source self.definitions = definitions + self.macros = macros def cffi_to_lltype(obj): from pypy.module.cpyext.api import cpython_struct @@ -681,10 +677,15 @@ ctx = Parser() ctx.parse(source) defs = {} + macros = {} for name, (obj, quals) in ctx._declarations.iteritems(): - if not name.startswith('typedef '): - continue - name = name[8:] - assert name not in defs - defs[name] = cffi_to_lltype(obj) - return ParsedSource(source, defs) + if name.startswith('typedef '): + name = name[8:] + assert name not in defs + defs[name] = cffi_to_lltype(obj) + elif name.startswith('macro '): + name = name[6:] + assert name not in macros + macros[name] = obj + + return ParsedSource(source, defs, macros) diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -22,3 +22,20 @@ decl = "typedef ssize_t Py_ssize_t;" hdr = parse_source(decl) assert hdr.definitions == {'Py_ssize_t': rffi.SSIZE_T} + +def test_macro(): + decl = """ + typedef ssize_t Py_ssize_t; + + #define PyObject_HEAD \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ + + typedef struct { + PyObject_HEAD + double ob_fval; + } PyFloatObject; + """ + hdr = parse_source(decl) + assert 'PyFloatObject' in hdr.definitions + assert 'PyObject_HEAD' in hdr.macros From pypy.commits at gmail.com Fri Dec 16 23:11:00 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 16 Dec 2016 20:11:00 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Remove unused function 
Message-ID: <5854bad4.624fc20a.202c2.b459@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89103:faa4070df377 Date: 2016-12-16 16:38 +0000 http://bitbucket.org/pypy/pypy/changeset/faa4070df377/ Log: Remove unused function diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -607,14 +607,6 @@ % (cpyname, )) build_exported_objects() -def get_structtype_for_ctype(ctype): - from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr - from pypy.module.cpyext.cdatetime import PyDateTime_CAPI - from pypy.module.cpyext.intobject import PyIntObject - return {"PyObject*": PyObject, "PyTypeObject*": PyTypeObjectPtr, - "PyIntObject*": PyIntObject, - "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] - # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. From pypy.commits at gmail.com Fri Dec 16 23:11:02 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 16 Dec 2016 20:11:02 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: progress? Message-ID: <5854bad6.c220c20a.d77c4.c0cd@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89104:f6bc1fb871de Date: 2016-12-17 02:24 +0000 http://bitbucket.org/pypy/pypy/changeset/f6bc1fb871de/ Log: progress? 
diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -656,36 +656,62 @@ def cname_to_lltype(name): return CNAME_TO_LLTYPE[name] +class DelayedStruct(object): + def __init__(self, name, fields): + self.struct_name = name + self.fields = fields + + def realize(self, type_name): + from pypy.module.cpyext.api import cpython_struct + return cpython_struct(type_name, self.fields) + + def __repr__(self): + return "".format(vars(self)) + + class ParsedSource(object): - def __init__(self, source, definitions, macros): + def __init__(self, source, definitions=None, macros=None): self.source = source - self.definitions = definitions - self.macros = macros + self.definitions = definitions if definitions is not None else {} + self.macros = macros if macros is not None else {} + self.structs = {} -def cffi_to_lltype(obj): - from pypy.module.cpyext.api import cpython_struct - if isinstance(obj, model.PrimitiveType): - return cname_to_lltype(obj.name) - elif isinstance(obj, model.StructType): - fields = zip( - obj.fldnames, - [cffi_to_lltype(field) for field in obj.fldtypes]) - return cpython_struct(obj.name, fields) + def add_typedef(self, name, obj): + assert name not in self.definitions + tp = self.convert_type(obj) + if isinstance(tp, DelayedStruct): + tp = tp.realize(name) + self.structs[obj] = tp + self.definitions[name] = tp + + def add_macro(self, name, value): + assert name not in self.macros + self.macros[name] = value + + def convert_type(self, obj): + from pypy.module.cpyext.api import cpython_struct + if isinstance(obj, model.PrimitiveType): + return cname_to_lltype(obj.name) + elif isinstance(obj, model.StructType): + if obj in self.structs: + return self.structs[obj] + fields = zip( + obj.fldnames, + [self.convert_type(field) for field in obj.fldtypes]) + result = DelayedStruct(obj.name, fields) + self.structs[obj] = result + return result def parse_source(source): ctx = 
Parser() ctx.parse(source) - defs = {} - macros = {} + src = ParsedSource(source) for name, (obj, quals) in ctx._declarations.iteritems(): if name.startswith('typedef '): name = name[8:] - assert name not in defs - defs[name] = cffi_to_lltype(obj) + src.add_typedef(name, obj) elif name.startswith('macro '): name = name[6:] - assert name not in macros - macros[name] = obj - - return ParsedSource(source, defs, macros) + src.add_macro(name, obj) + return src From pypy.commits at gmail.com Fri Dec 16 23:11:03 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 16 Dec 2016 20:11:03 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Begin using parse_source() in pypy.module.cpyext.api Message-ID: <5854bad7.c220c20a.d77c4.c0cf@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89105:91730b67d4d6 Date: 2016-12-17 03:00 +0000 http://bitbucket.org/pypy/pypy/changeset/91730b67d4d6/ Log: Begin using parse_source() in pypy.module.cpyext.api diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -41,15 +41,10 @@ from rpython.rlib import rawrefcount from rpython.rlib import rthread from rpython.rlib.debug import fatalerror_notb +from pypy.module.cpyext.cparser import parse_source DEBUG_WRAPPER = True -# update these for other platforms -Py_ssize_t = lltype.Typedef(rffi.SSIZE_T, 'Py_ssize_t') -Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) -size_t = rffi.ULONG -ADDR = lltype.Signed - pypydir = py.path.local(pypydir) include_dir = pypydir / 'module' / 'cpyext' / 'include' source_dir = pypydir / 'module' / 'cpyext' / 'src' @@ -119,6 +114,9 @@ return is_valid_fd(c_fileno(fp)) pypy_decl = 'pypy_decl.h' +udir.join(pypy_decl).write("/* Will be filled later */\n") +udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") +udir.join('pypy_macros.h').write("/* Will be filled later */\n") constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING Py_TPFLAGS_HAVE_GETCHARBUFFER @@ 
-129,9 +127,6 @@ """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) -udir.join(pypy_decl).write("/* Will be filled later */\n") -udir.join('pypy_structmember_decl.h').write("/* Will be filled later */\n") -udir.join('pypy_macros.h').write("/* Will be filled later */\n") globals().update(rffi_platform.configure(CConfig_constants)) def _copy_header_files(headers, dstdir): @@ -607,6 +602,14 @@ % (cpyname, )) build_exported_objects() +# update these for other platforms +h = parse_source("typedef ssize_t Py_ssize_t;") + +Py_ssize_t = h.definitions['Py_ssize_t'] +Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) +size_t = rffi.ULONG +ADDR = lltype.Signed + # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -2,7 +2,7 @@ from cffi.commontypes import COMMON_TYPES, resolve_common_type import pycparser import weakref, re -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rfficache _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", @@ -682,17 +682,17 @@ if isinstance(tp, DelayedStruct): tp = tp.realize(name) self.structs[obj] = tp - self.definitions[name] = tp + self.definitions[name] = lltype.Typedef(tp, name) def add_macro(self, name, value): assert name not in self.macros self.macros[name] = value def convert_type(self, obj): - from pypy.module.cpyext.api import cpython_struct if isinstance(obj, model.PrimitiveType): return cname_to_lltype(obj.name) elif isinstance(obj, model.StructType): + from pypy.module.cpyext.api import cpython_struct if obj in self.structs: return self.structs[obj] fields = zip( From pypy.commits at gmail.com Fri Dec 16 
23:11:06 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 16 Dec 2016 20:11:06 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Expand the pseudo-header in api.py, handle forward refs and pointers Message-ID: <5854bada.ce941c0a.d731b.4fdf@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89106:cb24068cdac0 Date: 2016-12-17 04:10 +0000 http://bitbucket.org/pypy/pypy/changeset/cb24068cdac0/ Log: Expand the pseudo-header in api.py, handle forward refs and pointers diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -602,8 +602,28 @@ % (cpyname, )) build_exported_objects() -# update these for other platforms -h = parse_source("typedef ssize_t Py_ssize_t;") +h = parse_source(""" +typedef ssize_t Py_ssize_t; + +#define PyObject_HEAD \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ + struct _typeobject *ob_type; + +#define PyObject_VAR_HEAD \ + PyObject_HEAD \ + Py_ssize_t ob_size; /* Number of items in variable part */ + +typedef struct _object { + PyObject_HEAD +} PyObject; + +typedef struct { + PyObject_VAR_HEAD +} PyVarObject; + +typedef struct _typeobject PyTypeObject; +""") Py_ssize_t = h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) @@ -613,16 +633,15 @@ # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. 
-PyTypeObject = lltype.ForwardReference() +PyTypeObject = h.definitions['PyTypeObject'].OF PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = lltype.ForwardReference() +PyObjectStruct = h.definitions['PyObject'].OF PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -cpython_struct('PyObject', PyObjectFields, PyObjectStruct) -PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) +PyVarObjectStruct = h.definitions['PyVarObject'].OF PyVarObject = lltype.Ptr(PyVarObjectStruct) Py_buffer = cpython_struct( diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -1,3 +1,4 @@ +from collections import OrderedDict from cffi import api, model from cffi.commontypes import COMMON_TYPES, resolve_common_type import pycparser @@ -30,7 +31,7 @@ # should not contain any string literal! 
csource = _r_comment.sub(' ', csource) # Remove the "#define FOO x" lines - macros = {} + macros = OrderedDict() for match in _r_define.finditer(csource): macroname, macrovalue = match.groups() macrovalue = macrovalue.replace('\\\n', '').strip() @@ -695,12 +696,19 @@ from pypy.module.cpyext.api import cpython_struct if obj in self.structs: return self.structs[obj] - fields = zip( - obj.fldnames, - [self.convert_type(field) for field in obj.fldtypes]) - result = DelayedStruct(obj.name, fields) + if obj.fldtypes is None: + result = lltype.ForwardReference() + else: + fields = zip( + obj.fldnames, + [self.convert_type(field) for field in obj.fldtypes]) + result = DelayedStruct(obj.name, fields) self.structs[obj] = result return result + elif isinstance(obj, model.PointerType): + return lltype.Ptr(self.convert_type(obj.totype)) + else: + raise NotImplementedError def parse_source(source): From pypy.commits at gmail.com Sat Dec 17 03:25:01 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 00:25:01 -0800 (PST) Subject: [pypy-commit] pypy default: _PyImport_{Acquire,Release}Lock() Message-ID: <5854f65d.c5371c0a.f6936.85ea@mx.google.com> Author: Armin Rigo Branch: Changeset: r89107:836ec0a42c70 Date: 2016-12-17 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/836ec0a42c70/ Log: _PyImport_{Acquire,Release}Lock() diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,6 +1,6 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( - generic_cpy_call, cpython_api, PyObject, CONST_STRING) + generic_cpy_call, cpython_api, PyObject, CONST_STRING, CANNOT_FAIL) from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError from pypy.interpreter.module import Module @@ -124,3 +124,24 @@ w_mod = importing.add_module(space, w_name) space.setattr(w_mod, space.wrap('__file__'), space.wrap(pathname)) return 
importing.exec_code_module(space, w_mod, code, w_name) + + at cpython_api([], lltype.Void, error=CANNOT_FAIL) +def _PyImport_AcquireLock(space): + """Locking primitive to prevent parallel imports of the same module + in different threads to return with a partially loaded module. + These calls are serialized by the global interpreter lock.""" + try: + w_func = space.getbuiltinmodule('imp').get('acquire_lock') + space.call_function(w_func) + except OperationError as e: + e.write_unraisable(space, "_PyImport_AcquireLock") + + at cpython_api([], rffi.INT_real, error=CANNOT_FAIL) +def _PyImport_ReleaseLock(space): + try: + w_func = space.getbuiltinmodule('imp').get('release_lock') + space.call_function(w_func) + return 1 + except OperationError as e: + e.write_unraisable(space, "_PyImport_ReleaseLock") + return -1 diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -37,6 +37,14 @@ stat = api.PyImport_ReloadModule(stat) assert space.getattr(stat, space.wrap("S_IMODE")) + def test_lock(self, space, api): + # "does not crash" + api._PyImport_AcquireLock() + api._PyImport_AcquireLock() + api._PyImport_ReleaseLock() + api._PyImport_ReleaseLock() + + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): import sys, os From pypy.commits at gmail.com Sat Dec 17 03:50:56 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 00:50:56 -0800 (PST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <5854fc70.d32f1c0a.9e8ad.8b22@mx.google.com> Author: Armin Rigo Branch: Changeset: r89108:51d7e78bf66c Date: 2016-12-17 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/51d7e78bf66c/ Log: Translation fix diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -131,16 +131,14 @@ in different threads to 
return with a partially loaded module. These calls are serialized by the global interpreter lock.""" try: - w_func = space.getbuiltinmodule('imp').get('acquire_lock') - space.call_function(w_func) + space.call_method(space.getbuiltinmodule('imp'), 'acquire_lock') except OperationError as e: e.write_unraisable(space, "_PyImport_AcquireLock") @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def _PyImport_ReleaseLock(space): try: - w_func = space.getbuiltinmodule('imp').get('release_lock') - space.call_function(w_func) + space.call_method(space.getbuiltinmodule('imp'), 'release_lock') return 1 except OperationError as e: e.write_unraisable(space, "_PyImport_ReleaseLock") From pypy.commits at gmail.com Sat Dec 17 04:43:16 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 17 Dec 2016 01:43:16 -0800 (PST) Subject: [pypy-commit] pypy py3.5: update cryptography cffi backend Message-ID: <585508b4.e576c20a.37a92.0ec1@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89109:09da85689a2d Date: 2016-12-17 10:40 +0100 http://bitbucket.org/pypy/pypy/changeset/09da85689a2d/ Log: update cryptography cffi backend diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md --- a/lib_pypy/_cffi_ssl/README.md +++ b/lib_pypy/_cffi_ssl/README.md @@ -14,3 +14,7 @@ Copy over all the sources into the folder `lib_pypy/_cffi_ssl/*`. Updating the cffi backend can be simply done by the following command: $ cp -r /src/_cffi_src/* . 
+ +# Cryptography version + +`c8f47ad2122efdd5e772aee13ed5d4c64e7d6086` diff --git a/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py b/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py @@ -68,6 +68,7 @@ "objects", "ocsp", "opensslv", + "osrandom_engine", "pem", "pkcs12", "rand", diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/aes.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/aes.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/aes.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/aes.py @@ -10,7 +10,6 @@ TYPES = """ static const int Cryptography_HAS_AES_WRAP; -static const int Cryptography_HAS_AES_CTR128_ENCRYPT; struct aes_key_st { ...; @@ -29,22 +28,8 @@ """ MACROS = """ -/* The ctr128_encrypt function is only useful in 1.0.0. We can use EVP for - this in 1.0.1+. */ -void AES_ctr128_encrypt(const unsigned char *, unsigned char *, - size_t, const AES_KEY *, unsigned char[], - unsigned char[], unsigned int *); """ CUSTOMIZATIONS = """ static const long Cryptography_HAS_AES_WRAP = 1; -#if CRYPTOGRAPHY_OPENSSL_110_OR_GREATER && !defined(LIBRESSL_VERSION_NUMBER) -static const int Cryptography_HAS_AES_CTR128_ENCRYPT = 0; -void (*AES_ctr128_encrypt)(const unsigned char *, unsigned char *, - size_t, const AES_KEY *, - unsigned char[], unsigned char[], - unsigned int *) = NULL; -#else -static const int Cryptography_HAS_AES_CTR128_ENCRYPT = 1; -#endif """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cmac.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cmac.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cmac.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cmac.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function INCLUDES = """ -#if !defined(OPENSSL_NO_CMAC) && CRYPTOGRAPHY_OPENSSL_101_OR_GREATER +#if !defined(OPENSSL_NO_CMAC) #include #endif """ @@ -28,7 +28,7 @@ """ CUSTOMIZATIONS = """ -#if !defined(OPENSSL_NO_CMAC) && CRYPTOGRAPHY_OPENSSL_101_OR_GREATER 
+#if !defined(OPENSSL_NO_CMAC) static const long Cryptography_HAS_CMAC = 1; #else static const long Cryptography_HAS_CMAC = 0; diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/cryptography.py @@ -17,8 +17,6 @@ #include #endif -#define CRYPTOGRAPHY_OPENSSL_101_OR_GREATER \ - (OPENSSL_VERSION_NUMBER >= 0x10001000) #define CRYPTOGRAPHY_OPENSSL_102_OR_GREATER \ (OPENSSL_VERSION_NUMBER >= 0x10002000) #define CRYPTOGRAPHY_OPENSSL_102BETA2_OR_GREATER \ @@ -26,8 +24,6 @@ #define CRYPTOGRAPHY_OPENSSL_110_OR_GREATER \ (OPENSSL_VERSION_NUMBER >= 0x10100000) -#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_101 \ - (OPENSSL_VERSION_NUMBER < 0x10001000) #define CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 \ (OPENSSL_VERSION_NUMBER < 0x10002000) #define CRYPTOGRAPHY_OPENSSL_LESS_THAN_102BETA3 \ @@ -51,12 +47,8 @@ """ TYPES = """ -static const int CRYPTOGRAPHY_OPENSSL_101_OR_GREATER; - static const int CRYPTOGRAPHY_OPENSSL_110_OR_GREATER; -static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_101; - static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I; static const int CRYPTOGRAPHY_IS_LIBRESSL; diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ec.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ec.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ec.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ec.py @@ -14,7 +14,6 @@ TYPES = """ static const int Cryptography_HAS_EC; -static const int Cryptography_HAS_EC_1_0_1; static const int Cryptography_HAS_EC2M; static const int Cryptography_HAS_EC_1_0_2; @@ -327,13 +326,6 @@ int (*EC_METHOD_get_field_type)(const EC_METHOD *) = NULL; -#else -static const long Cryptography_HAS_EC = 1; -#endif - -#if defined(OPENSSL_NO_EC) || CRYPTOGRAPHY_OPENSSL_LESS_THAN_101 -static const long Cryptography_HAS_EC_1_0_1 = 0; - int (*EC_KEY_get_flags)(const EC_KEY *) = NULL; void (*EC_KEY_set_flags)(EC_KEY *, int) = NULL; 
void (*EC_KEY_clear_flags)(EC_KEY *, int) = NULL; @@ -341,10 +333,9 @@ int (*EC_KEY_set_public_key_affine_coordinates)( EC_KEY *, BIGNUM *, BIGNUM *) = NULL; #else -static const long Cryptography_HAS_EC_1_0_1 = 1; +static const long Cryptography_HAS_EC = 1; #endif - #if defined(OPENSSL_NO_EC) || defined(OPENSSL_NO_EC2M) static const long Cryptography_HAS_EC2M = 0; diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/evp.py @@ -25,13 +25,6 @@ static const int EVP_CTRL_GCM_GET_TAG; static const int EVP_CTRL_GCM_SET_TAG; -typedef struct { - int type; - int alias; - const char *name; - const char *data; -} OBJ_NAME; - static const int Cryptography_HAS_GCM; static const int Cryptography_HAS_PBKDF2_HMAC; static const int Cryptography_HAS_PKEY_CTX; @@ -143,7 +136,6 @@ without worrying about what OpenSSL we're running against. */ EVP_MD_CTX *Cryptography_EVP_MD_CTX_new(void); void Cryptography_EVP_MD_CTX_free(EVP_MD_CTX *); -void OBJ_NAME_do_all(int, void (*) (const OBJ_NAME *, void *), void *); """ MACROS = """ @@ -164,7 +156,7 @@ EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *); int EVP_PKEY_set1_EC_KEY(EVP_PKEY *, EC_KEY *); -int EVP_MD_CTX_block_size(const EVP_MD_CTX *md); +int EVP_MD_CTX_block_size(const EVP_MD_CTX *); int EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *); int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *, int, int, void *); @@ -176,7 +168,6 @@ int EVP_PBE_scrypt(const char *, size_t, const unsigned char *, size_t, uint64_t, uint64_t, uint64_t, uint64_t, unsigned char *, size_t); -#define OBJ_NAME_TYPE_MD_METH ... 
""" CUSTOMIZATIONS = """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/objects.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/objects.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/objects.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/objects.py @@ -9,6 +9,14 @@ """ TYPES = """ +typedef struct { + int type; + int alias; + const char *name; + const char *data; +} OBJ_NAME; + +static const long OBJ_NAME_TYPE_MD_METH; """ FUNCTIONS = """ @@ -24,6 +32,7 @@ int OBJ_cmp(const ASN1_OBJECT *, const ASN1_OBJECT *); ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *); int OBJ_create(const char *, const char *, const char *); +void OBJ_NAME_do_all(int, void (*) (const OBJ_NAME *, void *), void *); """ MACROS = """ diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/rsa.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/rsa.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/rsa.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/rsa.py @@ -20,7 +20,6 @@ static const int RSA_F4; static const int Cryptography_HAS_PSS_PADDING; -static const int Cryptography_HAS_MGF1_MD; static const int Cryptography_HAS_RSA_OAEP_MD; """ @@ -73,12 +72,6 @@ CUSTOMIZATIONS = """ static const long Cryptography_HAS_PSS_PADDING = 1; -#if CRYPTOGRAPHY_OPENSSL_101_OR_GREATER -static const long Cryptography_HAS_MGF1_MD = 1; -#else -static const long Cryptography_HAS_MGF1_MD = 0; -int (*EVP_PKEY_CTX_set_rsa_mgf1_md)(EVP_PKEY_CTX *, EVP_MD *) = NULL; -#endif #if defined(EVP_PKEY_CTX_set_rsa_oaep_md) static const long Cryptography_HAS_RSA_OAEP_MD = 1; #else diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -25,7 +25,6 @@ static const long Cryptography_HAS_GET_SERVER_TMP_KEY; static const long Cryptography_HAS_SSL_CTX_SET_CLIENT_CERT_ENGINE; static const long Cryptography_HAS_SSL_CTX_CLEAR_OPTIONS; -static const long Cryptography_HAS_NPN_NEGOTIATED; /* Internally invented symbol to tell us 
if SNI is supported */ static const long Cryptography_HAS_TLSEXT_HOSTNAME; @@ -44,8 +43,8 @@ static const long Cryptography_HAS_SSL_SET_SSL_CTX; static const long Cryptography_HAS_SSL_OP_NO_TICKET; static const long Cryptography_HAS_NETBSD_D1_METH; +static const long Cryptography_HAS_ALPN; static const long Cryptography_HAS_NEXTPROTONEG; -static const long Cryptography_HAS_ALPN; static const long Cryptography_HAS_SET_CERT_CB; static const long SSL_FILETYPE_PEM; @@ -141,6 +140,7 @@ typedef ... SSL; static const long TLSEXT_NAMETYPE_host_name; +static const long TLSEXT_STATUSTYPE_ocsp; typedef ... SSL_CIPHER; typedef ... Cryptography_STACK_OF_SSL_CIPHER; @@ -362,9 +362,6 @@ long SSL_session_reused(SSL *); -/* NPN APIs were introduced in OpenSSL 1.0.1. To continue to support earlier - * versions some special handling of these is necessary. - */ void SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *, int (*)(SSL *, const unsigned char **, @@ -413,7 +410,7 @@ /* Added in 1.0.2 */ const SSL_METHOD *SSL_CTX_get_ssl_method(SSL_CTX *); -/* Added in 1.0.1 */ + int SSL_SESSION_set1_id_context(SSL_SESSION *, const unsigned char *, unsigned int); /* Added in 1.1.0 for the great opaquing of structs */ @@ -437,28 +434,6 @@ """ CUSTOMIZATIONS = """ -/* Added in 1.0.1 but we need it in all versions now due to the great - opaquing. */ -#if CRYPTOGRAPHY_OPENSSL_LESS_THAN_101 -/* from ssl.h */ -#define SSL_F_SSL_SESSION_SET1_ID_CONTEXT 312 -#define SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG 273 -/* from ssl/ssl_sess.c */ -int SSL_SESSION_set1_id_context(SSL_SESSION *s, const unsigned char *sid_ctx, - unsigned int sid_ctx_len) -{ - if (sid_ctx_len > SSL_MAX_SID_CTX_LENGTH) { - SSLerr(SSL_F_SSL_SESSION_SET1_ID_CONTEXT, - SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); - return 0; - } - s->sid_ctx_length = sid_ctx_len; - memcpy(s->sid_ctx, sid_ctx, sid_ctx_len); - - return 1; -} -#endif - /* Added in 1.0.2 but we need it in all versions now due to the great opaquing. 
*/ #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 || defined(LIBRESSL_VERSION_NUMBER) @@ -604,36 +579,7 @@ static const long Cryptography_HAS_NETBSD_D1_METH = 1; #endif -/* Because OPENSSL defines macros that claim lack of support for things, rather - * than macros that claim support for things, we need to do a version check in - * addition to a definition check. NPN was added in 1.0.1: for any version - * before that, there is no compatibility. - */ -#if defined(OPENSSL_NO_NEXTPROTONEG) || CRYPTOGRAPHY_OPENSSL_LESS_THAN_101 -static const long Cryptography_HAS_NEXTPROTONEG = 0; -void (*SSL_CTX_set_next_protos_advertised_cb)(SSL_CTX *, - int (*)(SSL *, - const unsigned char **, - unsigned int *, - void *), - void *) = NULL; -void (*SSL_CTX_set_next_proto_select_cb)(SSL_CTX *, - int (*)(SSL *, - unsigned char **, - unsigned char *, - const unsigned char *, - unsigned int, - void *), - void *) = NULL; -int (*SSL_select_next_proto)(unsigned char **, unsigned char *, - const unsigned char *, unsigned int, - const unsigned char *, unsigned int) = NULL; -void (*SSL_get0_next_proto_negotiated)(const SSL *, - const unsigned char **, - unsigned *) = NULL; -#else static const long Cryptography_HAS_NEXTPROTONEG = 1; -#endif /* ALPN was added in OpenSSL 1.0.2. 
*/ #if CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 && !defined(LIBRESSL_VERSION_NUMBER) @@ -706,13 +652,4 @@ static const long TLS_ST_BEFORE = 0; static const long TLS_ST_OK = 0; #endif - -/* This define is available in 1.0.1+ so we can remove this when we drop - support for 1.0.0 */ -#ifdef OPENSSL_NPN_NEGOTIATED -static const long Cryptography_HAS_NPN_NEGOTIATED = 1; -#else -static const long OPENSSL_NPN_NEGOTIATED = -1; -static const long Cryptography_HAS_NPN_NEGOTIATED = 0; -#endif """ From pypy.commits at gmail.com Sat Dec 17 05:06:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 17 Dec 2016 02:06:07 -0800 (PST) Subject: [pypy-commit] pypy py3.5: call _setup_ssl_threads() Message-ID: <58550e0f.06891c0a.4d4b.a9d0@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89110:94808865e615 Date: 2016-12-17 11:05 +0100 http://bitbucket.org/pypy/pypy/changeset/94808865e615/ Log: call _setup_ssl_threads() diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -96,7 +96,7 @@ # init open ssl lib.SSL_load_error_strings() lib.SSL_library_init() -# TODO threads? 
+lib._setup_ssl_threads() lib.OpenSSL_add_all_algorithms() def check_signals(): From pypy.commits at gmail.com Sat Dec 17 05:50:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 17 Dec 2016 02:50:59 -0800 (PST) Subject: [pypy-commit] pypy py3.5: update comments Message-ID: <58551893.4dd41c0a.c02f0.b62d@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89111:699a508dff49 Date: 2016-12-17 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/699a508dff49/ Log: update comments diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -100,7 +100,8 @@ lib.OpenSSL_add_all_algorithms() def check_signals(): - # TODO PyErr_CheckSignal equivalent for pypy? + # nothing to do, we are on python level, signals are + # checked frequently in the bytecode dispatch loop pass def _socket_timeout(s): @@ -347,7 +348,6 @@ peer_cert = ffi.gc(peer_cert, lib.X509_free) self.peer_cert = peer_cert - #PySSL_END_ALLOW_THREADS self.handshake_done = 1 return None diff --git a/lib_pypy/_cffi_ssl/_stdssl/certificate.py b/lib_pypy/_cffi_ssl/_stdssl/certificate.py --- a/lib_pypy/_cffi_ssl/_stdssl/certificate.py +++ b/lib_pypy/_cffi_ssl/_stdssl/certificate.py @@ -151,7 +151,6 @@ value = lib.X509_NAME_ENTRY_get_data(entry); attr = _create_tuple_for_attribute(name, value); if attr == ffi.NULL: - pass # TODO error raise NotImplementedError rdn.append(attr) From pypy.commits at gmail.com Sat Dec 17 06:03:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sat, 17 Dec 2016 03:03:39 -0800 (PST) Subject: [pypy-commit] pypy py3.5: missing files Message-ID: <58551b8b.313ac20a.5e842.2989@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89112:a357008bf0d3 Date: 2016-12-17 12:03 +0100 http://bitbucket.org/pypy/pypy/changeset/a357008bf0d3/ Log: missing files diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/osrandom_engine.py 
b/lib_pypy/_cffi_ssl/_cffi_src/openssl/osrandom_engine.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/osrandom_engine.py @@ -0,0 +1,29 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from __future__ import absolute_import, division, print_function + +import os + +HERE = os.path.dirname(os.path.abspath(__file__)) + +with open(os.path.join(HERE, "src/osrandom_engine.h")) as f: + INCLUDES = f.read() + +TYPES = """ +static const char *const Cryptography_osrandom_engine_name; +static const char *const Cryptography_osrandom_engine_id; +""" + +FUNCTIONS = """ +int Cryptography_add_osrandom_engine(void); +""" + +MACROS = """ +""" + +with open(os.path.join(HERE, "src/osrandom_engine.c")) as f: + CUSTOMIZATIONS = f.read() + +CONDITIONAL_NAMES = {} diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.c b/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.c new file mode 100644 --- /dev/null +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.c @@ -0,0 +1,576 @@ +/* osurandom engine + * + * Windows CryptGenRandom() + * macOS >= 10.12 getentropy() + * OpenBSD 5.6+ getentropy() + * other BSD getentropy() if SYS_getentropy is defined + * Linux 3.4.17+ getrandom() with fallback to /dev/urandom + * other /dev/urandom with cached fd + * + * The /dev/urandom, getrandom and getentropy code is derived from Python's + * Python/random.c, written by Antoine Pitrou and Victor Stinner. + * + * Copyright 2001-2016 Python Software Foundation; All Rights Reserved. 
+ */ + +static const char *Cryptography_osrandom_engine_id = "osrandom"; + +/**************************************************************************** + * Windows + */ +#if CRYPTOGRAPHY_OSRANDOM_ENGINE == CRYPTOGRAPHY_OSRANDOM_ENGINE_CRYPTGENRANDOM +static const char *Cryptography_osrandom_engine_name = "osrandom_engine CryptGenRandom()"; +static HCRYPTPROV hCryptProv = 0; + +static int osrandom_init(ENGINE *e) { + if (hCryptProv != 0) { + return 1; + } + if (CryptAcquireContext(&hCryptProv, NULL, NULL, + PROV_RSA_FULL, CRYPT_VERIFYCONTEXT)) { + return 1; + } else { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_INIT, + CRYPTOGRAPHY_OSRANDOM_R_CRYPTACQUIRECONTEXT, + __FILE__, __LINE__ + ); + return 0; + } +} + +static int osrandom_rand_bytes(unsigned char *buffer, int size) { + if (hCryptProv == 0) { + return 0; + } + + if (!CryptGenRandom(hCryptProv, (DWORD)size, buffer)) { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES, + CRYPTOGRAPHY_OSRANDOM_R_CRYPTGENRANDOM, + __FILE__, __LINE__ + ); + return 0; + } + return 1; +} + +static int osrandom_finish(ENGINE *e) { + if (CryptReleaseContext(hCryptProv, 0)) { + hCryptProv = 0; + return 1; + } else { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_FINISH, + CRYPTOGRAPHY_OSRANDOM_R_CRYPTRELEASECONTEXT, + __FILE__, __LINE__ + ); + return 0; + } +} + +static int osrandom_rand_status(void) { + return hCryptProv != 0; +} + +static const char *osurandom_get_implementation(void) { + return "CryptGenRandom"; +} + +#endif /* CRYPTOGRAPHY_OSRANDOM_ENGINE_CRYPTGENRANDOM */ + +/**************************************************************************** + * BSD getentropy + */ +#if CRYPTOGRAPHY_OSRANDOM_ENGINE == CRYPTOGRAPHY_OSRANDOM_ENGINE_GETENTROPY +static const char *Cryptography_osrandom_engine_name = "osrandom_engine getentropy()"; + +static int osrandom_init(ENGINE *e) { + return 1; +} + +static int osrandom_rand_bytes(unsigned char *buffer, int size) { + int len, res; + 
while (size > 0) { + /* OpenBSD and macOS restrict maximum buffer size to 256. */ + len = size > 256 ? 256 : size; + res = getentropy(buffer, len); + if (res < 0) { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES, + CRYPTOGRAPHY_OSRANDOM_R_GETENTROPY_FAILED, + __FILE__, __LINE__ + ); + return 0; + } + buffer += len; + size -= len; + } + return 1; +} + +static int osrandom_finish(ENGINE *e) { + return 1; +} + +static int osrandom_rand_status(void) { + return 1; +} + +static const char *osurandom_get_implementation(void) { + return "getentropy"; +} +#endif /* CRYPTOGRAPHY_OSRANDOM_ENGINE_GETENTROPY */ + +/**************************************************************************** + * /dev/urandom helpers for all non-BSD Unix platforms + */ +#ifdef CRYPTOGRAPHY_OSRANDOM_NEEDS_DEV_URANDOM + +static struct { + int fd; + dev_t st_dev; + ino_t st_ino; +} urandom_cache = { -1 }; + +/* return -1 on error */ +static int dev_urandom_fd(void) { + int fd, n, flags; + struct stat st; + + /* Check that fd still points to the correct device */ + if (urandom_cache.fd >= 0) { + if (fstat(urandom_cache.fd, &st) + || st.st_dev != urandom_cache.st_dev + || st.st_ino != urandom_cache.st_ino) { + /* Somebody replaced our FD. Invalidate our cache but don't + * close the fd. 
*/ + urandom_cache.fd = -1; + } + } + if (urandom_cache.fd < 0) { + fd = open("/dev/urandom", O_RDONLY); + if (fd < 0) { + goto error; + } + if (fstat(fd, &st)) { + goto error; + } + /* set CLOEXEC flag */ + flags = fcntl(fd, F_GETFD); + if (flags == -1) { + goto error; + } else if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) == -1) { + goto error; + } + /* Another thread initialized the fd */ + if (urandom_cache.fd >= 0) { + do { + n = close(fd); + } while (n < 0 && errno == EINTR); + return urandom_cache.fd; + } + urandom_cache.st_dev = st.st_dev; + urandom_cache.st_ino = st.st_ino; + urandom_cache.fd = fd; + } + return urandom_cache.fd; + + error: + if (fd != -1) { + do { + n = close(fd); + } while (n < 0 && errno == EINTR); + } + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_FD, + CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_OPEN_FAILED, + __FILE__, __LINE__ + ); + return -1; +} + +static int dev_urandom_read(unsigned char *buffer, int size) { + int fd; + ssize_t n; + + fd = dev_urandom_fd(); + if (fd < 0) { + return 0; + } + + while (size > 0) { + do { + n = read(fd, buffer, (size_t)size); + } while (n < 0 && errno == EINTR); + + if (n <= 0) { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_READ, + CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_READ_FAILED, + __FILE__, __LINE__ + ); + return 0; + } + buffer += n; + size -= n; + } + return 1; +} + +static void dev_urandom_close(void) { + if (urandom_cache.fd >= 0) { + int fd, n; + struct stat st; + + if (fstat(urandom_cache.fd, &st) + && st.st_dev == urandom_cache.st_dev + && st.st_ino == urandom_cache.st_ino) { + fd = urandom_cache.fd; + urandom_cache.fd = -1; + do { + n = close(fd); + } while (n < 0 && errno == EINTR); + } + } +} +#endif /* CRYPTOGRAPHY_OSRANDOM_NEEDS_DEV_URANDOM */ + +/**************************************************************************** + * Linux getrandom engine with fallback to dev_urandom + */ + +#if CRYPTOGRAPHY_OSRANDOM_ENGINE == 
CRYPTOGRAPHY_OSRANDOM_ENGINE_GETRANDOM +static const char *Cryptography_osrandom_engine_name = "osrandom_engine getrandom()"; + +static int getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_NOT_INIT; + +static int osrandom_init(ENGINE *e) { + /* We try to detect working getrandom until we succeed. */ + if (getrandom_works != CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS) { + long n; + char dest[1]; + n = syscall(SYS_getrandom, dest, sizeof(dest), GRND_NONBLOCK); + if (n == sizeof(dest)) { + getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS; + } else { + int e = errno; + switch(e) { + case ENOSYS: + /* Fallback: Kernel does not support the syscall. */ + getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK; + break; + case EPERM: + /* Fallback: seccomp prevents syscall */ + getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK; + break; + case EAGAIN: + /* Failure: Kernel CRPNG has not been seeded yet */ + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_INIT, + CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_EAGAIN, + __FILE__, __LINE__ + ); + getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED; + break; + default: + /* EINTR cannot occur for buflen < 256. 
*/ + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_INIT, + CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_UNEXPECTED, + "errno", e + ); + getrandom_works = CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED; + break; + } + } + } + + /* fallback to dev urandom */ + if (getrandom_works == CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK) { + int fd = dev_urandom_fd(); + if (fd < 0) { + return 0; + } + } + return 1; +} + +static int osrandom_rand_bytes(unsigned char *buffer, int size) { + long n; + + switch(getrandom_works) { + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED: + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES, + CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED, + __FILE__, __LINE__ + ); + return 0; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_NOT_INIT: + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES, + CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_NOT_INIT, + __FILE__, __LINE__ + ); + return 0; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK: + return dev_urandom_read(buffer, size); + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS: + while (size > 0) { + do { + n = syscall(SYS_getrandom, buffer, size, GRND_NONBLOCK); + } while (n < 0 && errno == EINTR); + + if (n <= 0) { + ERR_Cryptography_OSRandom_error( + CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES, + CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_FAILED, + __FILE__, __LINE__ + ); + return 0; + } + buffer += n; + size -= n; + } + return 1; + } + return 0; /* unreachable */ +} + +static int osrandom_finish(ENGINE *e) { + dev_urandom_close(); + return 1; +} + +static int osrandom_rand_status(void) { + switch(getrandom_works) { + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED: + return 0; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_NOT_INIT: + return 0; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK: + return urandom_cache.fd >= 0; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS: + return 1; + } + return 0; /* unreachable */ +} + +static const char *osurandom_get_implementation(void) { + switch(getrandom_works) 
{ + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED: + return ""; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_NOT_INIT: + return ""; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK: + return "/dev/urandom"; + case CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS: + return "getrandom"; + } + return ""; /* unreachable */ +} +#endif /* CRYPTOGRAPHY_OSRANDOM_ENGINE_GETRANDOM */ + +/**************************************************************************** + * dev_urandom engine for all remaining platforms + */ + +#if CRYPTOGRAPHY_OSRANDOM_ENGINE == CRYPTOGRAPHY_OSRANDOM_ENGINE_DEV_URANDOM +static const char *Cryptography_osrandom_engine_name = "osrandom_engine /dev/urandom"; + +static int osrandom_init(ENGINE *e) { + int fd = dev_urandom_fd(); + if (fd < 0) { + return 0; + } + return 1; +} + +static int osrandom_rand_bytes(unsigned char *buffer, int size) { + return dev_urandom_read(buffer, size); +} + +static int osrandom_finish(ENGINE *e) { + dev_urandom_close(); + return 1; +} + +static int osrandom_rand_status(void) { + return urandom_cache.fd >= 0; +} + +static const char *osurandom_get_implementation(void) { + return "/dev/urandom"; +} +#endif /* CRYPTOGRAPHY_OSRANDOM_ENGINE_DEV_URANDOM */ + +/**************************************************************************** + * ENGINE boiler plate + */ + +/* This replicates the behavior of the OpenSSL FIPS RNG, which returns a + -1 in the event that there is an error when calling RAND_pseudo_bytes. 
*/ +static int osrandom_pseudo_rand_bytes(unsigned char *buffer, int size) { + int res = osrandom_rand_bytes(buffer, size); + if (res == 0) { + return -1; + } else { + return res; + } +} + +static RAND_METHOD osrandom_rand = { + NULL, + osrandom_rand_bytes, + NULL, + NULL, + osrandom_pseudo_rand_bytes, + osrandom_rand_status, +}; + +static const ENGINE_CMD_DEFN osrandom_cmd_defns[] = { + {CRYPTOGRAPHY_OSRANDOM_GET_IMPLEMENTATION, + "get_implementation", + "Get CPRNG implementation.", + ENGINE_CMD_FLAG_NO_INPUT}, + {0, NULL, NULL, 0} +}; + +static int osrandom_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f) (void)) { + const char *name; + size_t len; + + switch (cmd) { + case CRYPTOGRAPHY_OSRANDOM_GET_IMPLEMENTATION: + /* i: buffer size, p: char* buffer */ + name = osurandom_get_implementation(); + len = strlen(name); + if ((p == NULL) && (i == 0)) { + /* return required buffer len */ + return len; + } + if ((p == NULL) || i < 0 || ((size_t)i <= len)) { + /* no buffer or buffer too small */ + ENGINEerr(ENGINE_F_ENGINE_CTRL, ENGINE_R_INVALID_ARGUMENT); + return 0; + } + strncpy((char *)p, name, len); + return len; + default: + ENGINEerr(ENGINE_F_ENGINE_CTRL, ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED); + return 0; + } +} + +/* error reporting */ +#define ERR_FUNC(func) ERR_PACK(0, func, 0) +#define ERR_REASON(reason) ERR_PACK(0, 0, reason) + +static ERR_STRING_DATA CRYPTOGRAPHY_OSRANDOM_lib_name[] = { + {0, "osrandom_engine"}, + {0, NULL} +}; + +static ERR_STRING_DATA CRYPTOGRAPHY_OSRANDOM_str_funcs[] = { + {ERR_FUNC(CRYPTOGRAPHY_OSRANDOM_F_INIT), + "osrandom_init"}, + {ERR_FUNC(CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES), + "osrandom_rand_bytes"}, + {ERR_FUNC(CRYPTOGRAPHY_OSRANDOM_F_FINISH), + "osrandom_finish"}, + {ERR_FUNC(CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_FD), + "dev_urandom_fd"}, + {ERR_FUNC(CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_READ), + "dev_urandom_read"}, + {0, NULL} +}; + +static ERR_STRING_DATA CRYPTOGRAPHY_OSRANDOM_str_reasons[] = { + 
{ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_CRYPTACQUIRECONTEXT), + "CryptAcquireContext() failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_CRYPTGENRANDOM), + "CryptGenRandom() failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_CRYPTRELEASECONTEXT), + "CryptReleaseContext() failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETENTROPY_FAILED), + "getentropy() failed"}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_OPEN_FAILED), + "open('/dev/urandom') failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_READ_FAILED), + "Reading from /dev/urandom fd failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED), + "getrandom() initialization failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_EAGAIN), + "getrandom() initialization failed with EAGAIN. Most likely Kernel " + "CPRNG is not seeded yet."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_UNEXPECTED), + "getrandom() initialization failed with unexpected errno."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_FAILED), + "getrandom() syscall failed."}, + {ERR_REASON(CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_NOT_INIT), + "getrandom() engine was not properly initialized."}, + {0, NULL} +}; + +static int Cryptography_OSRandom_lib_error_code = 0; + +static void ERR_load_Cryptography_OSRandom_strings(void) +{ + if (Cryptography_OSRandom_lib_error_code == 0) { + Cryptography_OSRandom_lib_error_code = ERR_get_next_error_library(); + ERR_load_strings(Cryptography_OSRandom_lib_error_code, + CRYPTOGRAPHY_OSRANDOM_lib_name); + ERR_load_strings(Cryptography_OSRandom_lib_error_code, + CRYPTOGRAPHY_OSRANDOM_str_funcs); + ERR_load_strings(Cryptography_OSRandom_lib_error_code, + CRYPTOGRAPHY_OSRANDOM_str_reasons); + } +} + +static void ERR_Cryptography_OSRandom_error(int function, int reason, + char *file, int line) +{ + ERR_PUT_error(Cryptography_OSRandom_lib_error_code, function, reason, + file, line); +} + +/* Returns 1 if successfully added, 2 if engine has previously been added, + and 
0 for error. */ +int Cryptography_add_osrandom_engine(void) { + ENGINE *e; + + ERR_load_Cryptography_OSRandom_strings(); + + e = ENGINE_by_id(Cryptography_osrandom_engine_id); + if (e != NULL) { + ENGINE_free(e); + return 2; + } else { + ERR_clear_error(); + } + + e = ENGINE_new(); + if (e == NULL) { + return 0; + } + if(!ENGINE_set_id(e, Cryptography_osrandom_engine_id) || + !ENGINE_set_name(e, Cryptography_osrandom_engine_name) || + !ENGINE_set_RAND(e, &osrandom_rand) || + !ENGINE_set_init_function(e, osrandom_init) || + !ENGINE_set_finish_function(e, osrandom_finish) || + !ENGINE_set_cmd_defns(e, osrandom_cmd_defns) || + !ENGINE_set_ctrl_function(e, osrandom_ctrl)) { + ENGINE_free(e); + return 0; + } + if (!ENGINE_add(e)) { + ENGINE_free(e); + return 0; + } + if (!ENGINE_free(e)) { + return 0; + } + + return 1; +} diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.h b/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.h new file mode 100644 --- /dev/null +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/src/osrandom_engine.h @@ -0,0 +1,88 @@ +#ifdef _WIN32 + #include +#else + #include + #include + /* for defined(BSD) */ + #include + + #ifdef BSD + /* for SYS_getentropy */ + #include + #endif + + #ifdef __APPLE__ + #include + #endif + + #ifdef __linux__ + /* for SYS_getrandom */ + #include + #ifndef GRND_NONBLOCK + #define GRND_NONBLOCK 0x0001 + #endif /* GRND_NONBLOCK */ + #endif /* __linux__ */ +#endif /* _WIN32 */ + +#define CRYPTOGRAPHY_OSRANDOM_ENGINE_CRYPTGENRANDOM 1 +#define CRYPTOGRAPHY_OSRANDOM_ENGINE_GETENTROPY 2 +#define CRYPTOGRAPHY_OSRANDOM_ENGINE_GETRANDOM 3 +#define CRYPTOGRAPHY_OSRANDOM_ENGINE_DEV_URANDOM 4 + +#ifndef CRYPTOGRAPHY_OSRANDOM_ENGINE + #if defined(_WIN32) + /* Windows */ + #define CRYPTOGRAPHY_OSRANDOM_ENGINE CRYPTOGRAPHY_OSRANDOM_ENGINE_CRYPTGENRANDOM + #elif defined(BSD) && defined(SYS_getentropy) + /* OpenBSD 5.6+ or macOS 10.12+ */ + #define CRYPTOGRAPHY_OSRANDOM_ENGINE CRYPTOGRAPHY_OSRANDOM_ENGINE_GETENTROPY + 
#elif defined(__linux__) && defined(SYS_getrandom) + /* Linux 3.4.17+ */ + #define CRYPTOGRAPHY_OSRANDOM_ENGINE CRYPTOGRAPHY_OSRANDOM_ENGINE_GETRANDOM + #else + /* Keep this as last entry, fall back to /dev/urandom */ + #define CRYPTOGRAPHY_OSRANDOM_ENGINE CRYPTOGRAPHY_OSRANDOM_ENGINE_DEV_URANDOM + #endif +#endif /* CRYPTOGRAPHY_OSRANDOM_ENGINE */ + +/* Fallbacks need /dev/urandom helper functions. */ +#if CRYPTOGRAPHY_OSRANDOM_ENGINE == CRYPTOGRAPHY_OSRANDOM_ENGINE_GETRANDOM || \ + CRYPTOGRAPHY_OSRANDOM_ENGINE == CRYPTOGRAPHY_OSRANDOM_ENGINE_DEV_URANDOM + #define CRYPTOGRAPHY_OSRANDOM_NEEDS_DEV_URANDOM 1 +#endif + +enum { + CRYPTOGRAPHY_OSRANDOM_GETRANDOM_INIT_FAILED = -2, + CRYPTOGRAPHY_OSRANDOM_GETRANDOM_NOT_INIT, + CRYPTOGRAPHY_OSRANDOM_GETRANDOM_FALLBACK, + CRYPTOGRAPHY_OSRANDOM_GETRANDOM_WORKS +}; + +/* engine ctrl */ +#define CRYPTOGRAPHY_OSRANDOM_GET_IMPLEMENTATION ENGINE_CMD_BASE + +/* error reporting */ +static void ERR_load_Cryptography_OSRandom_strings(void); +static void ERR_Cryptography_OSRandom_error(int function, int reason, + char *file, int line); + +#define CRYPTOGRAPHY_OSRANDOM_F_INIT 100 +#define CRYPTOGRAPHY_OSRANDOM_F_RAND_BYTES 101 +#define CRYPTOGRAPHY_OSRANDOM_F_FINISH 102 +#define CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_FD 300 +#define CRYPTOGRAPHY_OSRANDOM_F_DEV_URANDOM_READ 301 + +#define CRYPTOGRAPHY_OSRANDOM_R_CRYPTACQUIRECONTEXT 100 +#define CRYPTOGRAPHY_OSRANDOM_R_CRYPTGENRANDOM 101 +#define CRYPTOGRAPHY_OSRANDOM_R_CRYPTRELEASECONTEXT 102 + +#define CRYPTOGRAPHY_OSRANDOM_R_GETENTROPY_FAILED 200 + +#define CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_OPEN_FAILED 300 +#define CRYPTOGRAPHY_OSRANDOM_R_DEV_URANDOM_READ_FAILED 301 + +#define CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED 400 +#define CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_EAGAIN 401 +#define CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_INIT_FAILED_UNEXPECTED 402 +#define CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_FAILED 403 +#define CRYPTOGRAPHY_OSRANDOM_R_GETRANDOM_NOT_INIT 404 From pypy.commits 
at gmail.com Sat Dec 17 07:27:54 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 04:27:54 -0800 (PST) Subject: [pypy-commit] pypy default: Try to import 'embedding' from __init__(), which might allow us Message-ID: <58552f4a.d5091c0a.6942f.d553@mx.google.com> Author: Armin Rigo Branch: Changeset: r89113:be4a524fa746 Date: 2016-12-17 12:45 +0100 http://bitbucket.org/pypy/pypy/changeset/be4a524fa746/ Log: Try to import 'embedding' from __init__(), which might allow us to check a space flag diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,6 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi VERSION = "1.9.1" @@ -68,7 +68,11 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL - def startup(self, space): + def __init__(self, space, *args): + MixedModule.__init__(self, space, *args) + # + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point from pypy.module._cffi_backend import embedding embedding.glob.space = space @@ -85,11 +89,3 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value - - -# write this entrypoint() here, to make sure it is registered early enough - at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], - c_name='pypy_init_embedded_cffi_module') -def pypy_init_embedded_cffi_module(version, init_struct): - from pypy.module._cffi_backend import embedding - return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ 
b/pypy/module/_cffi_backend/embedding.py @@ -1,4 +1,5 @@ import os +from rpython.rlib import entrypoint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -46,6 +47,8 @@ glob = Global() + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') def pypy_init_embedded_cffi_module(version, init_struct): # called from __init__.py name = "?" From pypy.commits at gmail.com Sat Dec 17 08:11:17 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 05:11:17 -0800 (PST) Subject: [pypy-commit] pypy default: Add the config option 'disable_entrypoints' for embedding PyPy together Message-ID: <58553975.43e61c0a.f871f.e799@mx.google.com> Author: Armin Rigo Branch: Changeset: r89114:17a956e1c059 Date: 2016-12-17 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/17a956e1c059/ Log: Add the config option 'disable_entrypoints' for embedding PyPy together with another RPython VM diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -190,6 +190,12 @@ "make sure that all calls go through space.call_args", default=False), + BoolOption("disable_entrypoints", + "Disable external entry points, notably the" + " cpyext module and cffi's embedding mode.", + default=False, + requires=[("objspace.usemodules.cpyext", False)]), + OptionDescription("std", "Standard Object Space Options", [ BoolOption("withtproxy", "support transparent proxies", default=True), diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,12 +83,18 @@ return 1 return exitcode + return entry_point, get_additional_entrypoints(space) + + +def get_additional_entrypoints(space): # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype + if space.config.objspace.disable_entrypoints: + return {} + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -188,11 +194,11 @@ return -1 return 0 - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + return {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -71,10 +71,11 @@ def __init__(self, space, *args): MixedModule.__init__(self, space, *args) # - # import 'embedding', which has the side-effect of registering - # the 'pypy_init_embedded_cffi_module' entry point - from pypy.module._cffi_backend import embedding - embedding.glob.space = space + if not space.config.objspace.disable_entrypoints: + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): From pypy.commits at gmail.com Sat Dec 17 09:53:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 06:53:27 -0800 (PST) Subject: [pypy-commit] pypy default: oops Message-ID: <58555167.212dc20a.22a2c.6cfa@mx.google.com> Author: Armin Rigo Branch: Changeset: r89115:b447bdd635ee Date: 2016-12-17 15:52 
+0100 http://bitbucket.org/pypy/pypy/changeset/b447bdd635ee/ Log: oops diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,10 +83,10 @@ return 1 return exitcode - return entry_point, get_additional_entrypoints(space) + return entry_point, get_additional_entrypoints(space, w_initstdio) -def get_additional_entrypoints(space): +def get_additional_entrypoints(space, w_initstdio): # register the minimal equivalent of running a small piece of code. This # should be used as sparsely as possible, just to register callbacks from rpython.rlib.entrypoint import entrypoint_highlevel From pypy.commits at gmail.com Sat Dec 17 10:25:05 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 07:25:05 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Handle includes Message-ID: <585558d1.c5371c0a.f6936.094a@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89116:078ef51e7af9 Date: 2016-12-17 15:24 +0000 http://bitbucket.org/pypy/pypy/changeset/078ef51e7af9/ Log: Handle includes diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -26,12 +26,11 @@ _parser_cache = pycparser.CParser() return _parser_cache -def _preprocess(csource): +def _preprocess(csource, macros): # Remove comments. NOTE: this only work because the cdef() section # should not contain any string literal! 
csource = _r_comment.sub(' ', csource) # Remove the "#define FOO x" lines - macros = OrderedDict() for match in _r_define.finditer(csource): macroname, macrovalue = match.groups() macrovalue = macrovalue.replace('\\\n', '').strip() @@ -102,9 +101,11 @@ self._options = {} self._int_constants = {} self._recomplete = [] + self._macros = OrderedDict() def _parse(self, csource): - csource, macros = _preprocess(csource) + # modifies self._macros in-place + csource, macros = _preprocess(csource, self._macros) # XXX: for more efficiency we would need to poke into the # internals of CParser... the following registers the # typedefs, because their presence or absence influences the @@ -633,10 +634,12 @@ if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef', 'macro'): self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) + for k, v in other._macros.items(): + self._macros[k] = v CNAME_TO_LLTYPE = { 'char': rffi.CHAR, @@ -671,11 +674,12 @@ class ParsedSource(object): - def __init__(self, source, definitions=None, macros=None): + def __init__(self, source, parser, definitions=None, macros=None): self.source = source self.definitions = definitions if definitions is not None else {} self.macros = macros if macros is not None else {} self.structs = {} + self.ctx = parser def add_typedef(self, name, obj): assert name not in self.definitions @@ -711,11 +715,17 @@ raise NotImplementedError -def parse_source(source): +def parse_source(source, includes=None): ctx = Parser() + if includes is not None: + for header in includes: + ctx.include(header.ctx) + ctx.parse(source) - src = ParsedSource(source) + src = ParsedSource(source, ctx) for name, (obj, quals) in ctx._declarations.iteritems(): + if obj in ctx._included_declarations: + 
continue if name.startswith('typedef '): name = name[8:] src.add_typedef(name, obj) diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -39,3 +39,21 @@ hdr = parse_source(decl) assert 'PyFloatObject' in hdr.definitions assert 'PyObject_HEAD' in hdr.macros + +def test_include(): + cdef1 = """ + typedef ssize_t Py_ssize_t; + + #define PyObject_HEAD \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ + """ + hdr1 = parse_source(cdef1) + cdef2 = """ + typedef struct { + PyObject_HEAD + Py_ssize_t ob_foo; + } Object; + """ + hdr2 = parse_source(cdef2, includes=[hdr1]) + assert 'Object' in hdr2.definitions From pypy.commits at gmail.com Sat Dec 17 11:10:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 08:10:27 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Inline cpython_struct() into ParsedSource Message-ID: <58556373.e7b1c20a.678f4.87f8@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89117:768d5bba506e Date: 2016-12-17 16:09 +0000 http://bitbucket.org/pypy/pypy/changeset/768d5bba506e/ Log: Inline cpython_struct() into ParsedSource diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -4,11 +4,11 @@ import pycparser import weakref, re from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rfficache +from rpython.rtyper.tool import rfficache, rffi_platform _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", re.DOTALL | re.MULTILINE) -_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" +_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" r"\b((?:[^\n\\]|\\.)*?)$", re.DOTALL | re.MULTILINE) _r_words = re.compile(r"\w+|\S") @@ -665,10 +665,6 @@ self.struct_name = name self.fields = fields - def realize(self, type_name): - from 
pypy.module.cpyext.api import cpython_struct - return cpython_struct(type_name, self.fields) - def __repr__(self): return "".format(vars(self)) @@ -685,7 +681,7 @@ assert name not in self.definitions tp = self.convert_type(obj) if isinstance(tp, DelayedStruct): - tp = tp.realize(name) + tp = self.realize_struct(tp, name) self.structs[obj] = tp self.definitions[name] = lltype.Typedef(tp, name) @@ -693,20 +689,31 @@ assert name not in self.macros self.macros[name] = value + def new_struct(self, obj): + if obj.fldtypes is None: + return lltype.ForwardReference() + else: + fields = zip( + obj.fldnames, + [self.convert_type(field) for field in obj.fldtypes]) + return DelayedStruct(obj.name, fields) + + def realize_struct(self, struct, type_name): + from pypy.module.cpyext.api import CConfig, TYPES + configname = type_name.replace(' ', '__') + setattr(CConfig, configname, + rffi_platform.Struct(type_name, struct.fields)) + forward = lltype.ForwardReference() + TYPES[configname] = forward + return forward + def convert_type(self, obj): if isinstance(obj, model.PrimitiveType): return cname_to_lltype(obj.name) elif isinstance(obj, model.StructType): - from pypy.module.cpyext.api import cpython_struct if obj in self.structs: return self.structs[obj] - if obj.fldtypes is None: - result = lltype.ForwardReference() - else: - fields = zip( - obj.fldnames, - [self.convert_type(field) for field in obj.fldtypes]) - result = DelayedStruct(obj.name, fields) + result = self.new_struct(obj) self.structs[obj] = result return result elif isinstance(obj, model.PointerType): From pypy.commits at gmail.com Sat Dec 17 11:59:42 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 08:59:42 -0800 (PST) Subject: [pypy-commit] pypy default: flow space: only emit ll_assert_not_none() in some forms of raise, Message-ID: <58556efe.45f6c20a.23a87.9724@mx.google.com> Author: Armin Rigo Branch: Changeset: r89118:e58b69ffe5aa Date: 2016-12-17 17:59 +0100 
http://bitbucket.org/pypy/pypy/changeset/e58b69ffe5aa/ Log: flow space: only emit ll_assert_not_none() in some forms of raise, not if we built the exception instance just now diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -599,6 +599,7 @@ """ from rpython.rlib.debug import ll_assert_not_none + check_not_none = False w_is_type = op.isinstance(w_arg1, const(type)).eval(self) if self.guessbool(w_is_type): # this is for all cases of the form (Class, something) @@ -610,6 +611,7 @@ if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)): # raise Type, Instance: let etype be the exact type of value w_value = w_arg2 + check_not_none = True else: # raise Type, X: assume X is the constructor argument w_value = op.simple_call(w_arg1, w_arg2).eval(self) @@ -620,7 +622,10 @@ "separate value") raise Raise(const(exc)) w_value = w_arg1 - w_value = op.simple_call(const(ll_assert_not_none), w_value).eval(self) + check_not_none = True + if check_not_none: + w_value = op.simple_call(const(ll_assert_not_none), + w_value).eval(self) w_type = op.type(w_value).eval(self) return FSException(w_type, w_value) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -466,6 +466,14 @@ int_return $True """, transform=True) + def test_assert_disappears(self): + def f(i): + assert i > 5 + return i + self.encoding_test(f, [7], """ + int_return %i0 + """) + def test_int_floordiv_ovf_zer(self): def f(i, j): assert i >= 0 From pypy.commits at gmail.com Sat Dec 17 12:16:27 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 09:16:27 -0800 (PST) Subject: [pypy-commit] pypy default: Change https://www.verisign.net/, which has been failing for two days, Message-ID: <585572eb.ce941c0a.d731b.3865@mx.google.com> Author: 
Armin Rigo Branch: Changeset: r89119:ecfd182b1eb3 Date: 2016-12-17 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/ecfd182b1eb3/ Log: Change https://www.verisign.net/, which has been failing for two days, to https://gmail.com/ diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -169,8 +169,8 @@ } def setup_method(self, method): - # https://www.verisign.net/ - ADDR = "www.verisign.net", 443 + # https://gmail.com/ + ADDR = "gmail.com", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket From pypy.commits at gmail.com Sat Dec 17 12:48:27 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 09:48:27 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Do one configure_types call per ParsedSource instance Message-ID: <58557a6b.e337c20a.c90e9.b01e@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89120:d8f4db529986 Date: 2016-12-17 17:47 +0000 http://bitbucket.org/pypy/pypy/changeset/d8f4db529986/ Log: Do one configure_types call per ParsedSource instance diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -268,8 +268,6 @@ self.gil = gil self.result_borrowed = result_borrowed self.result_is_ll = result_is_ll - if result_is_ll: # means 'returns a low-level PyObject pointer' - assert is_PyObject(restype) # def get_llhelper(space): return llhelper(self.functype, self.get_wrapper(space)) @@ -624,6 +622,7 @@ typedef struct _typeobject PyTypeObject; """) +h.configure_types() Py_ssize_t = h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -671,11 +671,15 @@ class ParsedSource(object): def __init__(self, source, parser, definitions=None, macros=None): + 
from pypy.module.cpyext.api import CConfig self.source = source self.definitions = definitions if definitions is not None else {} self.macros = macros if macros is not None else {} self.structs = {} self.ctx = parser + self._Config = type('Config', (object,), {}) + self._Config._compilation_info_ = CConfig._compilation_info_ + self._TYPES = {} def add_typedef(self, name, obj): assert name not in self.definitions @@ -701,12 +705,17 @@ def realize_struct(self, struct, type_name): from pypy.module.cpyext.api import CConfig, TYPES configname = type_name.replace(' ', '__') - setattr(CConfig, configname, + setattr(self._Config, configname, rffi_platform.Struct(type_name, struct.fields)) forward = lltype.ForwardReference() - TYPES[configname] = forward + self._TYPES[configname] = forward return forward + def configure_types(self): + for name, TYPE in rffi_platform.configure(self._Config).iteritems(): + if name in self._TYPES: + self._TYPES[name].become(TYPE) + def convert_type(self, obj): if isinstance(obj, model.PrimitiveType): return cname_to_lltype(obj.name) From pypy.commits at gmail.com Sat Dec 17 13:15:30 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 17 Dec 2016 10:15:30 -0800 (PST) Subject: [pypy-commit] pypy issue2444: use FinalizerQueue (with minor hack for tests) Message-ID: <585580c2.ca06c20a.7600a.b4db@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89121:f66ee40b4bc5 Date: 2016-12-17 20:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f66ee40b4bc5/ Log: use FinalizerQueue (with minor hack for tests) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen +from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, 
Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl, Py_buffer, Py_bufferP) @@ -343,7 +344,6 @@ self.releasebufferproc = releasebuffer def releasebuffer(self): - print '--------------' if self.releasebufferproc: func_target = rffi.cast(releasebufferproc, self.releasebufferproc) with lltype.scoped_alloc(Py_buffer) as pybuf: @@ -385,6 +385,17 @@ # absolutely no safety checks, what could go wrong? self.ptr[index] = char +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) @@ -395,8 +406,10 @@ size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(space, ptr[0], size, w_self, - releasebuffer=releasebuffer)) + buf = CPyBuffer(space, ptr[0], size, w_self, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getwritebuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) @@ -408,8 +421,10 @@ size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(space, ptr[0], size, w_self, readonly=False, - releasebuffer=releasebuffer)) + buf = CPyBuffer(space, ptr[0], size, w_self, readonly=False, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) @@ -436,11 +451,13 @@ format = rffi.charp2str(pybuf.c_format) else: format = 'B' - return space.newbuffer(CPyBuffer(space, ptr, size, w_self, format=format, + buf = CPyBuffer(space, ptr, size, w_self, format=format, ndim=ndim, shape=shape, strides=strides, 
itemsize=pybuf.c_itemsize, readonly=widen(pybuf.c_readonly), - releasebuffer = releasebuffer)) + releasebuffer = releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -114,9 +114,8 @@ """, ) import gc assert module.get_cnt() == 0 - print '++++++++++++++++++' a = memoryview(module.create_test()) - print 'xxxxxxxxxxxxxxxxxxxxxxx' assert module.get_cnt() == 1 del a + gc.collect(); gc.collect(); gc.collect() assert module.get_cnt() == 0 diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -323,7 +323,6 @@ def newbuffer(self, obj): ret = W_Buffer(obj) - ret.register_finalizer(self) return ret def newbytes(self, s): diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -450,6 +450,7 @@ "the object must have a __dict__" % (obj,)) assert (not hasattr(obj, '__slots__') or type(obj).__slots__ == () or + type(obj).__slots__ == ['readonly'] or type(obj).__slots__ == ('__weakref__',)), ( "%r: to run register_finalizer() untranslated, " "the object must not have __slots__" % (obj,)) From pypy.commits at gmail.com Sat Dec 17 13:48:41 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 10:48:41 -0800 (PST) Subject: [pypy-commit] pypy default: Propagate debug.ll_assert_not_none() through the JIT, using the same Message-ID: <58558889.8d071c0a.32208.5978@mx.google.com> Author: Armin Rigo Branch: Changeset: r89122:5efae655f1ce Date: 2016-12-17 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/5efae655f1ce/ Log: Propagate debug.ll_assert_not_none() through the JIT, using the same technique as jit.record_exact_class(). 
If we use it a bit inside PyPy it could remove a good number of guard_nonnull or guard_nonnull_class. diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -283,6 +283,12 @@ def rewrite_op_jit_record_exact_class(self, op): return SpaceOperation("record_exact_class", [op.args[0], op.args[1]], None) + def rewrite_op_debug_assert_not_none(self, op): + if isinstance(op.args[0], Variable): + return SpaceOperation('assert_not_none', [op.args[0]], None) + else: + return [] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -402,7 +402,7 @@ self.encoding_test(f, [65], """ raise $<* struct object> - """) + """, transform=True) def test_exc_raise_2(self): def g(i): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -563,6 +563,10 @@ ll_assert((i & 1) == 1, "bhimpl_cast_int_to_ptr: not an odd int") return lltype.cast_int_to_ptr(llmemory.GCREF, i) + @arguments("r") + def bhimpl_assert_not_none(a): + assert a + @arguments("r", "i") def bhimpl_record_exact_class(a, b): pass diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -5,6 +5,7 @@ from rpython.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize +from rpython.rlib.debug import fatalerror from rpython.jit.metainterp.history import check_descr from 
rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr @@ -321,6 +322,10 @@ def do_keepalive(cpu, _, x): pass +def do_assert_not_none(cpu, _, box): + if not box.getref_base(): + fatalerror("found during JITting: ll_assert_not_none() failed") + # ____________________________________________________________ diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -499,6 +499,9 @@ box = self.get_box_replacement(op.getarg(0)) self.make_constant(box, CONST_0) + def optimize_ASSERT_NOT_NONE(self, op): + self.make_nonnull(op.getarg(0)) + def optimize_RECORD_EXACT_CLASS(self, op): opinfo = self.getptrinfo(op.getarg(0)) expectedclassbox = op.getarg(1) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -42,6 +42,9 @@ # but it's a bit hard to implement robustly if heap.py is also run pass + def optimize_ASSERT_NOT_NONE(self, op): + pass + def optimize_RECORD_EXACT_CLASS(self, op): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5595,6 +5595,19 @@ """ self.optimize_loop(ops, expected) + def test_assert_not_none(self): + ops = """ + [p0] + assert_not_none(p0) + guard_nonnull(p0) [] + finish() + """ + expected = """ + [p0] + finish() + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -275,12 +275,18 @@ def opimpl_ptr_iszero(self, box): return self.execute(rop.PTR_EQ, box, history.CONST_NULL) + @arguments("box") + def opimpl_assert_not_none(self, box): + if self.metainterp.heapcache.is_nullity_known(box): + return + self.execute(rop.ASSERT_NOT_NONE, box) + self.metainterp.heapcache.nullity_now_known(box) + @arguments("box", "box") def opimpl_record_exact_class(self, box, clsbox): from rpython.rtyper.lltypesystem import llmemory if self.metainterp.heapcache.is_class_known(box): return - adr = clsbox.getaddr() self.execute(rop.RECORD_EXACT_CLASS, box, clsbox) self.metainterp.heapcache.class_now_known(box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1143,6 +1143,7 @@ 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5/n', 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr + 'ASSERT_NOT_NONE/1/n', # [objptr] 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', 'SAVE_EXCEPTION/0/r', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4585,3 +4585,30 @@ assert res == -42 res = self.interp_operations(f, [0, 200]) assert res == 205 + + def test_ll_assert_not_none(self): + # the presence of ll_assert_not_none(), even in cases where it + # doesn't influence the annotation, is a hint for the JIT + from rpython.rlib.debug import ll_assert_not_none + class X: + pass + class Y(X): + pass + def g(x, check): + if check: + x = ll_assert_not_none(x) + return isinstance(x, Y) + @dont_look_inside + def make(i): + if i == 1: + return X() + if i == 2: + return Y() + return None + def f(a, b, check): + return g(make(a), check) + g(make(b), check) * 10 + res = 
self.interp_operations(f, [1, 2, 1]) + assert res == 10 + self.check_operations_history(guard_nonnull=0, guard_nonnull_class=0, + guard_class=2, + assert_not_none=2) # before optimization diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1141,6 +1141,9 @@ """ Assure the JIT that value is an instance of cls. This is a precise class check, like a guard_class. + + See also debug.ll_assert_not_none(x), which asserts that x is not None + and also assures the JIT that it is the case. """ assert type(value) is cls diff --git a/rpython/rtyper/debug.py b/rpython/rtyper/debug.py --- a/rpython/rtyper/debug.py +++ b/rpython/rtyper/debug.py @@ -22,7 +22,7 @@ def ll_assert_not_none(x): """assert x is not None""" - assert x, "ll_assert_not_none(%r)" % (x,) + assert x is not None, "ll_assert_not_none(%r)" % (x,) return x class Entry(ExtRegistryEntry): @@ -33,11 +33,8 @@ def specialize_call(self, hop): [v0] = hop.inputargs(hop.args_r[0]) - assert isinstance(v0.concretetype, lltype.Ptr) - v1 = hop.genop('ptr_nonzero', [v0], resulttype=lltype.Bool) hop.exception_cannot_occur() - cmsg = hop.inputconst(lltype.Void, "ll_assert_not_none failed") - hop.genop('debug_assert', [v1, cmsg]) + hop.genop('debug_assert_not_none', [v0]) return v0 class FatalError(Exception): diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -521,6 +521,10 @@ if not x: raise LLAssertFailure(msg) + def op_debug_assert_not_none(self, x): + if not x: + raise LLAssertFailure("ll_assert_not_none() failed") + def op_debug_fatalerror(self, ll_msg, ll_exc=None): msg = ''.join(ll_msg.chars) if ll_exc is None: diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -78,7 +78,8 @@ def is_pure(self, args_v): if self.canfold: # canfold => 
pure operation return True - if self is llop.debug_assert: # debug_assert is pure enough + if (self is llop.debug_assert or # debug_assert is pure enough + self is llop.debug_assert_not_none): return True # reading from immutable if self is llop.getfield or self is llop.getarrayitem: @@ -552,6 +553,7 @@ 'debug_offset': LLOp(canrun=True), 'debug_flush': LLOp(canrun=True), 'debug_assert': LLOp(tryfold=True), + 'debug_assert_not_none': LLOp(tryfold=True), 'debug_fatalerror': LLOp(canrun=True), 'debug_llinterpcall': LLOp(canraise=(Exception,)), # Python func call 'res=arg[0](*arg[1:])' diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -812,6 +812,10 @@ return 'RPyAssert(%s, %s);' % (self.expr(op.args[0]), c_string_constant(op.args[1].value)) + def OP_DEBUG_ASSERT_NOT_NONE(self, op): + return 'RPyAssert(%s != NULL, "ll_assert_not_none() failed");' % ( + self.expr(op.args[0]),) + def OP_DEBUG_FATALERROR(self, op): # XXX from rpython.rtyper.lltypesystem.rstr import STR From pypy.commits at gmail.com Sat Dec 17 13:48:43 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 10:48:43 -0800 (PST) Subject: [pypy-commit] pypy default: Add a minimal amount of ll_assert_not_none(), notably on the popvalue() Message-ID: <5855888b.8c1f1c0a.f8a7f.5b57@mx.google.com> Author: Armin Rigo Branch: Changeset: r89123:7904f63e30ef Date: 2016-12-17 18:36 +0100 http://bitbucket.org/pypy/pypy/changeset/7904f63e30ef/ Log: Add a minimal amount of ll_assert_not_none(), notably on the popvalue() method diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,6 +4,7 @@ import sys from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg +from rpython.rlib.debug import ll_assert_not_none from rpython.rlib.jit import hint from rpython.rlib.objectmodel 
import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint @@ -298,7 +299,13 @@ # stack manipulation helpers def pushvalue(self, w_object): depth = self.valuestackdepth - self.locals_cells_stack_w[depth] = w_object + self.locals_cells_stack_w[depth] = ll_assert_not_none(w_object) + self.valuestackdepth = depth + 1 + + def pushvalue_none(self): + depth = self.valuestackdepth + # the entry is already None, and remains None + assert self.locals_cells_stack_w[depth] is None self.valuestackdepth = depth + 1 def _check_stack_index(self, index): @@ -311,6 +318,9 @@ return index >= stackstart def popvalue(self): + return ll_assert_not_none(self.popvalue_maybe_none()) + + def popvalue_maybe_none(self): depth = self.valuestackdepth - 1 assert self._check_stack_index(depth) assert depth >= 0 @@ -385,6 +395,9 @@ def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). + return ll_assert_not_none(self.peekvalue_maybe_none(index_from_top)) + + def peekvalue_maybe_none(self, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) @@ -396,7 +409,7 @@ index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) assert index >= 0 - self.locals_cells_stack_w[index] = w_object + self.locals_cells_stack_w[index] = ll_assert_not_none(w_object) @jit.unroll_safe def dropvaluesuntil(self, finaldepth): diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -80,14 +80,14 @@ if w_value is None: w_value = space.getattr(w_obj, w_name) f.pushvalue(w_value) - f.pushvalue(None) + f.pushvalue_none() @jit.unroll_safe def CALL_METHOD(f, oparg, *ignored): # opargs contains the arg, and kwarg count, excluding the implicit 'self' n_args = oparg & 0xff n_kwargs = 
(oparg >> 8) & 0xff - w_self = f.peekvalue(n_args + (2 * n_kwargs)) + w_self = f.peekvalue_maybe_none(n_args + (2 * n_kwargs)) n = n_args + (w_self is not None) if not n_kwargs: @@ -115,7 +115,7 @@ arguments, keywords, keywords_w, None, None, methodcall=w_self is not None) if w_self is None: - f.popvalue() # removes w_self, which is None + f.popvalue_maybe_none() # removes w_self, which is None w_callable = f.popvalue() if f.get_is_being_profiled() and function.is_builtin_code(w_callable): w_result = f.space.call_args_and_c_profile(f, w_callable, args) From pypy.commits at gmail.com Sat Dec 17 13:59:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 10:59:01 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Handle function pointers and void Message-ID: <58558af5.2854c20a.5e529.deed@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89124:3ef445c0db43 Date: 2016-12-17 18:58 +0000 http://bitbucket.org/pypy/pypy/changeset/3ef445c0db43/ Log: Handle function pointers and void diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -621,6 +621,8 @@ } PyVarObject; typedef struct _typeobject PyTypeObject; + +typedef void (*freefunc)(void *); """) h.configure_types() diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -726,7 +726,18 @@ self.structs[obj] = result return result elif isinstance(obj, model.PointerType): - return lltype.Ptr(self.convert_type(obj.totype)) + TO = self.convert_type(obj.totype) + if TO is lltype.Void: + return rffi.VOIDP + return lltype.Ptr(TO) + elif isinstance(obj, model.FunctionPtrType): + if obj.ellipsis: + raise NotImplementedError + args = [self.convert_type(arg) for arg in obj.args] + res = self.convert_type(obj.result) + return lltype.Ptr(lltype.FuncType(args, res)) + elif isinstance(obj, model.VoidType): + return lltype.Void else: 
raise NotImplementedError diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -5,13 +5,15 @@ Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef -from pypy.module.cpyext.api import Py_bufferP +from pypy.module.cpyext.api import Py_bufferP, h P, FT, PyO = Ptr, FuncType, PyObject PyOPtr = Ptr(lltype.Array(PyO, hints={'nolength': True})) -freefunc = P(FT([rffi.VOIDP], Void)) +#freefunc = P(FT([rffi.VOIDP], Void)) +freefunc = h.definitions['freefunc'] + destructor = P(FT([PyO], Void)) printfunc = P(FT([PyO, FILEP, rffi.INT_real], rffi.INT)) getattrfunc = P(FT([PyO, rffi.CCHARP], PyO)) @@ -200,7 +202,7 @@ ("tp_clear", inquiry), #U # Assigned meaning in release 2.1 - # rich comparisons + # rich comparisons ("tp_richcompare", richcmpfunc), #N # weak reference enabler From pypy.commits at gmail.com Sat Dec 17 14:55:42 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 17 Dec 2016 11:55:42 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: dealloc at first opportunity, rather than wait for dict_dealloc (cfbolz) Message-ID: <5855983e.0bba1c0a.43c1d.6f56@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r89125:61a5f611cb3a Date: 2016-12-17 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/61a5f611cb3a/ Log: dealloc at first opportunity, rather than wait for dict_dealloc (cfbolz) diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -10,6 +10,7 @@ from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, as_pyobj, make_typedescr, track_reference, create_ref, from_ref, decref, Py_IncRef) +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.pyerrors import 
PyErr_BadInternalCall PyDictObjectStruct = lltype.ForwardReference() @@ -58,7 +59,7 @@ def dict_dealloc(space, py_obj): py_dict = rffi.cast(PyDictObject, py_obj) decref(space, py_dict.c_ob_keys) - from pypy.module.cpyext.object import _dealloc + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) _dealloc(space, py_obj) @cpython_api([], PyObject) @@ -270,6 +271,8 @@ w_keys = from_ref(space, py_dict.c_ob_keys) ppos[0] += 1 if pos >= space.len_w(w_keys): + decref(space, py_dict.c_ob_keys) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) return 0 w_key = space.listview(w_keys)[pos] w_value = space.getitem(w_dict, w_key) From pypy.commits at gmail.com Sat Dec 17 14:55:45 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 17 Dec 2016 11:55:45 -0800 (PST) Subject: [pypy-commit] pypy better-PyDict_Next: mrege known good default into branch Message-ID: <58559841.8675c20a.dfef3.dc60@mx.google.com> Author: Matti Picus Branch: better-PyDict_Next Changeset: r89126:6f9cd556bd03 Date: 2016-12-17 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/6f9cd556bd03/ Log: mrege known good default into branch diff too long, truncating to 2000 out of 13024 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -190,6 +190,12 @@ "make sure that all calls go through space.call_args", default=False), + BoolOption("disable_entrypoints", + "Disable external entry points, notably the" + " cpyext module and cffi's embedding mode.", + default=False, + requires=[("objspace.usemodules.cpyext", False)]), + OptionDescription("std", "Standard Object Space Options", [ BoolOption("withtproxy", "support transparent proxies", default=True), diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ 
b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors +------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz @@ -307,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,145 +1,61 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++ (11, 14, etc.). +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a `backend`_, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ - -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. 
- - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html +.. 
_backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. 
+The building process may take quite some time as it includes a customized +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. - -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. -Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. 
-If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). +If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. -First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. 
+Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -153,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -209,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -311,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -370,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared 
-I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -666,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. +For example:: $ cat MyTemplate.h #include @@ -686,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. -The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. 
-One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. -Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -764,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. 
The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. The latter makes for easier code writing if the classes passed to the @@ -775,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. -The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. 
-Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. -If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. -The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. 
-In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. +Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -871,5 +669,4 @@ .. 
toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. - -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. 
-However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -76,11 +76,14 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling +.. toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. 
toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -45,3 +45,14 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,12 +83,18 @@ return 1 return exitcode + return entry_point, get_additional_entrypoints(space, w_initstdio) + + +def get_additional_entrypoints(space, w_initstdio): # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype + if space.config.objspace.disable_entrypoints: + return {} + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -188,11 +194,11 @@ return -1 return 0 - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + return {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -428,6 +428,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -247,16 +247,15 @@ def descr_function_repr(self): return self.getrepr(self.space, 'function %s' % (self.name,)) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r 
and %r" % ( identifier, previous, self)) @@ -264,10 +263,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -671,10 +671,10 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code - find = staticmethod(find) + return Function.find(space, identifier).code def signature(self): return self.sig diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,6 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi VERSION = "1.9.1" @@ -68,9 +68,14 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL - def startup(self, space): - from pypy.module._cffi_backend import embedding - embedding.glob.space = space + def __init__(self, space, *args): + MixedModule.__init__(self, space, *args) + # + if not space.config.objspace.disable_entrypoints: + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): @@ -85,11 +90,3 @@ for _name, _value in 
get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value - - -# write this entrypoint() here, to make sure it is registered early enough - at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], - c_name='pypy_init_embedded_cffi_module') -def pypy_init_embedded_cffi_module(version, init_struct): - from pypy.module._cffi_backend import embedding - return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,4 +1,5 @@ import os +from rpython.rlib import entrypoint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -46,6 +47,8 @@ glob = Global() + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') def pypy_init_embedded_cffi_module(version, init_struct): # called from __init__.py name = "?" 
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -77,7 +77,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) @@ -86,7 +86,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -169,8 +169,8 @@ } def setup_method(self, method): - # https://www.verisign.net/ - ADDR = "www.verisign.net", 443 + # https://gmail.com/ + ADDR = "gmail.com", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml 
-I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( 
"cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def 
c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + "cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return 
_c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG 
-C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ -1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with 
rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class 
State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. (char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # 
w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - space.setattr(w_self, space.wrap('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. 
remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.wrap(i)) - activate_branch(space, w_b) - 
space.call_method(w_branch, "SetStatus", space.wrap(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.wrap('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it for reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data 
- w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.wrap('_'+attr), space.wrap(cdata)) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) - - def iter_w(self): - return self.space.wrap(self) - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. 
Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.wrap("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.wrap("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, 
but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of 
libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... 
+# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = 
capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, 
objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), 'is_enum' : ([c_ccharp], c_int), # type/class reflection information @@ -216,8 +249,14 @@ 'strtoull' : ([c_ccharp], c_ullong), 'free' : ([c_voidp], c_void), - 'charp2stdstring' : ([c_ccharp], c_object), + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + } # size/offset are backend-specific but fixed after load @@ -277,87 +316,99 @@ ptr = w_cdata.unsafe_escaping_ptr() return rffi.cast(rffi.VOIDP, ptr) +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + def c_load_dictionary(name): return libffi.CDLL(name) # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] + args = [_ArgH(cppscope.handle), _ArgL(iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): - return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) -def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] + args = [_ArgH(cppclass.handle), _ArgH(cppobj)] return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'actual_class', args))) # memory management ---------------------------------------------------------- def c_allocate(space, cppclass): - return _cdata_to_cobject(space, call_capi(space, 'allocate', [_Arg(h=cppclass.handle)])) + return _cdata_to_cobject(space, call_capi(space, 'allocate', [_ArgH(cppclass.handle)])) def c_deallocate(space, cppclass, cppobject): - call_capi(space, 'deallocate', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'deallocate', [_ArgH(cppclass.handle), _ArgH(cppobject)]) def c_destruct(space, cppclass, cppobject): - call_capi(space, 'destruct', [_Arg(h=cppclass.handle), _Arg(h=cppobject)]) + call_capi(space, 'destruct', 
[_ArgH(cppclass.handle), _ArgH(cppobject)]) # method/function dispatching ------------------------------------------------ def c_call_v(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] call_capi(space, 'call_v', args) def c_call_b(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.UCHAR, space.c_uint_w(call_capi(space, 'call_b', args))) def c_call_c(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.CHAR, space.str_w(call_capi(space, 'call_c', args))[0]) def c_call_h(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.SHORT, space.int_w(call_capi(space, 'call_h', args))) def c_call_i(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.INT, space.c_int_w(call_capi(space, 'call_i', args))) def c_call_l(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.LONG, space.int_w(call_capi(space, 'call_l', args))) def c_call_ll(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), 
_ArgP(cargs)] return rffi.cast(rffi.LONGLONG, space.r_longlong_w(call_capi(space, 'call_ll', args))) def c_call_f(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.FLOAT, r_singlefloat(space.float_w(call_capi(space, 'call_f', args)))) def c_call_d(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return rffi.cast(rffi.DOUBLE, space.float_w(call_capi(space, 'call_d', args))) +def c_call_ld(space, cppmethod, cppobject, nargs, cargs): + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] + return rffi.cast(rffi.LONGDOUBLE, space.float_w(call_capi(space, 'call_ld', args))) def c_call_r(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] + args = [_ArgH(cppmethod), _ArgH(cppobject), _ArgL(nargs), _ArgP(cargs)] return _cdata_to_ptr(space, call_capi(space, 'call_r', args)) def c_call_s(space, cppmethod, cppobject, nargs, cargs): - args = [_Arg(h=cppmethod), _Arg(h=cppobject), _Arg(l=nargs), _Arg(vp=cargs)] - return call_capi(space, 'call_s', args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') From pypy.commits at gmail.com Sat Dec 17 16:38:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 17 Dec 2016 13:38:22 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5855b04e.69efc20a.b2fb5.0b1e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r835:99c5d63b977a Date: 2016-12-17 22:38 +0100 http://bitbucket.org/pypy/pypy.org/changeset/99c5d63b977a/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66464 of $105000 (63.3%) + $66474 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Sat Dec 17 22:54:40 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:40 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: pointers to DelayedStruct Message-ID: <58560880.4dd41c0a.c02f0.f201@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89127:69eba1948840 Date: 2016-12-18 01:16 +0000 http://bitbucket.org/pypy/pypy/changeset/69eba1948840/ Log: pointers to DelayedStruct diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -661,12 +661,13 @@ return CNAME_TO_LLTYPE[name] class DelayedStruct(object): - def __init__(self, name, fields): + def __init__(self, name, fields, TYPE): self.struct_name = name self.fields = fields + self.TYPE = TYPE def __repr__(self): - return "".format(vars(self)) + return "".format(**vars(self)) class ParsedSource(object): @@ -695,12 +696,12 @@ def new_struct(self, obj): if obj.fldtypes is None: - return lltype.ForwardReference() + fields = None else: fields = zip( obj.fldnames, [self.convert_type(field) for field in obj.fldtypes]) - return DelayedStruct(obj.name, fields) + return DelayedStruct(obj.name, fields, lltype.ForwardReference()) def realize_struct(self, struct, type_name): from pypy.module.cpyext.api import CConfig, TYPES @@ -729,6 +730,8 @@ TO = self.convert_type(obj.totype) if TO is lltype.Void: return rffi.VOIDP + elif isinstance(TO, DelayedStruct): + TO = TO.TYPE return lltype.Ptr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -31,6 +31,7 @@ initproc = P(FT([PyO, PyO, PyO], rffi.INT_real)) newfunc = P(FT([PyTypeObjectPtr, PyO, PyO], PyO)) allocfunc = P(FT([PyTypeObjectPtr, Py_ssize_t], PyO)) + unaryfunc = P(FT([PyO], PyO)) binaryfunc = P(FT([PyO, PyO], PyO)) 
ternaryfunc = P(FT([PyO, PyO, PyO], PyO)) From pypy.commits at gmail.com Sat Dec 17 22:54:42 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:42 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Add buffer to the pseudo-header, handle pointers to primitive types and fixed-size arrays Message-ID: <58560882.6249c20a.a829a.662c@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89128:456561b7910a Date: 2016-12-18 01:24 +0000 http://bitbucket.org/pypy/pypy/changeset/456561b7910a/ Log: Add buffer to the pseudo-header, handle pointers to primitive types and fixed-size arrays diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -623,6 +623,38 @@ typedef struct _typeobject PyTypeObject; typedef void (*freefunc)(void *); + +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 +#define Py_MAX_FMT 128 +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference */ + Py_ssize_t len; + + /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + Py_ssize_t itemsize; + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char _format[Py_MAX_FMT]; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; + /* static store for shape and strides of + mono-dimensional buffers. 
*/ + /* Py_ssize_t smalltable[2]; */ + void *internal; /* always NULL for app-level objects */ +} Py_buffer; + + +typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); +typedef void (*releasebufferproc)(PyObject *, Py_buffer *); +/* end Py3k buffer interface */ + """) h.configure_types() @@ -645,26 +677,10 @@ PyVarObjectStruct = h.definitions['PyVarObject'].OF PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = cpython_struct( - "Py_buffer", ( - ('buf', rffi.VOIDP), - ('obj', PyObject), - ('len', Py_ssize_t), - ('itemsize', Py_ssize_t), +Py_buffer = h.definitions['Py_buffer'] +getbufferproc = h.definitions['getbufferproc'] +releasebufferproc = h.definitions['releasebufferproc'] - ('readonly', lltype.Signed), - ('ndim', lltype.Signed), - ('format', rffi.CCHARP), - ('shape', Py_ssize_tP), - ('strides', Py_ssize_tP), - ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), - ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('suboffsets', Py_ssize_tP), - #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - ('internal', rffi.VOIDP) - )) -Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() def is_PyObject(TYPE): diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -732,6 +732,8 @@ return rffi.VOIDP elif isinstance(TO, DelayedStruct): TO = TO.TYPE + elif isinstance(obj.totype, model.PrimitiveType): + return rffi.CArrayPtr(TO) return lltype.Ptr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: @@ -741,6 +743,8 @@ return lltype.Ptr(lltype.FuncType(args, res)) elif isinstance(obj, model.VoidType): return lltype.Void + elif isinstance(obj, model.ArrayType): + return rffi.CFixedArray(self.convert_type(obj.item), obj.length) else: raise NotImplementedError From pypy.commits at gmail.com Sat Dec 17 22:54:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:48 -0800 
(PST) Subject: [pypy-commit] pypy rffi-parser: fix Message-ID: <58560888.45f6c20a.23a87.5653@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89131:b9189e339e2d Date: 2016-12-18 02:25 +0000 http://bitbucket.org/pypy/pypy/changeset/b9189e339e2d/ Log: fix diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -686,8 +686,8 @@ assert name not in self.definitions tp = self.convert_type(obj) if isinstance(tp, DelayedStruct): - tp = self.realize_struct(tp, name) - self.structs[obj] = tp + self.realize_struct(tp, name) + tp = self.structs[obj] = tp.TYPE self.definitions[name] = lltype.Typedef(tp, name) def add_macro(self, name, value): @@ -705,13 +705,10 @@ return struct def realize_struct(self, struct, type_name): - from pypy.module.cpyext.api import CConfig, TYPES configname = type_name.replace(' ', '__') setattr(self._Config, configname, rffi_platform.Struct(type_name, struct.fields)) - forward = lltype.ForwardReference() - self._TYPES[configname] = forward - return forward + self._TYPES[configname] = struct.TYPE def configure_types(self): for name, TYPE in rffi_platform.configure(self._Config).iteritems(): From pypy.commits at gmail.com Sat Dec 17 22:54:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:44 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Expand pseudo-header, fix handling of pointers to non-containers Message-ID: <58560884.e5ebc20a.8bf76.6624@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89129:5c47c038b83d Date: 2016-12-18 01:38 +0000 http://bitbucket.org/pypy/pypy/changeset/5c47c038b83d/ Log: Expand pseudo-header, fix handling of pointers to non-containers diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -623,6 +623,51 @@ typedef struct _typeobject PyTypeObject; typedef void (*freefunc)(void *); +typedef 
void (*destructor)(PyObject *); +typedef int (*printfunc)(PyObject *, FILE *, int); +typedef PyObject *(*getattrfunc)(PyObject *, char *); +typedef PyObject *(*getattrofunc)(PyObject *, PyObject *); +typedef int (*setattrfunc)(PyObject *, char *, PyObject *); +typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *); +typedef int (*cmpfunc)(PyObject *, PyObject *); +typedef PyObject *(*reprfunc)(PyObject *); +typedef long (*hashfunc)(PyObject *); +typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int); +typedef PyObject *(*getiterfunc) (PyObject *); +typedef PyObject *(*iternextfunc) (PyObject *); +typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *); +typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *); +typedef int (*initproc)(PyObject *, PyObject *, PyObject *); +typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *); +typedef PyObject *(*allocfunc)(struct _typeobject *, Py_ssize_t); + +typedef PyObject * (*unaryfunc)(PyObject *); +typedef PyObject * (*binaryfunc)(PyObject *, PyObject *); +typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *); +typedef int (*inquiry)(PyObject *); +typedef Py_ssize_t (*lenfunc)(PyObject *); +typedef int (*coercion)(PyObject **, PyObject **); +typedef PyObject *(*intargfunc)(PyObject *, int); +typedef PyObject *(*intintargfunc)(PyObject *, int, int); +typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t); +typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t); +typedef int(*intobjargproc)(PyObject *, int, PyObject *); +typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *); +typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *); +typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *); +typedef int(*objobjargproc)(PyObject *, PyObject *, PyObject *); + + +/* int-based buffer interface */ +typedef int (*getreadbufferproc)(PyObject *, int, void **); +typedef int 
(*getwritebufferproc)(PyObject *, int, void **); +typedef int (*getsegcountproc)(PyObject *, int *); +typedef int (*getcharbufferproc)(PyObject *, int, char **); +/* ssize_t-based buffer interface */ +typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **); +typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **); +typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *); +typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **); /* Py3k buffer interface, adapted for PyPy */ #define Py_MAX_NDIMS 32 diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -732,9 +732,10 @@ return rffi.VOIDP elif isinstance(TO, DelayedStruct): TO = TO.TYPE - elif isinstance(obj.totype, model.PrimitiveType): + if isinstance(TO, lltype.ContainerType): + return lltype.Ptr(TO) + else: return rffi.CArrayPtr(TO) - return lltype.Ptr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: raise NotImplementedError From pypy.commits at gmail.com Sat Dec 17 22:54:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:50 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Handle FILE* Message-ID: <5856088a.e2acc20a.4f0e7.533b@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89132:5d78c74e573b Date: 2016-12-18 02:56 +0000 http://bitbucket.org/pypy/pypy/changeset/5d78c74e573b/ Log: Handle FILE* diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -3,6 +3,7 @@ from cffi.commontypes import COMMON_TYPES, resolve_common_type import pycparser import weakref, re +from rpython.rlib.rfile import FILEP from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rfficache, rffi_platform @@ -644,7 +645,7 @@ CNAME_TO_LLTYPE = { 'char': rffi.CHAR, 'double': rffi.DOUBLE, 'long double': rffi.LONGDOUBLE, - 
'float': rffi.FLOAT} + 'float': rffi.FLOAT, 'FILE': FILEP.TO} def add_inttypes(): for name in rffi.TYPES: @@ -695,6 +696,8 @@ self.macros[name] = value def new_struct(self, obj): + if obj.name == '_IO_FILE': # cffi weirdness + return cname_to_lltype('FILE') struct = DelayedStruct(obj.name, None, lltype.ForwardReference()) # Cache it early, to avoid infinite recursion self.structs[obj] = struct From pypy.commits at gmail.com Sat Dec 17 22:54:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:46 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Complete the declaration of PyTypeObject, fix some of the issues Message-ID: <58560886.0d1a1c0a.8731c.ed74@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89130:b3e206c8fe8c Date: 2016-12-18 02:07 +0000 http://bitbucket.org/pypy/pypy/changeset/b3e206c8fe8c/ Log: Complete the declaration of PyTypeObject, fix some of the issues diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -620,7 +620,7 @@ PyObject_VAR_HEAD } PyVarObject; -typedef struct _typeobject PyTypeObject; +struct _typeobject; typedef void (*freefunc)(void *); typedef void (*destructor)(PyObject *); @@ -700,6 +700,178 @@ typedef void (*releasebufferproc)(PyObject *, Py_buffer *); /* end Py3k buffer interface */ +typedef int (*objobjproc)(PyObject *, PyObject *); +typedef int (*visitproc)(PyObject *, void *); +typedef int (*traverseproc)(PyObject *, visitproc, void *); + +typedef struct { + /* For numbers without flag bit Py_TPFLAGS_CHECKTYPES set, all + arguments are guaranteed to be of the object's type (modulo + coercion hacks -- i.e. if the type's coercion function + returns other types, then these are allowed as well). Numbers that + have the Py_TPFLAGS_CHECKTYPES flag bit set should check *both* + arguments for proper type and implement the necessary conversions + in the slot functions themselves. 
*/ + + binaryfunc nb_add; + binaryfunc nb_subtract; + binaryfunc nb_multiply; + binaryfunc nb_divide; + binaryfunc nb_remainder; + binaryfunc nb_divmod; + ternaryfunc nb_power; + unaryfunc nb_negative; + unaryfunc nb_positive; + unaryfunc nb_absolute; + inquiry nb_nonzero; + unaryfunc nb_invert; + binaryfunc nb_lshift; + binaryfunc nb_rshift; + binaryfunc nb_and; + binaryfunc nb_xor; + binaryfunc nb_or; + coercion nb_coerce; + unaryfunc nb_int; + unaryfunc nb_long; + unaryfunc nb_float; + unaryfunc nb_oct; + unaryfunc nb_hex; + /* Added in release 2.0 */ + binaryfunc nb_inplace_add; + binaryfunc nb_inplace_subtract; + binaryfunc nb_inplace_multiply; + binaryfunc nb_inplace_divide; + binaryfunc nb_inplace_remainder; + ternaryfunc nb_inplace_power; + binaryfunc nb_inplace_lshift; + binaryfunc nb_inplace_rshift; + binaryfunc nb_inplace_and; + binaryfunc nb_inplace_xor; + binaryfunc nb_inplace_or; + + /* Added in release 2.2 */ + /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ + binaryfunc nb_floor_divide; + binaryfunc nb_true_divide; + binaryfunc nb_inplace_floor_divide; + binaryfunc nb_inplace_true_divide; + + /* Added in release 2.5 */ + unaryfunc nb_index; +} PyNumberMethods; + +typedef struct { + lenfunc sq_length; + binaryfunc sq_concat; + ssizeargfunc sq_repeat; + ssizeargfunc sq_item; + ssizessizeargfunc sq_slice; + ssizeobjargproc sq_ass_item; + ssizessizeobjargproc sq_ass_slice; + objobjproc sq_contains; + /* Added in release 2.0 */ + binaryfunc sq_inplace_concat; + ssizeargfunc sq_inplace_repeat; +} PySequenceMethods; + +typedef struct { + lenfunc mp_length; + binaryfunc mp_subscript; + objobjargproc mp_ass_subscript; +} PyMappingMethods; + +typedef struct { + readbufferproc bf_getreadbuffer; + writebufferproc bf_getwritebuffer; + segcountproc bf_getsegcount; + charbufferproc bf_getcharbuffer; + getbufferproc bf_getbuffer; + releasebufferproc bf_releasebuffer; +} PyBufferProcs; + + + +typedef struct _typeobject { + PyObject_VAR_HEAD + const char 
*tp_name; /* For printing, in format "." */ + Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */ + + /* Methods to implement standard operations */ + + destructor tp_dealloc; + printfunc tp_print; + getattrfunc tp_getattr; + setattrfunc tp_setattr; + cmpfunc tp_compare; + reprfunc tp_repr; + + /* Method suites for standard classes */ + + PyNumberMethods *tp_as_number; + PySequenceMethods *tp_as_sequence; + PyMappingMethods *tp_as_mapping; + + /* More standard operations (here for binary compatibility) */ + + hashfunc tp_hash; + ternaryfunc tp_call; + reprfunc tp_str; + getattrofunc tp_getattro; + setattrofunc tp_setattro; + + /* Functions to access object as input/output buffer */ + PyBufferProcs *tp_as_buffer; + + /* Flags to define presence of optional/expanded features */ + long tp_flags; + + const char *tp_doc; /* Documentation string */ + + /* Assigned meaning in release 2.0 */ + /* call function for all accessible objects */ + traverseproc tp_traverse; + + /* delete references to contained objects */ + inquiry tp_clear; + + /* Assigned meaning in release 2.1 */ + /* rich comparisons */ + richcmpfunc tp_richcompare; + + /* weak reference enabler */ + Py_ssize_t tp_weaklistoffset; + + /* Added in release 2.2 */ + /* Iterators */ + getiterfunc tp_iter; + iternextfunc tp_iternext; + + /* Attribute descriptor and subclassing stuff */ + struct PyMethodDef *tp_methods; + struct PyMemberDef *tp_members; + struct PyGetSetDef *tp_getset; + struct _typeobject *tp_base; + PyObject *tp_dict; + descrgetfunc tp_descr_get; + descrsetfunc tp_descr_set; + Py_ssize_t tp_dictoffset; + initproc tp_init; + allocfunc tp_alloc; + newfunc tp_new; + freefunc tp_free; /* Low-level free-memory routine */ + inquiry tp_is_gc; /* For PyObject_IS_GC */ + PyObject *tp_bases; + PyObject *tp_mro; /* method resolution order */ + PyObject *tp_cache; + PyObject *tp_subclasses; + PyObject *tp_weaklist; + destructor tp_del; + + /* Type attribute cache version tag. 
Added in version 2.6 */ + unsigned int tp_version_tag; + +} PyTypeObject; + """) h.configure_types() @@ -722,7 +894,8 @@ PyVarObjectStruct = h.definitions['PyVarObject'].OF PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = h.definitions['Py_buffer'] +Py_buffer = h.definitions['Py_buffer'].OF +Py_bufferP = lltype.Ptr(Py_buffer) getbufferproc = h.definitions['getbufferproc'] releasebufferproc = h.definitions['releasebufferproc'] diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -695,13 +695,14 @@ self.macros[name] = value def new_struct(self, obj): - if obj.fldtypes is None: - fields = None - else: - fields = zip( + struct = DelayedStruct(obj.name, None, lltype.ForwardReference()) + # Cache it early, to avoid infinite recursion + self.structs[obj] = struct + if obj.fldtypes is not None: + struct.fields = zip( obj.fldnames, [self.convert_type(field) for field in obj.fldtypes]) - return DelayedStruct(obj.name, fields, lltype.ForwardReference()) + return struct def realize_struct(self, struct, type_name): from pypy.module.cpyext.api import CConfig, TYPES diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -235,6 +235,5 @@ ("tp_weaklist", PyObject), #U ("tp_del", destructor), #N ]) -cpython_struct("PyTypeObject", PyTypeObjectFields, PyTypeObject) From pypy.commits at gmail.com Sat Dec 17 22:54:53 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:53 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Add missing declarations needed by PyTypeObject Message-ID: <5856088d.52301c0a.a3729.f231@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89133:62289b903d10 Date: 2016-12-18 03:40 +0000 http://bitbucket.org/pypy/pypy/changeset/62289b903d10/ Log: Add missing declarations needed by PyTypeObject diff --git 
a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -789,6 +789,42 @@ releasebufferproc bf_releasebuffer; } PyBufferProcs; +/* from descrobject.h */ +typedef PyObject *(*getter)(PyObject *, void *); +typedef int (*setter)(PyObject *, PyObject *, void *); + +typedef struct PyGetSetDef { + char *name; + getter get; + setter set; + char *doc; + void *closure; +} PyGetSetDef; + +/* from methodobject.h */ +typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); +typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *, + PyObject *); +typedef PyObject *(*PyNoArgsFunction)(PyObject *); + +struct PyMethodDef { + const char *ml_name; /* The name of the built-in function/method */ + PyCFunction ml_meth; /* The C function that implements it */ + int ml_flags; /* Combination of METH_xxx flags, which mostly + describe the args expected by the C func */ + const char *ml_doc; /* The __doc__ attribute, or NULL */ +}; +typedef struct PyMethodDef PyMethodDef; + +/* from structmember.h */ +typedef struct PyMemberDef { + /* Current version, use this */ + char *name; + int type; + Py_ssize_t offset; + int flags; + char *doc; +} PyMemberDef; typedef struct _typeobject { diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -50,7 +50,7 @@ cache. CPython includes some extra checking here to make sure the module being initialized lines up with what's expected, but we don't. 
""" - from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr + from pypy.module.cpyext.api import PyTypeObjectPtr modname = rffi.charp2str(name) state = space.fromcache(State) f_name, f_path = state.package_context diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -6,9 +6,9 @@ from rpython.rlib.rarithmetic import widen from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - mangle_name, pypy_decl, Py_buffer, Py_bufferP) + mangle_name, pypy_decl, Py_buffer, Py_bufferP, PyTypeObjectPtr) from pypy.module.cpyext.typeobjectdefs import ( - unaryfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, + unaryfunc, ternaryfunc, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -5,7 +5,7 @@ from pypy.module.cpyext.bytesobject import new_empty_str, PyBytesObject from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP, generic_cpy_call from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref -from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr +from pypy.module.cpyext.api import PyTypeObjectPtr import py import sys diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,239 +1,62 @@ -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.lltypesystem.lltype import Ptr, FuncType, Void -from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, - 
PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, - Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref -from pypy.module.cpyext.modsupport import PyMethodDef -from pypy.module.cpyext.api import Py_bufferP, h +from pypy.module.cpyext.api import h -P, FT, PyO = Ptr, FuncType, PyObject -PyOPtr = Ptr(lltype.Array(PyO, hints={'nolength': True})) +freefunc = h.definitions['freefunc'] +destructor = h.definitions['destructor'] +printfunc = h.definitions['printfunc'] +getattrfunc = h.definitions['getattrfunc'] +getattrofunc = h.definitions['getattrofunc'] +setattrfunc = h.definitions['setattrfunc'] +setattrofunc = h.definitions['setattrofunc'] +cmpfunc = h.definitions['cmpfunc'] +reprfunc = h.definitions['reprfunc'] +hashfunc = h.definitions['hashfunc'] +richcmpfunc = h.definitions['richcmpfunc'] +getiterfunc = h.definitions['getiterfunc'] +iternextfunc = h.definitions['iternextfunc'] +descrgetfunc = h.definitions['descrgetfunc'] +descrsetfunc = h.definitions['descrsetfunc'] +initproc = h.definitions['initproc'] +newfunc = h.definitions['newfunc'] +allocfunc = h.definitions['allocfunc'] -#freefunc = P(FT([rffi.VOIDP], Void)) -freefunc = h.definitions['freefunc'] +unaryfunc = h.definitions['unaryfunc'] +binaryfunc = h.definitions['binaryfunc'] +ternaryfunc = h.definitions['ternaryfunc'] +inquiry = h.definitions['inquiry'] +lenfunc = h.definitions['lenfunc'] +coercion = h.definitions['coercion'] +intargfunc = h.definitions['intargfunc'] +intintargfunc = h.definitions['intintargfunc'] +ssizeargfunc = h.definitions['ssizeargfunc'] +ssizessizeargfunc = h.definitions['ssizessizeargfunc'] +intobjargproc = h.definitions['intobjargproc'] +intintobjargproc = h.definitions['intintobjargproc'] +ssizeobjargproc = h.definitions['ssizeobjargproc'] +ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'] +objobjargproc = h.definitions['objobjargproc'] -destructor = P(FT([PyO], Void)) -printfunc = 
P(FT([PyO, FILEP, rffi.INT_real], rffi.INT)) -getattrfunc = P(FT([PyO, rffi.CCHARP], PyO)) -getattrofunc = P(FT([PyO, PyO], PyO)) -setattrfunc = P(FT([PyO, rffi.CCHARP, PyO], rffi.INT_real)) -setattrofunc = P(FT([PyO, PyO, PyO], rffi.INT_real)) -cmpfunc = P(FT([PyO, PyO], rffi.INT_real)) -reprfunc = P(FT([PyO], PyO)) -hashfunc = P(FT([PyO], lltype.Signed)) -richcmpfunc = P(FT([PyO, PyO, rffi.INT_real], PyO)) -getiterfunc = P(FT([PyO], PyO)) -iternextfunc = P(FT([PyO], PyO)) -descrgetfunc = P(FT([PyO, PyO, PyO], PyO)) -descrsetfunc = P(FT([PyO, PyO, PyO], rffi.INT_real)) -initproc = P(FT([PyO, PyO, PyO], rffi.INT_real)) -newfunc = P(FT([PyTypeObjectPtr, PyO, PyO], PyO)) -allocfunc = P(FT([PyTypeObjectPtr, Py_ssize_t], PyO)) +objobjproc = h.definitions['objobjproc'] +visitproc = h.definitions['visitproc'] +traverseproc = h.definitions['traverseproc'] -unaryfunc = P(FT([PyO], PyO)) -binaryfunc = P(FT([PyO, PyO], PyO)) -ternaryfunc = P(FT([PyO, PyO, PyO], PyO)) -inquiry = P(FT([PyO], rffi.INT_real)) -lenfunc = P(FT([PyO], Py_ssize_t)) -coercion = P(FT([PyOPtr, PyOPtr], rffi.INT_real)) -intargfunc = P(FT([PyO, rffi.INT_real], PyO)) -intintargfunc = P(FT([PyO, rffi.INT_real, rffi.INT], PyO)) -ssizeargfunc = P(FT([PyO, Py_ssize_t], PyO)) -ssizessizeargfunc = P(FT([PyO, Py_ssize_t, Py_ssize_t], PyO)) -intobjargproc = P(FT([PyO, rffi.INT_real, PyO], rffi.INT)) -intintobjargproc = P(FT([PyO, rffi.INT_real, rffi.INT, PyO], rffi.INT)) -ssizeobjargproc = P(FT([PyO, Py_ssize_t, PyO], rffi.INT_real)) -ssizessizeobjargproc = P(FT([PyO, Py_ssize_t, Py_ssize_t, PyO], rffi.INT_real)) -objobjargproc = P(FT([PyO, PyO, PyO], rffi.INT_real)) +getter = h.definitions['getter'] +setter = h.definitions['setter'] -objobjproc = P(FT([PyO, PyO], rffi.INT_real)) -visitproc = P(FT([PyO, rffi.VOIDP], rffi.INT_real)) -traverseproc = P(FT([PyO, visitproc, rffi.VOIDP], rffi.INT_real)) +#wrapperfunc = h.definitions['wrapperfunc'] +#wrapperfunc_kwds = h.definitions['wrapperfunc_kwds'] -getter = 
P(FT([PyO, rffi.VOIDP], PyO)) -setter = P(FT([PyO, PyO, rffi.VOIDP], rffi.INT_real)) +readbufferproc = h.definitions['readbufferproc'] +writebufferproc = h.definitions['writebufferproc'] +segcountproc = h.definitions['segcountproc'] +charbufferproc = h.definitions['charbufferproc'] +getbufferproc = h.definitions['getbufferproc'] +releasebufferproc = h.definitions['releasebufferproc'] -wrapperfunc = P(FT([PyO, PyO, rffi.VOIDP], PyO)) -wrapperfunc_kwds = P(FT([PyO, PyO, rffi.VOIDP, PyO], PyO)) -readbufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) -writebufferproc = P(FT([PyO, Py_ssize_t, rffi.VOIDPP], Py_ssize_t)) -segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) -charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) -getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) -releasebufferproc = rffi.VOIDP - - -PyGetSetDef = cpython_struct("PyGetSetDef", ( - ("name", rffi.CCHARP), - ("get", getter), - ("set", setter), - ("doc", rffi.CCHARP), - ("closure", rffi.VOIDP), -)) - -PyNumberMethods = cpython_struct("PyNumberMethods", ( - ("nb_add", binaryfunc), - ("nb_subtract", binaryfunc), - ("nb_multiply", binaryfunc), - ("nb_divide", binaryfunc), - ("nb_remainder", binaryfunc), - ("nb_divmod", binaryfunc), - ("nb_power", ternaryfunc), - ("nb_negative", unaryfunc), - ("nb_positive", unaryfunc), - ("nb_absolute", unaryfunc), - ("nb_nonzero", inquiry), - ("nb_invert", unaryfunc), - ("nb_lshift", binaryfunc), - ("nb_rshift", binaryfunc), - ("nb_and", binaryfunc), - ("nb_xor", binaryfunc), - ("nb_or", binaryfunc), - ("nb_coerce", coercion), - ("nb_int", unaryfunc), - ("nb_long", unaryfunc), - ("nb_float", unaryfunc), - ("nb_oct", unaryfunc), - ("nb_hex", unaryfunc), - ("nb_inplace_add", binaryfunc), - ("nb_inplace_subtract", binaryfunc), - ("nb_inplace_multiply", binaryfunc), - ("nb_inplace_divide", binaryfunc), - ("nb_inplace_remainder", binaryfunc), - ("nb_inplace_power", ternaryfunc), - ("nb_inplace_lshift", binaryfunc), - 
("nb_inplace_rshift", binaryfunc), - ("nb_inplace_and", binaryfunc), - ("nb_inplace_xor", binaryfunc), - ("nb_inplace_or", binaryfunc), - - ("nb_floor_divide", binaryfunc), - ("nb_true_divide", binaryfunc), - ("nb_inplace_floor_divide", binaryfunc), - ("nb_inplace_true_divide", binaryfunc), - - ("nb_index", unaryfunc), -)) - -PySequenceMethods = cpython_struct("PySequenceMethods", ( - ("sq_length", lenfunc), - ("sq_concat", binaryfunc), - ("sq_repeat", ssizeargfunc), - ("sq_item", ssizeargfunc), - ("sq_slice", ssizessizeargfunc), - ("sq_ass_item", ssizeobjargproc), - ("sq_ass_slice", ssizessizeobjargproc), - ("sq_contains", objobjproc), - ("sq_inplace_concat", binaryfunc), - ("sq_inplace_repeat", ssizeargfunc), -)) - -PyMappingMethods = cpython_struct("PyMappingMethods", ( - ("mp_length", lenfunc), - ("mp_subscript", binaryfunc), - ("mp_ass_subscript", objobjargproc), -)) - -PyBufferProcs = cpython_struct("PyBufferProcs", ( - ("bf_getreadbuffer", readbufferproc), - ("bf_getwritebuffer", writebufferproc), - ("bf_getsegcount", segcountproc), - ("bf_getcharbuffer", charbufferproc), - ("bf_getbuffer", getbufferproc), - ("bf_releasebuffer", releasebufferproc), -)) - -PyMemberDef = cpython_struct("PyMemberDef", ( - ("name", rffi.CCHARP), - ("type", rffi.INT_real), - ("offset", Py_ssize_t), - ("flags", rffi.INT_real), - ("doc", rffi.CCHARP), -)) - -# These fields are supported and used in different ways -# The following comments mean: -# #E essential, initialized for all PTOs -# #S supported -# #U unsupported -# #N not yet implemented -PyTypeObjectFields = [] -PyTypeObjectFields.extend(PyVarObjectFields) -PyTypeObjectFields.extend([ - ("tp_name", rffi.CCHARP), #E For printing, in format "." 
- ("tp_basicsize", Py_ssize_t), #E For allocation - ("tp_itemsize", Py_ssize_t), #E " - - # Methods to implement standard operations - ("tp_dealloc", destructor), #E - ("tp_print", printfunc), #U - ("tp_getattr", getattrfunc), #U - ("tp_setattr", setattrfunc), #U - ("tp_compare", cmpfunc), #N - ("tp_repr", reprfunc), #N - - # Method suites for standard classes - ("tp_as_number", Ptr(PyNumberMethods)), #N - ("tp_as_sequence", Ptr(PySequenceMethods)), #N - ("tp_as_mapping", Ptr(PyMappingMethods)), #N - - # More standard operations (here for binary compatibility) - ("tp_hash", hashfunc), #N - ("tp_call", ternaryfunc), #N - ("tp_str", reprfunc), #N - ("tp_getattro", getattrofunc),#N - ("tp_setattro", setattrofunc),#N - - # Functions to access object as input/output buffer - ("tp_as_buffer", Ptr(PyBufferProcs)), #U - - # Flags to define presence of optional/expanded features - ("tp_flags", lltype.Signed), #E - - ("tp_doc", rffi.CCHARP), #N Documentation string - - # Assigned meaning in release 2.0 - # call function for all accessible objects - ("tp_traverse", traverseproc),#U - - # delete references to contained objects - ("tp_clear", inquiry), #U - - # Assigned meaning in release 2.1 - # rich comparisons - ("tp_richcompare", richcmpfunc), #N - - # weak reference enabler - ("tp_weaklistoffset", Py_ssize_t), #U - - # Added in release 2.2 - # Iterators - ("tp_iter", getiterfunc), #N - ("tp_iternext", iternextfunc), #N - - # Attribute descriptor and subclassing stuff - ("tp_methods", Ptr(PyMethodDef)), #S - ("tp_members", Ptr(PyMemberDef)), #S - ("tp_getset", Ptr(PyGetSetDef)), #S - ("tp_base", Ptr(PyTypeObject)), #E - ("tp_dict", PyObject), #U - ("tp_descr_get", descrgetfunc), #N - ("tp_descr_set", descrsetfunc), #N - ("tp_dictoffset", Py_ssize_t), #U - ("tp_init", initproc), #N - ("tp_alloc", allocfunc), #N - ("tp_new", newfunc), #S - ("tp_free", freefunc), #E Low-level free-memory routine - ("tp_is_gc", inquiry), #U For PyObject_IS_GC - ("tp_bases", PyObject),#E - 
("tp_mro", PyObject), #U method resolution order - ("tp_cache", PyObject),#S - ("tp_subclasses", PyObject), #U - ("tp_weaklist", PyObject), #U - ("tp_del", destructor), #N - ]) - - +PyGetSetDef = h.definitions['PyGetSetDef'].OF +PyNumberMethods = h.definitions['PyNumberMethods'].OF +PySequenceMethods = h.definitions['PySequenceMethods'].OF +PyMappingMethods = h.definitions['PyMappingMethods'].OF +PyBufferProcs = h.definitions['PyBufferProcs'].OF +PyMemberDef = h.definitions['PyMemberDef'].OF From pypy.commits at gmail.com Sat Dec 17 22:54:57 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:57 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: fix Message-ID: <58560891.e7b1c20a.678f4.5dcd@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89135:1c1d3f89270c Date: 2016-12-18 03:53 +0000 http://bitbucket.org/pypy/pypy/changeset/1c1d3f89270c/ Log: fix diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,57 +1,57 @@ from pypy.module.cpyext.api import h -freefunc = h.definitions['freefunc'] -destructor = h.definitions['destructor'] -printfunc = h.definitions['printfunc'] -getattrfunc = h.definitions['getattrfunc'] -getattrofunc = h.definitions['getattrofunc'] -setattrfunc = h.definitions['setattrfunc'] -setattrofunc = h.definitions['setattrofunc'] -cmpfunc = h.definitions['cmpfunc'] -reprfunc = h.definitions['reprfunc'] -hashfunc = h.definitions['hashfunc'] -richcmpfunc = h.definitions['richcmpfunc'] -getiterfunc = h.definitions['getiterfunc'] -iternextfunc = h.definitions['iternextfunc'] -descrgetfunc = h.definitions['descrgetfunc'] -descrsetfunc = h.definitions['descrsetfunc'] -initproc = h.definitions['initproc'] -newfunc = h.definitions['newfunc'] -allocfunc = h.definitions['allocfunc'] +freefunc = h.definitions['freefunc'].OF +destructor = h.definitions['destructor'].OF +printfunc = 
h.definitions['printfunc'].OF +getattrfunc = h.definitions['getattrfunc'].OF +getattrofunc = h.definitions['getattrofunc'].OF +setattrfunc = h.definitions['setattrfunc'].OF +setattrofunc = h.definitions['setattrofunc'].OF +cmpfunc = h.definitions['cmpfunc'].OF +reprfunc = h.definitions['reprfunc'].OF +hashfunc = h.definitions['hashfunc'].OF +richcmpfunc = h.definitions['richcmpfunc'].OF +getiterfunc = h.definitions['getiterfunc'].OF +iternextfunc = h.definitions['iternextfunc'].OF +descrgetfunc = h.definitions['descrgetfunc'].OF +descrsetfunc = h.definitions['descrsetfunc'].OF +initproc = h.definitions['initproc'].OF +newfunc = h.definitions['newfunc'].OF +allocfunc = h.definitions['allocfunc'].OF -unaryfunc = h.definitions['unaryfunc'] -binaryfunc = h.definitions['binaryfunc'] -ternaryfunc = h.definitions['ternaryfunc'] -inquiry = h.definitions['inquiry'] -lenfunc = h.definitions['lenfunc'] -coercion = h.definitions['coercion'] -intargfunc = h.definitions['intargfunc'] -intintargfunc = h.definitions['intintargfunc'] -ssizeargfunc = h.definitions['ssizeargfunc'] -ssizessizeargfunc = h.definitions['ssizessizeargfunc'] -intobjargproc = h.definitions['intobjargproc'] -intintobjargproc = h.definitions['intintobjargproc'] -ssizeobjargproc = h.definitions['ssizeobjargproc'] -ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'] -objobjargproc = h.definitions['objobjargproc'] +unaryfunc = h.definitions['unaryfunc'].OF +binaryfunc = h.definitions['binaryfunc'].OF +ternaryfunc = h.definitions['ternaryfunc'].OF +inquiry = h.definitions['inquiry'].OF +lenfunc = h.definitions['lenfunc'].OF +coercion = h.definitions['coercion'].OF +intargfunc = h.definitions['intargfunc'].OF +intintargfunc = h.definitions['intintargfunc'].OF +ssizeargfunc = h.definitions['ssizeargfunc'].OF +ssizessizeargfunc = h.definitions['ssizessizeargfunc'].OF +intobjargproc = h.definitions['intobjargproc'].OF +intintobjargproc = h.definitions['intintobjargproc'].OF +ssizeobjargproc = 
h.definitions['ssizeobjargproc'].OF +ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'].OF +objobjargproc = h.definitions['objobjargproc'].OF -objobjproc = h.definitions['objobjproc'] -visitproc = h.definitions['visitproc'] -traverseproc = h.definitions['traverseproc'] +objobjproc = h.definitions['objobjproc'].OF +visitproc = h.definitions['visitproc'].OF +traverseproc = h.definitions['traverseproc'].OF -getter = h.definitions['getter'] -setter = h.definitions['setter'] +getter = h.definitions['getter'].OF +setter = h.definitions['setter'].OF #wrapperfunc = h.definitions['wrapperfunc'] #wrapperfunc_kwds = h.definitions['wrapperfunc_kwds'] -readbufferproc = h.definitions['readbufferproc'] -writebufferproc = h.definitions['writebufferproc'] -segcountproc = h.definitions['segcountproc'] -charbufferproc = h.definitions['charbufferproc'] -getbufferproc = h.definitions['getbufferproc'] -releasebufferproc = h.definitions['releasebufferproc'] +readbufferproc = h.definitions['readbufferproc'].OF +writebufferproc = h.definitions['writebufferproc'].OF +segcountproc = h.definitions['segcountproc'].OF +charbufferproc = h.definitions['charbufferproc'].OF +getbufferproc = h.definitions['getbufferproc'].OF +releasebufferproc = h.definitions['releasebufferproc'].OF PyGetSetDef = h.definitions['PyGetSetDef'].OF From pypy.commits at gmail.com Sat Dec 17 22:54:55 2016 From: pypy.commits at gmail.com (rlamy) Date: Sat, 17 Dec 2016 19:54:55 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: INT vs INT_real mess Message-ID: <5856088f.ca57c20a.96102.5a4b@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89134:49c4b821f3fe Date: 2016-12-18 03:42 +0000 http://bitbucket.org/pypy/pypy/changeset/49c4b821f3fe/ Log: INT vs INT_real mess diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -657,6 +657,7 @@ CNAME_TO_LLTYPE[name] = rfficache.platform.types[rname] 
add_inttypes() +CNAME_TO_LLTYPE['int'] = rffi.INT_real def cname_to_lltype(name): return CNAME_TO_LLTYPE[name] From pypy.commits at gmail.com Sun Dec 18 07:04:46 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 04:04:46 -0800 (PST) Subject: [pypy-commit] pypy default: Test and fix: ASSERT_NOT_NONE should not have any effect with the heapcache Message-ID: <58567b5e.d39a1c0a.25851.f7fe@mx.google.com> Author: Armin Rigo Branch: Changeset: r89136:db7c07b019cd Date: 2016-12-18 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/db7c07b019cd/ Log: Test and fix: ASSERT_NOT_NONE should not have any effect with the heapcache diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -230,7 +230,8 @@ opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and - opnum != rop.INSTANCE_PTR_NE): + opnum != rop.INSTANCE_PTR_NE and + opnum != rop.ASSERT_NOT_NONE): for box in argboxes: self._escape_box(box) @@ -263,7 +264,8 @@ opnum == rop.SETFIELD_RAW or opnum == rop.SETARRAYITEM_RAW or opnum == rop.SETINTERIORFIELD_RAW or - opnum == rop.RAW_STORE): + opnum == rop.RAW_STORE or + opnum == rop.ASSERT_NOT_NONE): return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3510,6 +3510,7 @@ self.check_resops(call_f=1) def test_look_inside_iff_virtual(self): + from rpython.rlib.debug import ll_assert_not_none # There's no good reason for this to be look_inside_iff, but it's a test! 
@look_inside_iff(lambda arg, n: isvirtual(arg)) def f(arg, n): @@ -3529,7 +3530,7 @@ if n == 0: i += f(a, n) else: - i += f(A(2), n) + i += f(ll_assert_not_none(A(2)), n) res = self.meta_interp(main, [0], enable_opts='') assert res == main(0) self.check_resops(call_i=1, getfield_gc_i=0) From pypy.commits at gmail.com Sun Dec 18 07:35:57 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 04:35:57 -0800 (PST) Subject: [pypy-commit] pypy default: Add HF_KNOWN_NULLITY in two more cases: in new() and in Message-ID: <585682ad.c4811c0a.16ab2.713a@mx.google.com> Author: Armin Rigo Branch: Changeset: r89137:33a1651e9061 Date: 2016-12-18 13:33 +0100 http://bitbucket.org/pypy/pypy/changeset/33a1651e9061/ Log: Add HF_KNOWN_NULLITY in two more cases: in new() and in class_now_known(). I think it's a safe way to avoid a couple of extra GUARD_NONNULLs. diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -373,7 +373,7 @@ def class_now_known(self, box): if isinstance(box, Const): return - self._set_flag(box, HF_KNOWN_CLASS) + self._set_flag(box, HF_KNOWN_CLASS | HF_KNOWN_NULLITY) def is_nullity_known(self, box): if isinstance(box, Const): @@ -403,7 +403,8 @@ def new(self, box): assert isinstance(box, RefFrontendOp) self.update_version(box) - add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED) + add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED + | HF_KNOWN_NULLITY) def new_array(self, box, lengthbox): self.new(box) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -83,6 +83,19 @@ assert not h.is_nullity_known(box1) assert not h.is_nullity_known(box2) + def test_known_nullity_more_cases(self): + h = HeapCache() + box1 = RefFrontendOp(1) + box2 
= RefFrontendOp(2) + h.class_now_known(box1) + assert h.is_nullity_known(box1) + + h.new(box2) + assert h.is_nullity_known(box2) + + h.reset() + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) def test_nonstandard_virtualizable(self): h = HeapCache() From pypy.commits at gmail.com Sun Dec 18 08:14:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 05:14:48 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: fix Message-ID: <58568bc8.c4811c0a.16ab2.7c22@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89138:c53d7710ec54 Date: 2016-12-18 13:14 +0000 http://bitbucket.org/pypy/pypy/changeset/c53d7710ec54/ Log: fix diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -911,7 +911,7 @@ """) h.configure_types() -Py_ssize_t = h.definitions['Py_ssize_t'] +Py_ssize_t = h.definitions['Py_ssize_t'].OF Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed From pypy.commits at gmail.com Sun Dec 18 08:34:53 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 05:34:53 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Export the type itself in ParsedSource().definitions, we never want lltype.Typedef objects Message-ID: <5856907d.6737c20a.6877a.f109@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89139:539abd4173a3 Date: 2016-12-18 13:34 +0000 http://bitbucket.org/pypy/pypy/changeset/539abd4173a3/ Log: Export the type itself in ParsedSource().definitions, we never want lltype.Typedef objects diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -911,7 +911,7 @@ """) h.configure_types() -Py_ssize_t = h.definitions['Py_ssize_t'].OF +Py_ssize_t = h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -919,18 +919,18 @@ # Note: as a special case, 
"PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. -PyTypeObject = h.definitions['PyTypeObject'].OF +PyTypeObject = h.definitions['PyTypeObject'] PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = h.definitions['PyObject'].OF +PyObjectStruct = h.definitions['PyObject'] PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -PyVarObjectStruct = h.definitions['PyVarObject'].OF +PyVarObjectStruct = h.definitions['PyVarObject'] PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = h.definitions['Py_buffer'].OF +Py_buffer = h.definitions['Py_buffer'] Py_bufferP = lltype.Ptr(Py_buffer) getbufferproc = h.definitions['getbufferproc'] releasebufferproc = h.definitions['releasebufferproc'] diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -690,7 +690,7 @@ if isinstance(tp, DelayedStruct): self.realize_struct(tp, name) tp = self.structs[obj] = tp.TYPE - self.definitions[name] = lltype.Typedef(tp, name) + self.definitions[name] = tp def add_macro(self, name, value): assert name not in self.macros diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,62 +1,62 @@ from pypy.module.cpyext.api import h -freefunc = h.definitions['freefunc'].OF -destructor = h.definitions['destructor'].OF -printfunc = h.definitions['printfunc'].OF -getattrfunc = h.definitions['getattrfunc'].OF -getattrofunc = h.definitions['getattrofunc'].OF -setattrfunc = h.definitions['setattrfunc'].OF -setattrofunc = h.definitions['setattrofunc'].OF -cmpfunc = h.definitions['cmpfunc'].OF -reprfunc = 
h.definitions['reprfunc'].OF -hashfunc = h.definitions['hashfunc'].OF -richcmpfunc = h.definitions['richcmpfunc'].OF -getiterfunc = h.definitions['getiterfunc'].OF -iternextfunc = h.definitions['iternextfunc'].OF -descrgetfunc = h.definitions['descrgetfunc'].OF -descrsetfunc = h.definitions['descrsetfunc'].OF -initproc = h.definitions['initproc'].OF -newfunc = h.definitions['newfunc'].OF -allocfunc = h.definitions['allocfunc'].OF +freefunc = h.definitions['freefunc'] +destructor = h.definitions['destructor'] +printfunc = h.definitions['printfunc'] +getattrfunc = h.definitions['getattrfunc'] +getattrofunc = h.definitions['getattrofunc'] +setattrfunc = h.definitions['setattrfunc'] +setattrofunc = h.definitions['setattrofunc'] +cmpfunc = h.definitions['cmpfunc'] +reprfunc = h.definitions['reprfunc'] +hashfunc = h.definitions['hashfunc'] +richcmpfunc = h.definitions['richcmpfunc'] +getiterfunc = h.definitions['getiterfunc'] +iternextfunc = h.definitions['iternextfunc'] +descrgetfunc = h.definitions['descrgetfunc'] +descrsetfunc = h.definitions['descrsetfunc'] +initproc = h.definitions['initproc'] +newfunc = h.definitions['newfunc'] +allocfunc = h.definitions['allocfunc'] -unaryfunc = h.definitions['unaryfunc'].OF -binaryfunc = h.definitions['binaryfunc'].OF -ternaryfunc = h.definitions['ternaryfunc'].OF -inquiry = h.definitions['inquiry'].OF -lenfunc = h.definitions['lenfunc'].OF -coercion = h.definitions['coercion'].OF -intargfunc = h.definitions['intargfunc'].OF -intintargfunc = h.definitions['intintargfunc'].OF -ssizeargfunc = h.definitions['ssizeargfunc'].OF -ssizessizeargfunc = h.definitions['ssizessizeargfunc'].OF -intobjargproc = h.definitions['intobjargproc'].OF -intintobjargproc = h.definitions['intintobjargproc'].OF -ssizeobjargproc = h.definitions['ssizeobjargproc'].OF -ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'].OF -objobjargproc = h.definitions['objobjargproc'].OF +unaryfunc = h.definitions['unaryfunc'] +binaryfunc = 
h.definitions['binaryfunc'] +ternaryfunc = h.definitions['ternaryfunc'] +inquiry = h.definitions['inquiry'] +lenfunc = h.definitions['lenfunc'] +coercion = h.definitions['coercion'] +intargfunc = h.definitions['intargfunc'] +intintargfunc = h.definitions['intintargfunc'] +ssizeargfunc = h.definitions['ssizeargfunc'] +ssizessizeargfunc = h.definitions['ssizessizeargfunc'] +intobjargproc = h.definitions['intobjargproc'] +intintobjargproc = h.definitions['intintobjargproc'] +ssizeobjargproc = h.definitions['ssizeobjargproc'] +ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'] +objobjargproc = h.definitions['objobjargproc'] -objobjproc = h.definitions['objobjproc'].OF -visitproc = h.definitions['visitproc'].OF -traverseproc = h.definitions['traverseproc'].OF +objobjproc = h.definitions['objobjproc'] +visitproc = h.definitions['visitproc'] +traverseproc = h.definitions['traverseproc'] -getter = h.definitions['getter'].OF -setter = h.definitions['setter'].OF +getter = h.definitions['getter'] +setter = h.definitions['setter'] #wrapperfunc = h.definitions['wrapperfunc'] #wrapperfunc_kwds = h.definitions['wrapperfunc_kwds'] -readbufferproc = h.definitions['readbufferproc'].OF -writebufferproc = h.definitions['writebufferproc'].OF -segcountproc = h.definitions['segcountproc'].OF -charbufferproc = h.definitions['charbufferproc'].OF -getbufferproc = h.definitions['getbufferproc'].OF -releasebufferproc = h.definitions['releasebufferproc'].OF +readbufferproc = h.definitions['readbufferproc'] +writebufferproc = h.definitions['writebufferproc'] +segcountproc = h.definitions['segcountproc'] +charbufferproc = h.definitions['charbufferproc'] +getbufferproc = h.definitions['getbufferproc'] +releasebufferproc = h.definitions['releasebufferproc'] -PyGetSetDef = h.definitions['PyGetSetDef'].OF -PyNumberMethods = h.definitions['PyNumberMethods'].OF -PySequenceMethods = h.definitions['PySequenceMethods'].OF -PyMappingMethods = h.definitions['PyMappingMethods'].OF -PyBufferProcs = 
h.definitions['PyBufferProcs'].OF -PyMemberDef = h.definitions['PyMemberDef'].OF +PyGetSetDef = h.definitions['PyGetSetDef'] +PyNumberMethods = h.definitions['PyNumberMethods'] +PySequenceMethods = h.definitions['PySequenceMethods'] +PyMappingMethods = h.definitions['PyMappingMethods'] +PyBufferProcs = h.definitions['PyBufferProcs'] +PyMemberDef = h.definitions['PyMemberDef'] From pypy.commits at gmail.com Sun Dec 18 08:44:49 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 05:44:49 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Update test Message-ID: <585692d1.4438c20a.b4b80.f8c8@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89140:849d88015265 Date: 2016-12-18 13:44 +0000 http://bitbucket.org/pypy/pypy/changeset/849d88015265/ Log: Update test diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -90,9 +90,9 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) - == ('Py_ssize_t', 'Py_ssize_t arg0')) + == ('Signed', 'Signed arg0')) assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) - == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + == ('Signed *', 'Signed *arg0')) PyPy_TypedefTest1(space, 0) ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') @@ -100,7 +100,7 @@ PyPy_TypedefTest2(space, ppos) lltype.free(ppos, flavor='raw') - at pytest.mark.skipif(os.environ.get('USER')=='root', + at pytest.mark.skipif(os.environ.get('USER')=='root', reason='root can write to all files') def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir, True) From pypy.commits at gmail.com Sun Dec 18 09:05:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 06:05:10 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Use cffi's copy of pycparser Message-ID: 
<58569796.a351c20a.48d77.fde7@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89141:cc761eef5b5a Date: 2016-12-18 14:03 +0000 http://bitbucket.org/pypy/pypy/changeset/cc761eef5b5a/ Log: Use cffi's copy of pycparser diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -1,7 +1,10 @@ from collections import OrderedDict from cffi import api, model from cffi.commontypes import COMMON_TYPES, resolve_common_type -import pycparser +try: + from cffi import _pycparser as pycparser +except ImportError: + import pycparser import weakref, re from rpython.rlib.rfile import FILEP from rpython.rtyper.lltypesystem import rffi, lltype From pypy.commits at gmail.com Sun Dec 18 09:05:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 06:05:12 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Rename api.h to object_h Message-ID: <58569798.2350c20a.e5711.f52d@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89142:a09184bcdf35 Date: 2016-12-18 14:04 +0000 http://bitbucket.org/pypy/pypy/changeset/a09184bcdf35/ Log: Rename api.h to object_h diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -600,7 +600,7 @@ % (cpyname, )) build_exported_objects() -h = parse_source(""" +object_h = parse_source(""" typedef ssize_t Py_ssize_t; #define PyObject_HEAD \ @@ -909,9 +909,9 @@ } PyTypeObject; """) -h.configure_types() +object_h.configure_types() -Py_ssize_t = h.definitions['Py_ssize_t'] +Py_ssize_t = object_h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -919,21 +919,21 @@ # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. 
-PyTypeObject = h.definitions['PyTypeObject'] +PyTypeObject = object_h.definitions['PyTypeObject'] PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = h.definitions['PyObject'] +PyObjectStruct = object_h.definitions['PyObject'] PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -PyVarObjectStruct = h.definitions['PyVarObject'] +PyVarObjectStruct = object_h.definitions['PyVarObject'] PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = h.definitions['Py_buffer'] +Py_buffer = object_h.definitions['Py_buffer'] Py_bufferP = lltype.Ptr(Py_buffer) -getbufferproc = h.definitions['getbufferproc'] -releasebufferproc = h.definitions['releasebufferproc'] +getbufferproc = object_h.definitions['getbufferproc'] +releasebufferproc = object_h.definitions['releasebufferproc'] @specialize.memo() diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -1,62 +1,62 @@ -from pypy.module.cpyext.api import h +from pypy.module.cpyext.api import object_h -freefunc = h.definitions['freefunc'] -destructor = h.definitions['destructor'] -printfunc = h.definitions['printfunc'] -getattrfunc = h.definitions['getattrfunc'] -getattrofunc = h.definitions['getattrofunc'] -setattrfunc = h.definitions['setattrfunc'] -setattrofunc = h.definitions['setattrofunc'] -cmpfunc = h.definitions['cmpfunc'] -reprfunc = h.definitions['reprfunc'] -hashfunc = h.definitions['hashfunc'] -richcmpfunc = h.definitions['richcmpfunc'] -getiterfunc = h.definitions['getiterfunc'] -iternextfunc = h.definitions['iternextfunc'] -descrgetfunc = h.definitions['descrgetfunc'] -descrsetfunc = h.definitions['descrsetfunc'] -initproc = h.definitions['initproc'] -newfunc = h.definitions['newfunc'] -allocfunc = h.definitions['allocfunc'] 
+freefunc = object_h.definitions['freefunc'] +destructor = object_h.definitions['destructor'] +printfunc = object_h.definitions['printfunc'] +getattrfunc = object_h.definitions['getattrfunc'] +getattrofunc = object_h.definitions['getattrofunc'] +setattrfunc = object_h.definitions['setattrfunc'] +setattrofunc = object_h.definitions['setattrofunc'] +cmpfunc = object_h.definitions['cmpfunc'] +reprfunc = object_h.definitions['reprfunc'] +hashfunc = object_h.definitions['hashfunc'] +richcmpfunc = object_h.definitions['richcmpfunc'] +getiterfunc = object_h.definitions['getiterfunc'] +iternextfunc = object_h.definitions['iternextfunc'] +descrgetfunc = object_h.definitions['descrgetfunc'] +descrsetfunc = object_h.definitions['descrsetfunc'] +initproc = object_h.definitions['initproc'] +newfunc = object_h.definitions['newfunc'] +allocfunc = object_h.definitions['allocfunc'] -unaryfunc = h.definitions['unaryfunc'] -binaryfunc = h.definitions['binaryfunc'] -ternaryfunc = h.definitions['ternaryfunc'] -inquiry = h.definitions['inquiry'] -lenfunc = h.definitions['lenfunc'] -coercion = h.definitions['coercion'] -intargfunc = h.definitions['intargfunc'] -intintargfunc = h.definitions['intintargfunc'] -ssizeargfunc = h.definitions['ssizeargfunc'] -ssizessizeargfunc = h.definitions['ssizessizeargfunc'] -intobjargproc = h.definitions['intobjargproc'] -intintobjargproc = h.definitions['intintobjargproc'] -ssizeobjargproc = h.definitions['ssizeobjargproc'] -ssizessizeobjargproc = h.definitions['ssizessizeobjargproc'] -objobjargproc = h.definitions['objobjargproc'] +unaryfunc = object_h.definitions['unaryfunc'] +binaryfunc = object_h.definitions['binaryfunc'] +ternaryfunc = object_h.definitions['ternaryfunc'] +inquiry = object_h.definitions['inquiry'] +lenfunc = object_h.definitions['lenfunc'] +coercion = object_h.definitions['coercion'] +intargfunc = object_h.definitions['intargfunc'] +intintargfunc = object_h.definitions['intintargfunc'] +ssizeargfunc = 
object_h.definitions['ssizeargfunc'] +ssizessizeargfunc = object_h.definitions['ssizessizeargfunc'] +intobjargproc = object_h.definitions['intobjargproc'] +intintobjargproc = object_h.definitions['intintobjargproc'] +ssizeobjargproc = object_h.definitions['ssizeobjargproc'] +ssizessizeobjargproc = object_h.definitions['ssizessizeobjargproc'] +objobjargproc = object_h.definitions['objobjargproc'] -objobjproc = h.definitions['objobjproc'] -visitproc = h.definitions['visitproc'] -traverseproc = h.definitions['traverseproc'] +objobjproc = object_h.definitions['objobjproc'] +visitproc = object_h.definitions['visitproc'] +traverseproc = object_h.definitions['traverseproc'] -getter = h.definitions['getter'] -setter = h.definitions['setter'] +getter = object_h.definitions['getter'] +setter = object_h.definitions['setter'] -#wrapperfunc = h.definitions['wrapperfunc'] -#wrapperfunc_kwds = h.definitions['wrapperfunc_kwds'] +#wrapperfunc = object_h.definitions['wrapperfunc'] +#wrapperfunc_kwds = object_h.definitions['wrapperfunc_kwds'] -readbufferproc = h.definitions['readbufferproc'] -writebufferproc = h.definitions['writebufferproc'] -segcountproc = h.definitions['segcountproc'] -charbufferproc = h.definitions['charbufferproc'] -getbufferproc = h.definitions['getbufferproc'] -releasebufferproc = h.definitions['releasebufferproc'] +readbufferproc = object_h.definitions['readbufferproc'] +writebufferproc = object_h.definitions['writebufferproc'] +segcountproc = object_h.definitions['segcountproc'] +charbufferproc = object_h.definitions['charbufferproc'] +getbufferproc = object_h.definitions['getbufferproc'] +releasebufferproc = object_h.definitions['releasebufferproc'] -PyGetSetDef = h.definitions['PyGetSetDef'] -PyNumberMethods = h.definitions['PyNumberMethods'] -PySequenceMethods = h.definitions['PySequenceMethods'] -PyMappingMethods = h.definitions['PyMappingMethods'] -PyBufferProcs = h.definitions['PyBufferProcs'] -PyMemberDef = h.definitions['PyMemberDef'] +PyGetSetDef = 
object_h.definitions['PyGetSetDef'] +PyNumberMethods = object_h.definitions['PyNumberMethods'] +PySequenceMethods = object_h.definitions['PySequenceMethods'] +PyMappingMethods = object_h.definitions['PyMappingMethods'] +PyBufferProcs = object_h.definitions['PyBufferProcs'] +PyMemberDef = object_h.definitions['PyMemberDef'] From pypy.commits at gmail.com Sun Dec 18 09:16:43 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 06:16:43 -0800 (PST) Subject: [pypy-commit] pypy default: Fix for the new ll_assert_not_none() Message-ID: <58569a4b.0777c20a.c29ab.00dc@mx.google.com> Author: Armin Rigo Branch: Changeset: r89143:915f51d48f87 Date: 2016-12-18 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/915f51d48f87/ Log: Fix for the new ll_assert_not_none() diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -66,7 +66,7 @@ w_SystemExit = W_TypeObject("SystemExit") w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_VisibleDeprecationWarning = W_TypeObject("VisibleDeprecationWarning") - w_None = None + w_None = W_Root() w_bool = W_TypeObject("bool") w_int = W_TypeObject("int") From pypy.commits at gmail.com Sun Dec 18 09:17:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 06:17:29 -0800 (PST) Subject: [pypy-commit] pypy default: Remove this implementation of malloc removal, which was never used Message-ID: <58569a79.2a0bc30a.31061.fc86@mx.google.com> Author: Armin Rigo Branch: Changeset: r89144:b89ea292e8c6 Date: 2016-12-18 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/b89ea292e8c6/ Log: Remove this implementation of malloc removal, which was never used diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py deleted file mode 100644 --- a/rpython/translator/backendopt/mallocv.py +++ /dev/null @@ -1,1055 +0,0 @@ -from rpython.flowspace.model import Variable, Constant, 
Block, Link -from rpython.flowspace.model import SpaceOperation, copygraph -from rpython.flowspace.model import checkgraph -from rpython.translator.backendopt.support import log -from rpython.translator.simplify import join_blocks -from rpython.translator.unsimplify import varoftype -from rpython.rtyper.lltypesystem.lltype import getfunctionptr -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lloperation import llop - - -def virtualize_mallocs(translator, graphs, verbose=False): - newgraphs = graphs[:] - mallocv = MallocVirtualizer(newgraphs, translator.rtyper, verbose) - while mallocv.remove_mallocs_once(): - pass - for graph in newgraphs: - checkgraph(graph) - join_blocks(graph) - assert newgraphs[:len(graphs)] == graphs - del newgraphs[:len(graphs)] - translator.graphs.extend(newgraphs) - -# ____________________________________________________________ - - -class MallocTypeDesc(object): - - def __init__(self, MALLOCTYPE): - if not isinstance(MALLOCTYPE, lltype.GcStruct): - raise CannotRemoveThisType - self.MALLOCTYPE = MALLOCTYPE - self.check_no_destructor() - self.names_and_types = [] - self.name2index = {} - self.name2subtype = {} - self.initialize_type(MALLOCTYPE) - #self.immutable_struct = MALLOCTYPE._hints.get('immutable') - - def check_no_destructor(self): - STRUCT = self.MALLOCTYPE - try: - rttiptr = lltype.getRuntimeTypeInfo(STRUCT) - except ValueError: - return # ok - destr_ptr = getattr(rttiptr._obj, 'destructor_funcptr', None) - if destr_ptr: - raise CannotRemoveThisType - - def initialize_type(self, TYPE): - fieldnames = TYPE._names - firstname, FIRSTTYPE = TYPE._first_struct() - if FIRSTTYPE is not None: - self.initialize_type(FIRSTTYPE) - fieldnames = fieldnames[1:] - for name in fieldnames: - FIELDTYPE = TYPE._flds[name] - if isinstance(FIELDTYPE, lltype.ContainerType): - raise CannotRemoveThisType("inlined substructure") - self.name2index[name] = len(self.names_and_types) - self.names_and_types.append((name, 
FIELDTYPE)) - self.name2subtype[name] = TYPE - - -class SpecNode(object): - pass - - -class RuntimeSpecNode(SpecNode): - - def __init__(self, name, TYPE): - self.name = name - self.TYPE = TYPE - - def newvar(self): - v = Variable(self.name) - v.concretetype = self.TYPE - return v - - def getfrozenkey(self, memo): - return 'R' - - def accumulate_nodes(self, rtnodes, vtnodes): - rtnodes.append(self) - - def copy(self, memo, flagreadonly): - return RuntimeSpecNode(self.name, self.TYPE) - - def bind_rt_nodes(self, memo, newnodes_iter): - return newnodes_iter.next() - - -class VirtualSpecNode(SpecNode): - - def __init__(self, typedesc, fields, readonly=False): - self.typedesc = typedesc - self.fields = fields # list of SpecNodes - self.readonly = readonly - - def getfrozenkey(self, memo): - if self in memo: - return memo[self] - else: - memo[self] = len(memo) - result = [self.typedesc, self.readonly] - for subnode in self.fields: - result.append(subnode.getfrozenkey(memo)) - return tuple(result) - - def accumulate_nodes(self, rtnodes, vtnodes): - if self in vtnodes: - return - vtnodes[self] = True - for subnode in self.fields: - subnode.accumulate_nodes(rtnodes, vtnodes) - - def copy(self, memo, flagreadonly): - if self in memo: - return memo[self] - readonly = self.readonly or self in flagreadonly - newnode = VirtualSpecNode(self.typedesc, [], readonly) - memo[self] = newnode - for subnode in self.fields: - newnode.fields.append(subnode.copy(memo, flagreadonly)) - return newnode - - def bind_rt_nodes(self, memo, newnodes_iter): - if self in memo: - return memo[self] - newnode = VirtualSpecNode(self.typedesc, [], self.readonly) - memo[self] = newnode - for subnode in self.fields: - newnode.fields.append(subnode.bind_rt_nodes(memo, newnodes_iter)) - return newnode - - -class VirtualFrame(object): - - def __init__(self, sourceblock, nextopindex, - allnodes, callerframe=None, calledgraphs={}): - if isinstance(allnodes, dict): - self.varlist = 
vars_alive_through_op(sourceblock, nextopindex) - self.nodelist = [allnodes[v] for v in self.varlist] - else: - assert nextopindex == 0 - self.varlist = sourceblock.inputargs - self.nodelist = allnodes[:] - self.sourceblock = sourceblock - self.nextopindex = nextopindex - self.callerframe = callerframe - self.calledgraphs = calledgraphs - - def get_nodes_in_use(self): - return dict(zip(self.varlist, self.nodelist)) - - def shallowcopy(self): - newframe = VirtualFrame.__new__(VirtualFrame) - newframe.varlist = self.varlist - newframe.nodelist = self.nodelist - newframe.sourceblock = self.sourceblock - newframe.nextopindex = self.nextopindex - newframe.callerframe = self.callerframe - newframe.calledgraphs = self.calledgraphs - return newframe - - def copy(self, memo, flagreadonly={}): - newframe = self.shallowcopy() - newframe.nodelist = [node.copy(memo, flagreadonly) - for node in newframe.nodelist] - if newframe.callerframe is not None: - newframe.callerframe = newframe.callerframe.copy(memo, - flagreadonly) - return newframe - - def enum_call_stack(self): - frame = self - while frame is not None: - yield frame - frame = frame.callerframe - - def getfrozenkey(self): - memo = {} - key = [] - for frame in self.enum_call_stack(): - key.append(frame.sourceblock) - key.append(frame.nextopindex) - for node in frame.nodelist: - key.append(node.getfrozenkey(memo)) - return tuple(key) - - def find_all_nodes(self): - rtnodes = [] - vtnodes = {} - for frame in self.enum_call_stack(): - for node in frame.nodelist: - node.accumulate_nodes(rtnodes, vtnodes) - return rtnodes, vtnodes - - def find_rt_nodes(self): - rtnodes, vtnodes = self.find_all_nodes() - return rtnodes - - def find_vt_nodes(self): - rtnodes, vtnodes = self.find_all_nodes() - return vtnodes - - -def copynodes(nodelist, flagreadonly={}): - memo = {} - return [node.copy(memo, flagreadonly) for node in nodelist] - -def find_all_nodes(nodelist): - rtnodes = [] - vtnodes = {} - for node in nodelist: - 
node.accumulate_nodes(rtnodes, vtnodes) - return rtnodes, vtnodes - -def is_trivial_nodelist(nodelist): - for node in nodelist: - if not isinstance(node, RuntimeSpecNode): - return False - return True - -def bind_rt_nodes(srcnodelist, newnodes_list): - """Return srcnodelist with all RuntimeNodes replaced by nodes - coming from newnodes_list. - """ - memo = {} - newnodes_iter = iter(newnodes_list) - result = [node.bind_rt_nodes(memo, newnodes_iter) for node in srcnodelist] - rest = list(newnodes_iter) - assert rest == [], "too many nodes in newnodes_list" - return result - - -class CannotVirtualize(Exception): - pass - -class ForcedInline(Exception): - pass - -class CannotRemoveThisType(Exception): - pass - -# ____________________________________________________________ - - -class MallocVirtualizer(object): - - def __init__(self, graphs, rtyper, verbose=False): - self.graphs = graphs - self.rtyper = rtyper - self.excdata = rtyper.exceptiondata - self.graphbuilders = {} - self.specialized_graphs = {} - self.specgraphorigin = {} - self.inline_and_remove = {} # {graph: op_to_remove} - self.inline_and_remove_seen = {} # set of (graph, op_to_remove) - self.malloctypedescs = {} - self.count_virtualized = 0 - self.verbose = verbose - self.EXCTYPE_to_vtable = self.build_obscure_mapping() - - def build_obscure_mapping(self): - result = {} - for rinstance in self.rtyper.instance_reprs.values(): - result[rinstance.lowleveltype.TO] = rinstance.rclass.getvtable() - return result - - def report_result(self, progress): - if progress: - log.mallocv('removed %d mallocs so far' % self.count_virtualized) - else: - log.mallocv('done') - - def enum_all_mallocs(self, graph): - for block in graph.iterblocks(): - for op in block.operations: - if op.opname == 'malloc': - MALLOCTYPE = op.result.concretetype.TO - try: - self.getmalloctypedesc(MALLOCTYPE) - except CannotRemoveThisType: - pass - else: - yield (block, op) - elif op.opname == 'direct_call': - graph = graph_called_by(op) - if 
graph in self.inline_and_remove: - yield (block, op) - - def remove_mallocs_once(self): - self.flush_failed_specializations() - prev = self.count_virtualized - count_inline_and_remove = len(self.inline_and_remove) - for graph in self.graphs: - seen = {} - while True: - for block, op in self.enum_all_mallocs(graph): - if op.result not in seen: - seen[op.result] = True - if self.try_remove_malloc(graph, block, op): - break # graph mutated, restart enum_all_mallocs() - else: - break # enum_all_mallocs() exhausted, graph finished - progress1 = self.count_virtualized - prev - progress2 = len(self.inline_and_remove) - count_inline_and_remove - progress = progress1 or bool(progress2) - self.report_result(progress) - return progress - - def flush_failed_specializations(self): - for key, (mode, specgraph) in self.specialized_graphs.items(): - if mode == 'fail': - del self.specialized_graphs[key] - - def fixup_except_block(self, exceptblock): - # hack: this block's inputargs may be missing concretetypes... 
- e1, v1 = exceptblock.inputargs - e1.concretetype = self.excdata.lltype_of_exception_type - v1.concretetype = self.excdata.lltype_of_exception_value - - def getmalloctypedesc(self, MALLOCTYPE): - try: - dsc = self.malloctypedescs[MALLOCTYPE] - except KeyError: - dsc = self.malloctypedescs[MALLOCTYPE] = MallocTypeDesc(MALLOCTYPE) - return dsc - - def try_remove_malloc(self, graph, block, op): - if (graph, op) in self.inline_and_remove_seen: - return False # no point in trying again - graphbuilder = GraphBuilder(self, graph) - if graph in self.graphbuilders: - graphbuilder.initialize_from_old_builder(self.graphbuilders[graph]) - graphbuilder.start_from_a_malloc(graph, block, op.result) - try: - graphbuilder.propagate_specializations() - except CannotVirtualize as e: - self.logresult(op, 'failed', e) - return False - except ForcedInline as e: - self.logresult(op, 'forces inlining', e) - self.inline_and_remove[graph] = op - self.inline_and_remove_seen[graph, op] = True - return False - else: - self.logresult(op, 'removed') - graphbuilder.finished_removing_malloc() - self.graphbuilders[graph] = graphbuilder - self.count_virtualized += 1 - return True - - def logresult(self, op, msg, exc=None): # only for nice log outputs - if self.verbose: - if exc is None: - exc = '' - else: - exc = ': %s' % (exc,) - chain = [] - while True: - chain.append(str(op.result)) - if op.opname != 'direct_call': - break - fobj = op.args[0].value._obj - op = self.inline_and_remove[fobj.graph] - log.mallocv('%s %s%s' % ('->'.join(chain), msg, exc)) - elif exc is None: - log.dot() - - def get_specialized_graph(self, graph, nodelist): - assert len(graph.getargs()) == len(nodelist) - if is_trivial_nodelist(nodelist): - return 'trivial', graph - if graph in self.specgraphorigin: - orggraph, orgnodelist = self.specgraphorigin[graph] - nodelist = bind_rt_nodes(orgnodelist, nodelist) - graph = orggraph - virtualframe = VirtualFrame(graph.startblock, 0, nodelist) - key = virtualframe.getfrozenkey() - 
try: - return self.specialized_graphs[key] - except KeyError: - self.build_specialized_graph(graph, key, nodelist) - return self.specialized_graphs[key] - - def build_specialized_graph(self, graph, key, nodelist): - graph2 = copygraph(graph) - virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) - graphbuilder = GraphBuilder(self, graph2) - specblock = graphbuilder.start_from_virtualframe(virtualframe) - specgraph = graph2 - specgraph.name += '_mallocv' - specgraph.startblock = specblock - self.specialized_graphs[key] = ('call', specgraph) - try: - graphbuilder.propagate_specializations() - except ForcedInline as e: - if self.verbose: - log.mallocv('%s inlined: %s' % (graph.name, e)) - self.specialized_graphs[key] = ('inline', None) - except CannotVirtualize as e: - if self.verbose: - log.mallocv('%s failing: %s' % (graph.name, e)) - self.specialized_graphs[key] = ('fail', None) - else: - self.graphbuilders[specgraph] = graphbuilder - self.specgraphorigin[specgraph] = graph, nodelist - self.graphs.append(specgraph) - - -class GraphBuilder(object): - - def __init__(self, mallocv, graph): - self.mallocv = mallocv - self.graph = graph - self.specialized_blocks = {} - self.pending_specializations = [] - - def initialize_from_old_builder(self, oldbuilder): - self.specialized_blocks.update(oldbuilder.specialized_blocks) - - def start_from_virtualframe(self, startframe): - spec = BlockSpecializer(self) - spec.initialize_renamings(startframe) - self.pending_specializations.append(spec) - return spec.specblock - - def start_from_a_malloc(self, graph, block, v_result): - assert v_result in [op.result for op in block.operations] - nodelist = [] - for v in block.inputargs: - nodelist.append(RuntimeSpecNode(v, v.concretetype)) - trivialframe = VirtualFrame(block, 0, nodelist) - spec = BlockSpecializer(self, v_result) - spec.initialize_renamings(trivialframe, keep_inputargs=True) - self.pending_specializations.append(spec) - self.pending_patch = (block, spec.specblock) - 
- def finished_removing_malloc(self): - (srcblock, specblock) = self.pending_patch - srcblock.inputargs = specblock.inputargs - srcblock.operations = specblock.operations - srcblock.exitswitch = specblock.exitswitch - srcblock.recloseblock(*specblock.exits) - - def create_outgoing_link(self, currentframe, targetblock, - nodelist, renamings, v_expand_malloc=None): - assert len(nodelist) == len(targetblock.inputargs) - # - if is_except(targetblock): - v_expand_malloc = None - while currentframe.callerframe is not None: - currentframe = currentframe.callerframe - newlink = self.handle_catch(currentframe, nodelist, renamings) - if newlink: - return newlink - else: - targetblock = self.exception_escapes(nodelist, renamings) - assert len(nodelist) == len(targetblock.inputargs) - - if (currentframe.callerframe is None and - is_trivial_nodelist(nodelist)): - # there is no more VirtualSpecNodes being passed around, - # so we can stop specializing - rtnodes = nodelist - specblock = targetblock - else: - if is_return(targetblock): - v_expand_malloc = None - newframe = self.return_to_caller(currentframe, nodelist[0]) - else: - targetnodes = dict(zip(targetblock.inputargs, nodelist)) - newframe = VirtualFrame(targetblock, 0, targetnodes, - callerframe=currentframe.callerframe, - calledgraphs=currentframe.calledgraphs) - rtnodes = newframe.find_rt_nodes() - specblock = self.get_specialized_block(newframe, v_expand_malloc) - - linkargs = [renamings[rtnode] for rtnode in rtnodes] - return Link(linkargs, specblock) - - def return_to_caller(self, currentframe, retnode): - callerframe = currentframe.callerframe - if callerframe is None: - raise ForcedInline("return block") - nodelist = callerframe.nodelist - callerframe = callerframe.shallowcopy() - callerframe.nodelist = [] - for node in nodelist: - if isinstance(node, FutureReturnValue): - node = retnode - callerframe.nodelist.append(node) - return callerframe - - def handle_catch(self, catchingframe, nodelist, renamings): - if not 
self.has_exception_catching(catchingframe): - return None - [exc_node, exc_value_node] = nodelist - v_exc_type = renamings.get(exc_node) - if isinstance(v_exc_type, Constant): - exc_type = v_exc_type.value - elif isinstance(exc_value_node, VirtualSpecNode): - EXCTYPE = exc_value_node.typedesc.MALLOCTYPE - exc_type = self.mallocv.EXCTYPE_to_vtable[EXCTYPE] - else: - raise CannotVirtualize("raising non-constant exc type") - excdata = self.mallocv.excdata - assert catchingframe.sourceblock.exits[0].exitcase is None - for catchlink in catchingframe.sourceblock.exits[1:]: - if excdata.fn_exception_match(exc_type, catchlink.llexitcase): - # Match found. Follow this link. - mynodes = catchingframe.get_nodes_in_use() - for node, attr in zip(nodelist, - ['last_exception', 'last_exc_value']): - v = getattr(catchlink, attr) - if isinstance(v, Variable): - mynodes[v] = node - # - nodelist = [] - for v in catchlink.args: - if isinstance(v, Variable): - node = mynodes[v] - else: - node = getconstnode(v, renamings) - nodelist.append(node) - return self.create_outgoing_link(catchingframe, - catchlink.target, - nodelist, renamings) - else: - # No match at all, propagate the exception to the caller - return None - - def has_exception_catching(self, catchingframe): - if not catchingframe.sourceblock.canraise: - return False - else: - operations = catchingframe.sourceblock.operations - assert 1 <= catchingframe.nextopindex <= len(operations) - return catchingframe.nextopindex == len(operations) - - def exception_escapes(self, nodelist, renamings): - # the exception escapes - if not is_trivial_nodelist(nodelist): - # start of hacks to help handle_catch() - [exc_node, exc_value_node] = nodelist - v_exc_type = renamings.get(exc_node) - if isinstance(v_exc_type, Constant): - # cannot improve: handle_catch() would already be happy - # by seeing the exc_type as a constant - pass - elif isinstance(exc_value_node, VirtualSpecNode): - # can improve with a strange hack: we pretend that - # the 
source code jumps to a block that itself allocates - # the exception, sets all fields, and raises it by - # passing a constant type. - typedesc = exc_value_node.typedesc - return self.get_exc_reconstruction_block(typedesc) - else: - # cannot improve: handle_catch() will have no clue about - # the exception type - pass - raise CannotVirtualize("except block") - targetblock = self.graph.exceptblock - self.mallocv.fixup_except_block(targetblock) - return targetblock - - def get_exc_reconstruction_block(self, typedesc): - exceptblock = self.graph.exceptblock - self.mallocv.fixup_except_block(exceptblock) - TEXC = exceptblock.inputargs[0].concretetype - TVAL = exceptblock.inputargs[1].concretetype - # - v_ignored_type = varoftype(TEXC) - v_incoming_value = varoftype(TVAL) - block = Block([v_ignored_type, v_incoming_value]) - # - c_EXCTYPE = Constant(typedesc.MALLOCTYPE, lltype.Void) - v = varoftype(lltype.Ptr(typedesc.MALLOCTYPE)) - c_flavor = Constant({'flavor': 'gc'}, lltype.Void) - op = SpaceOperation('malloc', [c_EXCTYPE, c_flavor], v) - block.operations.append(op) - # - for name, FIELDTYPE in typedesc.names_and_types: - EXACTPTR = lltype.Ptr(typedesc.name2subtype[name]) - c_name = Constant(name) - c_name.concretetype = lltype.Void - # - v_in = varoftype(EXACTPTR) - op = SpaceOperation('cast_pointer', [v_incoming_value], v_in) - block.operations.append(op) - # - v_field = varoftype(FIELDTYPE) - op = SpaceOperation('getfield', [v_in, c_name], v_field) - block.operations.append(op) - # - v_out = varoftype(EXACTPTR) - op = SpaceOperation('cast_pointer', [v], v_out) - block.operations.append(op) - # - v0 = varoftype(lltype.Void) - op = SpaceOperation('setfield', [v_out, c_name, v_field], v0) - block.operations.append(op) - # - v_exc_value = varoftype(TVAL) - op = SpaceOperation('cast_pointer', [v], v_exc_value) - block.operations.append(op) - # - exc_type = self.mallocv.EXCTYPE_to_vtable[typedesc.MALLOCTYPE] - c_exc_type = Constant(exc_type, TEXC) - 
block.closeblock(Link([c_exc_type, v_exc_value], exceptblock)) - return block - - def get_specialized_block(self, virtualframe, v_expand_malloc=None): - key = virtualframe.getfrozenkey() - specblock = self.specialized_blocks.get(key) - if specblock is None: - orgblock = virtualframe.sourceblock - assert len(orgblock.exits) != 0 - spec = BlockSpecializer(self, v_expand_malloc) - spec.initialize_renamings(virtualframe) - self.pending_specializations.append(spec) - specblock = spec.specblock - self.specialized_blocks[key] = specblock - return specblock - - def propagate_specializations(self): - while self.pending_specializations: - spec = self.pending_specializations.pop() - spec.specialize_operations() - spec.follow_exits() - - -class BlockSpecializer(object): - - def __init__(self, graphbuilder, v_expand_malloc=None): - self.graphbuilder = graphbuilder - self.v_expand_malloc = v_expand_malloc - self.specblock = Block([]) - - def initialize_renamings(self, virtualframe, keep_inputargs=False): - # we make a copy of the original 'virtualframe' because the - # specialize_operations() will mutate some of its content. 
- virtualframe = virtualframe.copy({}) - self.virtualframe = virtualframe - self.nodes = virtualframe.get_nodes_in_use() - self.renamings = {} # {RuntimeSpecNode(): Variable()} - if keep_inputargs: - assert virtualframe.varlist == virtualframe.sourceblock.inputargs - specinputargs = [] - for i, rtnode in enumerate(virtualframe.find_rt_nodes()): - if keep_inputargs: - v = virtualframe.varlist[i] - assert v.concretetype == rtnode.TYPE - else: - v = rtnode.newvar() - self.renamings[rtnode] = v - specinputargs.append(v) - self.specblock.inputargs = specinputargs - - def setnode(self, v, node): - assert v not in self.nodes - self.nodes[v] = node - - def getnode(self, v): - if isinstance(v, Variable): - return self.nodes[v] - else: - return getconstnode(v, self.renamings) - - def rename_nonvirtual(self, v, where=None): - if not isinstance(v, Variable): - return v - node = self.nodes[v] - if not isinstance(node, RuntimeSpecNode): - raise CannotVirtualize(where) - return self.renamings[node] - - def expand_nodes(self, nodelist): - rtnodes, vtnodes = find_all_nodes(nodelist) - return [self.renamings[rtnode] for rtnode in rtnodes] - - def specialize_operations(self): - newoperations = [] - self.ops_produced_by_last_op = 0 - # note that 'self.virtualframe' can be changed during the loop! 
- while True: - operations = self.virtualframe.sourceblock.operations - try: - op = operations[self.virtualframe.nextopindex] - self.virtualframe.nextopindex += 1 - except IndexError: - break - - meth = getattr(self, 'handle_op_' + op.opname, - self.handle_default) - newops_for_this_op = meth(op) - newoperations += newops_for_this_op - self.ops_produced_by_last_op = len(newops_for_this_op) - for op in newoperations: - if op.opname == 'direct_call': - graph = graph_called_by(op) - if graph in self.virtualframe.calledgraphs: - raise CannotVirtualize("recursion in residual call") - self.specblock.operations = newoperations - - def follow_exits(self): - block = self.virtualframe.sourceblock - self.specblock.exitswitch = self.rename_nonvirtual(block.exitswitch, - 'exitswitch') - links = block.exits - catch_exc = self.specblock.canraise - - if not catch_exc and isinstance(self.specblock.exitswitch, Constant): - # constant-fold the switch - for link in links: - if link.exitcase == 'default': - break - if link.llexitcase == self.specblock.exitswitch.value: - break - else: - raise Exception("exit case not found?") - links = (link,) - self.specblock.exitswitch = None - - if catch_exc and self.ops_produced_by_last_op == 0: - # the last op of the sourceblock did not produce any - # operation in specblock, so we need to discard the - # exception-catching. 
- catch_exc = False - links = links[:1] - assert links[0].exitcase is None # the non-exception-catching case - self.specblock.exitswitch = None - - newlinks = [] - for link in links: - is_catch_link = catch_exc and link.exitcase is not None - if is_catch_link: - extravars = [] - for attr in ['last_exception', 'last_exc_value']: - v = getattr(link, attr) - if isinstance(v, Variable): - rtnode = RuntimeSpecNode(v, v.concretetype) - self.setnode(v, rtnode) - self.renamings[rtnode] = v = rtnode.newvar() - extravars.append(v) - - linkargsnodes = [self.getnode(v1) for v1 in link.args] - # - newlink = self.graphbuilder.create_outgoing_link( - self.virtualframe, link.target, linkargsnodes, - self.renamings, self.v_expand_malloc) - # - if self.specblock.exitswitch is not None: - newlink.exitcase = link.exitcase - if hasattr(link, 'llexitcase'): - newlink.llexitcase = link.llexitcase - if is_catch_link: - newlink.extravars(*extravars) - newlinks.append(newlink) - - self.specblock.closeblock(*newlinks) - - def make_rt_result(self, v_result): - newrtnode = RuntimeSpecNode(v_result, v_result.concretetype) - self.setnode(v_result, newrtnode) - v_new = newrtnode.newvar() - self.renamings[newrtnode] = v_new - return v_new - - def make_const_rt_result(self, v_result, value): - newrtnode = RuntimeSpecNode(v_result, v_result.concretetype) - self.setnode(v_result, newrtnode) - if v_result.concretetype is not lltype.Void: - assert v_result.concretetype == lltype.typeOf(value) - c_value = Constant(value) - c_value.concretetype = v_result.concretetype - self.renamings[newrtnode] = c_value - - def handle_default(self, op): - newargs = [self.rename_nonvirtual(v, op) for v in op.args] - constresult = try_fold_operation(op.opname, newargs, - op.result.concretetype) - if constresult: - self.make_const_rt_result(op.result, constresult[0]) - return [] - else: - newresult = self.make_rt_result(op.result) - return [SpaceOperation(op.opname, newargs, newresult)] - - def handle_unreachable(self, 
op): - from rpython.rtyper.lltypesystem.rstr import string_repr - msg = 'unreachable: %s' % (op,) - ll_msg = string_repr.convert_const(msg) - c_msg = Constant(ll_msg, lltype.typeOf(ll_msg)) - newresult = self.make_rt_result(op.result) - return [SpaceOperation('debug_fatalerror', [c_msg], newresult)] - - def handle_op_getfield(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - fieldname = op.args[1].value - index = node.typedesc.name2index[fieldname] - self.setnode(op.result, node.fields[index]) - return [] - else: - return self.handle_default(op) - - def handle_op_setfield(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - if node.readonly: - raise ForcedInline(op) - fieldname = op.args[1].value - index = node.typedesc.name2index[fieldname] - node.fields[index] = self.getnode(op.args[2]) - return [] - else: - return self.handle_default(op) - - def handle_op_same_as(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - node = self.getnode(op.args[0]) - self.setnode(op.result, node) - return [] - else: - return self.handle_default(op) - - def handle_op_cast_pointer(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - node = self.getnode(op.args[0]) - SOURCEPTR = lltype.Ptr(node.typedesc.MALLOCTYPE) - TARGETPTR = op.result.concretetype - try: - if lltype.castable(TARGETPTR, SOURCEPTR) < 0: - raise lltype.InvalidCast - except lltype.InvalidCast: - return self.handle_unreachable(op) - self.setnode(op.result, node) - return [] - else: - return self.handle_default(op) - - def handle_op_ptr_nonzero(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - self.make_const_rt_result(op.result, True) - return [] - else: - return self.handle_default(op) - - def handle_op_ptr_iszero(self, op): - node = self.getnode(op.args[0]) - if isinstance(node, VirtualSpecNode): - self.make_const_rt_result(op.result, False) - 
return [] - else: - return self.handle_default(op) - - def handle_op_ptr_eq(self, op): - node0 = self.getnode(op.args[0]) - node1 = self.getnode(op.args[1]) - if (isinstance(node0, VirtualSpecNode) or - isinstance(node1, VirtualSpecNode)): - self.make_const_rt_result(op.result, node0 is node1) - return [] - else: - return self.handle_default(op) - - def handle_op_ptr_ne(self, op): - node0 = self.getnode(op.args[0]) - node1 = self.getnode(op.args[1]) - if (isinstance(node0, VirtualSpecNode) or - isinstance(node1, VirtualSpecNode)): - self.make_const_rt_result(op.result, node0 is not node1) - return [] - else: - return self.handle_default(op) - - def handle_op_malloc(self, op): - if op.result is self.v_expand_malloc: - MALLOCTYPE = op.result.concretetype.TO - typedesc = self.graphbuilder.mallocv.getmalloctypedesc(MALLOCTYPE) - virtualnode = VirtualSpecNode(typedesc, []) - self.setnode(op.result, virtualnode) - for name, FIELDTYPE in typedesc.names_and_types: - fieldnode = RuntimeSpecNode(name, FIELDTYPE) - virtualnode.fields.append(fieldnode) - c = Constant(FIELDTYPE._defl()) - c.concretetype = FIELDTYPE - self.renamings[fieldnode] = c - self.v_expand_malloc = None # done - return [] - else: - return self.handle_default(op) - - def handle_op_direct_call(self, op): - graph = graph_called_by(op) - if graph is None: - return self.handle_default(op) - nb_args = len(op.args) - 1 - assert nb_args == len(graph.getargs()) - newnodes = [self.getnode(v) for v in op.args[1:]] - myframe = self.get_updated_frame(op) - mallocv = self.graphbuilder.mallocv - - if op.result is self.v_expand_malloc: - # move to inlining the callee, and continue looking for the - # malloc to expand in the callee's graph - op_to_remove = mallocv.inline_and_remove[graph] - self.v_expand_malloc = op_to_remove.result - return self.handle_inlined_call(myframe, graph, newnodes) - - argnodes = copynodes(newnodes, flagreadonly=myframe.find_vt_nodes()) - kind, newgraph = mallocv.get_specialized_graph(graph, 
argnodes) - if kind == 'trivial': - return self.handle_default(op) - elif kind == 'inline': - return self.handle_inlined_call(myframe, graph, newnodes) - elif kind == 'call': - return self.handle_residual_call(op, newgraph, newnodes) - elif kind == 'fail': - raise CannotVirtualize(op) - else: - raise ValueError(kind) - - def get_updated_frame(self, op): - sourceblock = self.virtualframe.sourceblock - nextopindex = self.virtualframe.nextopindex - self.nodes[op.result] = FutureReturnValue(op) - myframe = VirtualFrame(sourceblock, nextopindex, self.nodes, - self.virtualframe.callerframe, - self.virtualframe.calledgraphs) - del self.nodes[op.result] - return myframe - - def handle_residual_call(self, op, newgraph, newnodes): - fspecptr = getfunctionptr(newgraph) - newargs = [Constant(fspecptr, - concretetype=lltype.typeOf(fspecptr))] - newargs += self.expand_nodes(newnodes) - newresult = self.make_rt_result(op.result) - newop = SpaceOperation('direct_call', newargs, newresult) - return [newop] - - def handle_inlined_call(self, myframe, graph, newnodes): - assert len(graph.getargs()) == len(newnodes) - targetnodes = dict(zip(graph.getargs(), newnodes)) - calledgraphs = myframe.calledgraphs.copy() - if graph in calledgraphs: - raise CannotVirtualize("recursion during inlining") - calledgraphs[graph] = True - calleeframe = VirtualFrame(graph.startblock, 0, - targetnodes, myframe, calledgraphs) - self.virtualframe = calleeframe - self.nodes = calleeframe.get_nodes_in_use() - return [] - - def handle_op_indirect_call(self, op): - v_func = self.rename_nonvirtual(op.args[0], op) - if isinstance(v_func, Constant): - op = SpaceOperation('direct_call', [v_func] + op.args[1:-1], - op.result) - return self.handle_op_direct_call(op) - else: - return self.handle_default(op) - - -class FutureReturnValue(object): - def __init__(self, op): - self.op = op # for debugging - def getfrozenkey(self, memo): - return None - def accumulate_nodes(self, rtnodes, vtnodes): - pass - def copy(self, 
memo, flagreadonly): - return self - -# ____________________________________________________________ -# helpers - -def vars_alive_through_op(block, index): - # NB. make sure this always returns the variables in the same order - if len(block.exits) == 0: - return block.inputargs # return or except block - result = [] - seen = {} - def see(v): - if isinstance(v, Variable) and v not in seen: - result.append(v) - seen[v] = True - # don't include the variables produced by the current or future operations - for op in block.operations[index:]: - seen[op.result] = True - # don't include the extra vars produced by exception-catching links - for link in block.exits: - for v in link.getextravars(): - seen[v] = True - # but include the variables consumed by the current or any future operation - for op in block.operations[index:]: - for v in op.args: - see(v) - see(block.exitswitch) - for link in block.exits: - for v in link.args: - see(v) - return result - -def is_return(block): - return len(block.exits) == 0 and len(block.inputargs) == 1 - -def is_except(block): - return len(block.exits) == 0 and len(block.inputargs) == 2 - -class CannotConstFold(Exception): - pass - -def try_fold_operation(opname, args_v, RESTYPE): - args = [] - for c in args_v: - if not isinstance(c, Constant): - return - args.append(c.value) - try: - op = getattr(llop, opname) - except AttributeError: - return - if not op.is_pure(args_v): - return - try: - result = op(RESTYPE, *args) - except TypeError: - pass - except (KeyboardInterrupt, SystemExit): - raise - except Exception as e: - pass - #log.WARNING('constant-folding %s%r:' % (opname, args_v)) - #log.WARNING(' %s: %s' % (e.__class__.__name__, e)) - else: - return (result,) - -def getconstnode(v, renamings): - rtnode = RuntimeSpecNode(None, v.concretetype) - renamings[rtnode] = v - return rtnode - -def graph_called_by(op): - assert op.opname == 'direct_call' - fobj = op.args[0].value._obj - graph = getattr(fobj, 'graph', None) - return graph diff 
--git a/rpython/translator/backendopt/test/test_mallocv.py b/rpython/translator/backendopt/test/test_mallocv.py deleted file mode 100644 --- a/rpython/translator/backendopt/test/test_mallocv.py +++ /dev/null @@ -1,786 +0,0 @@ -import py -import sys -from rpython.translator.backendopt.mallocv import MallocVirtualizer -from rpython.translator.translator import TranslationContext, graphof -from rpython.flowspace.model import summary -from rpython.rtyper.llinterp import LLInterpreter, LLException -from rpython.rtyper.lltypesystem import lltype, lloperation -from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.rarithmetic import ovfcheck -from rpython.conftest import option - -DONT_CHECK_RESULT = object() -class CHECK_RAISES: - def __init__(self, excname): - assert isinstance(excname, str) - self.excname = excname - - -class TestMallocRemoval(object): - def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): - count_mallocs = 0 - count_calls = 0 - for node in graph.iterblocks(): - for op in node.operations: - if op.opname == 'malloc': - count_mallocs += 1 - if op.opname == 'direct_call': - count_calls += 1 - assert count_mallocs == expected_mallocs - assert count_calls == expected_calls - check_malloc_removed = classmethod(check_malloc_removed) - - def check(self, fn, signature, args, expected_result, - expected_mallocs=0, expected_calls=0): - t = TranslationContext() - self.translator = t - t.buildannotator().build_types(fn, signature) - t.buildrtyper().specialize() - graph = graphof(t, fn) - if option.view: - t.view() - self.original_graph_count = len(t.graphs) - # to detect broken intermediate graphs, - # we do the loop ourselves instead of calling remove_simple_mallocs() - maxiter = 100 - mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) - while True: - progress = mallocv.remove_mallocs_once() - if progress and option.view: - t.view() - t.checkgraphs() - if expected_result is not DONT_CHECK_RESULT: - interp = 
LLInterpreter(t.rtyper) - if not isinstance(expected_result, CHECK_RAISES): - res = interp.eval_graph(graph, args) - assert res == expected_result - else: - excinfo = py.test.raises(LLException, - interp.eval_graph, graph, args) - assert expected_result.excname in str(excinfo.value) - if not progress: - break - maxiter -= 1 - assert maxiter > 0, "infinite loop?" - self.check_malloc_removed(graph, expected_mallocs, expected_calls) - return graph - - def test_fn1(self): - def fn1(x, y): - if x > 0: - t = x+y, x-y - else: - t = x-y, x+y - s, d = t - return s*d - graph = self.check(fn1, [int, int], [15, 10], 125) - insns = summary(graph) - assert insns['int_mul'] == 1 - - def test_aliasing1(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - def fn1(x): - a1 = lltype.malloc(A) - a1.x = 123 - if x > 0: - a2 = a1 - else: - a2 = lltype.malloc(A) - a2.x = 456 - a1.x += 1 - return a2.x - self.check(fn1, [int], [3], 124) - self.check(fn1, [int], [-3], 456) - - def test_direct_call(self): - def g(t): - a, b = t - return a * b - def f(x): - return g((x+1, x-1)) - graph = self.check(f, [int], [10], 99, - expected_calls=1) # not inlined - - def test_direct_call_mutable_simple(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - def g(a): - a.x += 1 - def f(x): - a = lltype.malloc(A) - a.x = x - g(a) - return a.x - graph = self.check(f, [int], [41], 42, - expected_calls=0) # no more call, inlined - - def test_direct_call_mutable_retval(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - def g(a): - a.x += 1 - return a.x * 100 - def f(x): - a = lltype.malloc(A) - a.x = x - y = g(a) - return a.x + y - graph = self.check(f, [int], [41], 4242, - expected_calls=0) # no more call, inlined - - def test_direct_call_mutable_ret_virtual(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - def g(a): - a.x += 1 - return a - def f(x): - a = lltype.malloc(A) - a.x = x - b = g(a) - return a.x + b.x - graph = self.check(f, [int], [41], 84, - expected_calls=0) # no more 
call, inlined - - def test_direct_call_mutable_lastref(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - def g(a): - a.x *= 10 - return a.x - def f(x): - a = lltype.malloc(A) - a.x = x - y = g(a) - return x - y - graph = self.check(f, [int], [5], -45, - expected_calls=1) # not inlined - - def test_direct_call_ret_virtual(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - prebuilt_a = lltype.malloc(A) - def g(a): - prebuilt_a.x += a.x - return a - def f(n): - prebuilt_a.x = n - a = lltype.malloc(A) - a.x = 2 - a = g(a) - return prebuilt_a.x * a.x - graph = self.check(f, [int], [19], 42, - expected_calls=0) # inlined - - def test_direct_call_unused_arg(self): - A = lltype.GcStruct('A', ('x', lltype.Signed)) - prebuilt_a = lltype.malloc(A) - def g(a, unused): - return a.x - def f(n): - a = lltype.malloc(A) - a.x = 15 - return g(a, n) - graph = self.check(f, [int], [42], 15, - expected_calls=1) # not inlined - - def test_raises_simple(self): - class MyExc(Exception): - pass - def f(n): - if n < 3: - e = MyExc() - e.n = n - raise e - return n - self.check(f, [int], [5], 5, expected_mallocs=1) - self.check(f, [int], [-5], CHECK_RAISES("MyExc"), expected_mallocs=1) - - def test_catch_simple(self): - class A: - pass - class E(Exception): - def __init__(self, n): - self.n = n - def g(n): - if n < 0: - raise E(n) - def f(n): - a = A() - a.n = 10 - try: - g(n) # this call should not be inlined - except E as e: - a.n = e.n - return a.n - self.check(f, [int], [15], 10, expected_calls=1) - self.check(f, [int], [-15], -15, expected_calls=1) - - def test_raise_catch(self): - class A: - pass - class E(Exception): - def __init__(self, n): - self.n = n - def f(n): - a = A() - e1 = E(n) - try: - raise e1 - except E as e: - a.n = e.n - return a.n - self.check(f, [int], [15], 15) - - def test_raising_op(self): - class A: - pass - def f(n): - a = A() - a.n = n - try: - a.n = ovfcheck(a.n + 1) - except OverflowError: - return -42 - return a.n - self.check(f, [int], [19], 
20) - self.check(f, [int], [sys.maxint], -42) - - def test_raises_through_spec_graph(self): - class A: - pass - def g(a): - if a.n < 0: - raise ValueError - def f(n): - a = A() - a.n = n - g(a) - return a.n - self.check(f, [int], [19], 19, - expected_calls=1) - self.check(f, [int], [-19], CHECK_RAISES("ValueError"), - expected_calls=1) - - def test_raises_through_inlining(self): - class A: - pass - def g(a): - a.n -= 1 - if a.n < 0: - raise ValueError - def f(n): - a = A() - a.n = n - g(a) - return a.n - self.check(f, [int], [19], 18) - self.check(f, [int], [-19], CHECK_RAISES("ValueError")) - - def test_call_raise_catch(self): - class A: - pass - def g(a): - a.n -= 1 - if a.n <= 0: - raise StopIteration - return a.n * 10 - def f(n): - a = A() - a.n = n - total = 0 - try: - while True: - total += g(a) - except StopIteration: - pass - return total - graph = self.check(f, [int], [11], 550, - expected_calls=0) # inlined - - def test_call_raise_catch_inspect(self): - class A: - pass - class E(Exception): - def __init__(self, n): - self.n = n - def g(a): - a.n -= 1 - if a.n < 0: - raise E(a.n * 10) - def f(n): - a = A() - a.n = n - try: - g(a) # this call should be inlined - except E as e: - a.n = e.n - return a.n - self.check(f, [int], [15], 14, expected_calls=0) - self.check(f, [int], [-15], -160, expected_calls=0) - - def test_fn2(self): - class T: - pass - def fn2(x, y): - t = T() - t.x = x - t.y = y - if x > 0: - return t.x + t.y - else: - return t.x - t.y - self.check(fn2, [int, int], [-6, 7], -13) - - def test_fn3(self): - def fn3(x): - a, ((b, c), d, e) = x+1, ((x+2, x+3), x+4, x+5) - return a+b+c+d+e - self.check(fn3, [int], [10], 65) - - def test_fn4(self): - class A: - pass - class B(A): - pass - def fn4(i): - a = A() - b = B() - a.b = b - b.i = i - return a.b.i - self.check(fn4, [int], [42], 42) - - def test_fn5(self): - class A: - attr = 666 - class B(A): - attr = 42 - def fn5(): - b = B() - return b.attr - self.check(fn5, [], [], 42) - - def 
test_aliasing(self): - class A: - pass - def fn6(n): - a1 = A() - a1.x = 5 - a2 = A() - a2.x = 6 - if n > 0: - a = a1 - else: - a = a2 - a.x = 12 - return a1.x - self.check(fn6, [int], [1], 12) - - def test_with__del__(self): - class A(object): - def __del__(self): - pass - def fn7(): - A() - self.check(fn7, [], [], None, expected_mallocs=1) # don't remove - - def test_call_to_allocating(self): - class A: - pass - def g(n): - a = A() - a.x = n - a.y = n + 1 - return a - def fn8(n): - a = g(n) - return a.x * a.y - self.check(fn8, [int], [6], 42, expected_calls=0) # inlined - - def test_many_calls_to_allocating(self): - class A: - pass - def g(n): - a = A() - a.x = n - return a - def h(n): - a = g(n) - a.y = n - return a - def i(n): - a = h(n) - a.y += 1 - return a - def fn9(n): - a = i(n) - return a.x * a.y - self.check(fn9, [int], [6], 42, expected_calls=0) # inlined - - def test_remove_for_in_range(self): - def fn10(n): - total = 0 - for i in range(n): - total += i - return total - self.check(fn10, [int], [10], 45) - - def test_recursion_spec(self): - class A: - pass - def make_chain(n): - a = A() - if n >= 0: - a.next = make_chain(n-1) - a.value = a.next.value + n - else: - a.value = 0 - return a - def fn11(n): - return make_chain(n).value - self.check(fn11, [int], [10], 55, - expected_calls=1) - - def test_recursion_inlining(self): - class A: - pass - def make_chain(a, n): - if n >= 0: - a.next = A() - make_chain(a.next, n-1) - a.value = a.next.value + n - else: - a.value = 0 - def fn12(n): - a = A() - make_chain(a, n) - return a.value - self.check(fn12, [int], [10], 55, - expected_mallocs=1, expected_calls=1) - - def test_constfold_exitswitch(self): - class A: - pass - def fn13(n): - a = A() - if lloperation.llop.same_as(lltype.Bool, True): - a.n = 4 - else: - a.n = -13 - return a.n - self.check(fn13, [int], [10], 4) - - def test_constfold_indirect_call(self): - F = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed)) - class A: - pass - def h1(n): - 
return n - 1 - def fn16(n): - a = A() - a.n = n - h = llhelper(F, h1) - h2 = lloperation.llop.same_as(F, h) - return h2(a.n) - self.check(fn16, [int], [10], 9, expected_calls=1) - - def test_bug_on_links_to_return(self): - class A: - pass - def h1(n): - return n - 1 - def h2(n): - return n - 2 - def g(a): - a.n += 1 - if a.n > 5: - return h1 - else: - return h2 - def fn15(n): - a = A() - a.n = n - m = g(a)(n) - return a.n * m - assert fn15(10) == 99 - self.check(fn15, [int], [10], 99) - - def test_preserve_annotations_on_graph(self): - class A: - pass - def fn14(n): - a = A() - a.n = n + 1 - return a.n - graph = self.check(fn14, [int], [10], 11) - annotator = self.translator.annotator - assert annotator.binding(graph.getargs()[0]).knowntype is int - assert annotator.binding(graph.getreturnvar()).knowntype is int - - def test_double_spec_order(self): - class A: - pass - def g(a1, a2): - return a1.x - a2.y - # - def fn17(): - a1 = A(); a2 = A() - a1.x = 5; a1.y = 6; a2.x = 7; a2.y = 8 - n1 = g(a1, a2) - # - a1 = A(); a2 = A() - a1.x = 50; a1.y = 60; a2.x = 70; a2.y = 80 - n2 = g(a2, a1) - # - return n1 * n2 - # - assert fn17() == -30 - self.check(fn17, [], [], -30, expected_calls=2) - extra_graphs = len(self.translator.graphs) - self.original_graph_count - assert extra_graphs <= 3 # g(Virtual, Runtime) - # g(Runtime, Virtual) - # g(Virtual, Virtual) - - - def test_getsubstruct(self): - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def fn(n1, n2): - b = lltype.malloc(BIG) - b.z = n1 - b.s.x = n2 - return b.z - b.s.x - - self.check(fn, [int, int], [100, 58], 42, - expected_mallocs=1) # no support for interior structs - - def test_fixedsizearray(self): - A = lltype.FixedSizeArray(lltype.Signed, 3) - S = lltype.GcStruct('S', ('a', A)) - - def fn(n1, n2): - s = lltype.malloc(S) - a = s.a - a[0] = n1 - a[2] = n2 - return a[0]-a[2] - - self.check(fn, [int, int], [100, 42], 58, - 
expected_mallocs=1) # no support for interior arrays - - def test_wrapper_cannot_be_removed(self): - SMALL = lltype.OpaqueType('SMALL') - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - - def g(small): - return -1 - def fn(): - b = lltype.malloc(BIG) - g(b.s) - - self.check(fn, [], [], None, - expected_mallocs=1, # no support for interior opaques - expected_calls=1) - - def test_direct_fieldptr(self): - py.test.skip("llptr support not really useful any more") - S = lltype.GcStruct('S', ('x', lltype.Signed)) - - def fn(): - s = lltype.malloc(S) - s.x = 11 - p = lltype.direct_fieldptr(s, 'x') - return p[0] - - self.check(fn, [], [], 11) - - def test_direct_fieldptr_2(self): - py.test.skip("llptr support not really useful any more") - T = lltype.GcStruct('T', ('z', lltype.Signed)) - S = lltype.GcStruct('S', ('t', T), - ('x', lltype.Signed), - ('y', lltype.Signed)) - def fn(): - s = lltype.malloc(S) - s.x = 10 - s.t.z = 1 - px = lltype.direct_fieldptr(s, 'x') - py = lltype.direct_fieldptr(s, 'y') - pz = lltype.direct_fieldptr(s.t, 'z') - py[0] = 31 - return px[0] + s.y + pz[0] - - self.check(fn, [], [], 42) - - def test_getarraysubstruct(self): - py.test.skip("llptr support not really useful any more") - U = lltype.Struct('U', ('n', lltype.Signed)) - for length in [1, 2]: - S = lltype.GcStruct('S', ('a', lltype.FixedSizeArray(U, length))) - for index in range(length): - - def fn(): - s = lltype.malloc(S) - s.a[index].n = 12 - return s.a[index].n - self.check(fn, [], [], 12) - - def test_ptr_nonzero(self): - S = lltype.GcStruct('S') - def fn(): - s = lltype.malloc(S) - return lloperation.llop.ptr_nonzero(lltype.Bool, s) - self.check(fn, [], [], True, expected_mallocs=0) - - def test_ptr_iszero(self): - S = lltype.GcStruct('S') - def fn(): - s = lltype.malloc(S) - return lloperation.llop.ptr_iszero(lltype.Bool, s) - self.check(fn, [], [], False, expected_mallocs=0) - - def test_ptr_eq_null_left(self): - S = lltype.GcStruct('S') - def fn(): - s = 
lltype.malloc(S) - null = lltype.nullptr(S) - return lloperation.llop.ptr_eq(lltype.Bool, s, null) - self.check(fn, [], [], False, expected_mallocs=0) - - def test_ptr_ne_null_left(self): - S = lltype.GcStruct('S') - def fn(): - s = lltype.malloc(S) - null = lltype.nullptr(S) - return lloperation.llop.ptr_ne(lltype.Bool, s, null) - self.check(fn, [], [], True, expected_mallocs=0) - - def test_ptr_eq_null_right(self): - S = lltype.GcStruct('S') - def fn(): - s = lltype.malloc(S) - null = lltype.nullptr(S) - return lloperation.llop.ptr_eq(lltype.Bool, null, s) - self.check(fn, [], [], False, expected_mallocs=0) - - def test_ptr_ne_null_right(self): - S = lltype.GcStruct('S') - def fn(): - s = lltype.malloc(S) - null = lltype.nullptr(S) - return lloperation.llop.ptr_ne(lltype.Bool, null, s) - self.check(fn, [], [], True, expected_mallocs=0) - - def test_ptr_eq_same_struct(self): - S = lltype.GcStruct('S') - def fn(): - s1 = lltype.malloc(S) - return lloperation.llop.ptr_eq(lltype.Bool, s1, s1) - self.check(fn, [], [], True, expected_mallocs=0) - - def test_ptr_ne_same_struct(self): - S = lltype.GcStruct('S') - def fn(): - s1 = lltype.malloc(S) - return lloperation.llop.ptr_ne(lltype.Bool, s1, s1) - self.check(fn, [], [], False, expected_mallocs=0) - - def test_ptr_eq_diff_struct(self): - S = lltype.GcStruct('S') - def fn(): - s1 = lltype.malloc(S) - s2 = lltype.malloc(S) - return lloperation.llop.ptr_eq(lltype.Bool, s1, s2) - self.check(fn, [], [], False, expected_mallocs=0) - - def test_ptr_ne_diff_struct(self): - S = lltype.GcStruct('S') - def fn(): - s1 = lltype.malloc(S) - s2 = lltype.malloc(S) - return lloperation.llop.ptr_ne(lltype.Bool, s1, s2) - self.check(fn, [], [], True, expected_mallocs=0) - - def test_substruct_not_accessed(self): - py.test.skip("llptr support not really useful any more") - SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) - BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) - def fn(): - x = lltype.malloc(BIG) - while 
x.z < 10: # makes several blocks - x.z += 3 - return x.z - self.check(fn, [], [], 12) - - def test_union(self): - py.test.skip("llptr support not really useful any more") - UNION = lltype.Struct('UNION', ('a', lltype.Signed), ('b', lltype.Signed), - hints = {'union': True}) - BIG = lltype.GcStruct('BIG', ('u1', UNION), ('u2', UNION)) - def fn(): - x = lltype.malloc(BIG) - x.u1.a = 3 - x.u2.b = 6 - return x.u1.b * x.u2.a - self.check(fn, [], [], DONT_CHECK_RESULT) - - def test_nested_struct(self): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('s', S)) - def f(x): - t = lltype.malloc(T) - s = t.s - if x: - s.x = x - return t.s.x + s.x - graph = self.check(f, [int], [42], 2 * 42) - - def test_interior_ptr(self): - py.test.skip("llptr support not really useful any more") - S = lltype.Struct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('s', S)) - def f(x): - t = lltype.malloc(T) - t.s.x = x - return t.s.x - graph = self.check(f, [int], [42], 42) - - def test_interior_ptr_with_index(self): - py.test.skip("llptr support not really useful any more") - S = lltype.Struct("S", ('x', lltype.Signed)) - T = lltype.GcArray(S) - def f(x): - t = lltype.malloc(T, 1) - t[0].x = x - return t[0].x - graph = self.check(f, [int], [42], 42) - - def test_interior_ptr_with_field_and_index(self): - py.test.skip("llptr support not really useful any more") - S = lltype.Struct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('items', lltype.Array(S))) - def f(x): - t = lltype.malloc(T, 1) - t.items[0].x = x - return t.items[0].x - graph = self.check(f, [int], [42], 42) - - def test_interior_ptr_with_index_and_field(self): - py.test.skip("llptr support not really useful any more") - S = lltype.Struct("S", ('x', lltype.Signed)) - T = lltype.Struct("T", ('s', S)) - U = lltype.GcArray(T) - def f(x): - u = lltype.malloc(U, 1) - u[0].s.x = x - return u[0].s.x - graph = self.check(f, [int], [42], 42) - - def test_bogus_cast_pointer(self): - S = 
lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('s', S), ('y', lltype.Signed)) - def f(x): - s = lltype.malloc(S) - s.x = 123 - if x < 0: - t = lltype.cast_pointer(lltype.Ptr(T), s) - t.y += 1 - return s.x - graph = self.check(f, [int], [5], 123) From pypy.commits at gmail.com Sun Dec 18 10:04:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 07:04:29 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: start the stdlib-2.7.13 branch by merging with vendor/stdlib Message-ID: <5856a57d.4673c20a.fafa7.031f@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89146:6b4798f95435 Date: 2016-12-18 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/6b4798f95435/ Log: start the stdlib-2.7.13 branch by merging with vendor/stdlib diff too long, truncating to 2000 out of 21301 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. 
Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. 
The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, 
CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Sun Dec 18 10:04:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 07:04:26 -0800 (PST) Subject: [pypy-commit] pypy vendor/stdlib: update the 2.7 stdlib to 2.7.13 Message-ID: <5856a57a.a285c20a.d98b9.0482@mx.google.com> Author: Armin Rigo Branch: vendor/stdlib Changeset: r89145:94ae9975cd0a Date: 2016-12-18 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/94ae9975cd0a/ Log: update the 2.7 stdlib to 2.7.13 diff too long, truncating to 2000 out of 21347 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. 
Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. 
The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -246,6 +247,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, 
CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -76,7 +76,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -86,25 +88,28 @@ import re, tempfile, errno def _findLib_gcc(name): + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -116,13 
+121,17 @@ def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -131,16 +140,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -151,23 +156,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -180,16 +192,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -223,11 +251,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -161,6 +161,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -347,7 +347,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -230,6 +230,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -132,7 +132,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -163,7 +163,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -228,7 +228,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -325,7 +325,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -28,7 +28,7 @@ #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Sun Dec 18 10:45:45 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 07:45:45 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Refactor includes Message-ID: <5856af29.a351c20a.48d77.1ade@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89147:1237824b7174 Date: 2016-12-18 15:31 +0000 http://bitbucket.org/pypy/pypy/changeset/1237824b7174/ Log: Refactor includes diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -686,6 +686,12 @@ self._Config = type('Config', (object,), {}) self._Config._compilation_info_ = CConfig._compilation_info_ self._TYPES = {} + self.includes = [] + + def include(self, other): + self.ctx.include(other.ctx) + self.structs.update(other.structs) + self.includes.append(other) def add_typedef(self, name, obj): assert name not in self.definitions @@ -757,12 +763,11 @@ def parse_source(source, includes=None): ctx = Parser() + src = ParsedSource(source, ctx) if includes is not None: for header in includes: - ctx.include(header.ctx) - + src.include(header) ctx.parse(source) - src = ParsedSource(source, ctx) for name, (obj, quals) in ctx._declarations.iteritems(): if obj in ctx._included_declarations: continue diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -47,13 +47,19 @@ #define PyObject_HEAD \ Py_ssize_t ob_refcnt; \ Py_ssize_t ob_pypy_link; \ + + typedef struct { + char *name; + } Type; """ hdr1 = parse_source(cdef1) cdef2 = """ typedef struct { 
PyObject_HEAD Py_ssize_t ob_foo; + Type *type; } Object; """ hdr2 = parse_source(cdef2, includes=[hdr1]) assert 'Object' in hdr2.definitions + assert 'Type' not in hdr2.definitions From pypy.commits at gmail.com Sun Dec 18 10:45:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 07:45:46 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Use parse_source() in floatobject.py Message-ID: <5856af2a.c4811c0a.16ab2.a6e0@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89148:f4d014183be5 Date: 2016-12-18 15:40 +0000 http://bitbucket.org/pypy/pypy/changeset/f4d014183be5/ Log: Use parse_source() in floatobject.py diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -1,18 +1,26 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function, - cpython_struct, +from pypy.module.cpyext.api import ( + bootstrap_function, object_h, CANNOT_FAIL, cpython_api, PyObject, build_type_checkers, CONST_STRING) from pypy.module.cpyext.pyobject import ( make_typedescr, track_reference, from_ref) -from pypy.interpreter.error import OperationError from rpython.rlib.rstruct import runpack from pypy.objspace.std.floatobject import W_FloatObject +from pypy.module.cpyext.cparser import parse_source -PyFloatObjectStruct = lltype.ForwardReference() + +cdef = """\ +typedef struct { + PyObject_HEAD + double ob_fval; +} PyFloatObject; +""" +float_h = parse_source(cdef, includes=[object_h]) +float_h.configure_types() + +PyFloatObjectStruct = float_h.definitions['PyFloatObject'] +assert not isinstance(PyFloatObjectStruct, lltype.ForwardReference) PyFloatObject = lltype.Ptr(PyFloatObjectStruct) -PyFloatObjectFields = PyObjectFields + \ - (("ob_fval", rffi.DOUBLE),) -cpython_struct("PyFloatObject", PyFloatObjectFields, PyFloatObjectStruct) @bootstrap_function def init_floatobject(space): @@ 
-83,4 +91,3 @@ return runpack.runpack("d", input) - diff --git a/pypy/module/cpyext/test/test_floatobject.py b/pypy/module/cpyext/test/test_floatobject.py --- a/pypy/module/cpyext/test/test_floatobject.py +++ b/pypy/module/cpyext/test/test_floatobject.py @@ -1,6 +1,8 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi +from pypy.module.cpyext.pyobject import create_ref +from pypy.module.cpyext.floatobject import PyFloatObject class TestFloatObject(BaseApiTest): def test_floatobject(self, space, api): @@ -32,6 +34,12 @@ with rffi.scoped_str2charp("@\t\x1e\xb8Q\xeb\x85\x1f") as ptr: assert abs(api._PyFloat_Unpack8(ptr, 0) - 3.14) < 1e-15 + def test_cast(self, space): + w_f = space.newfloat(1.5) + py_obj = create_ref(space, w_f) + py_float = rffi.cast(PyFloatObject, py_obj) + assert py_float._TYPE is PyFloatObject + class AppTestFloatObject(AppTestCpythonExtensionBase): def test_fromstring(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Sun Dec 18 11:00:02 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 18 Dec 2016 08:00:02 -0800 (PST) Subject: [pypy-commit] pypy default: merge better-PyDict_Next into default, which also provides a basic but non-functioning GetSetProperty-to-PyGetSetDescrObject Message-ID: <5856b282.ca57c20a.96102.1528@mx.google.com> Author: Matti Picus Branch: Changeset: r89149:1483fff7dd66 Date: 2016-12-18 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/1483fff7dd66/ Log: merge better-PyDict_Next into default, which also provides a basic but non-functioning GetSetProperty-to-PyGetSetDescrObject diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -602,7 +602,7 @@ GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - 
PyDictObject PyClassObject'''.split(): + PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -31,7 +31,7 @@ dealloc=buffer_dealloc, realize=buffer_realize) -def buffer_attach(space, py_obj, w_obj): +def buffer_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyBufferObject with the given (str) buffer object. """ diff --git a/pypy/module/cpyext/bytearrayobject.py b/pypy/module/cpyext/bytearrayobject.py --- a/pypy/module/cpyext/bytearrayobject.py +++ b/pypy/module/cpyext/bytearrayobject.py @@ -7,7 +7,7 @@ PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, make_typedescr, get_typedescr, Py_IncRef) # Type PyByteArrayObject represents a mutable array of bytes. # The Python API is that of a sequence; diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -73,7 +73,7 @@ py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str -def bytes_attach(space, py_obj, w_obj): +def bytes_attach(space, py_obj, w_obj, w_userdata=None): """ Copy RPython string object contents to a PyBytesObject. The c_ob_sval must not be modified. 
diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -29,7 +29,7 @@ attach=complex_attach, realize=complex_realize) -def complex_attach(space, py_obj, w_obj): +def complex_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyComplexObject with the given complex object. The value must not be modified. diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -1,11 +1,66 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.objectmodel import specialize +from pypy.interpreter.error import OperationError +from pypy.objspace.std.classdict import ClassDictStrategy +from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, - Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_pyobj + Py_ssize_tP, CONST_STRING, PyObjectFields, cpython_struct, + bootstrap_function) +from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, as_pyobj, + make_typedescr, track_reference, create_ref, from_ref, decref, + Py_IncRef) +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import specialize + +PyDictObjectStruct = lltype.ForwardReference() +PyDictObject = lltype.Ptr(PyDictObjectStruct) +PyDictObjectFields = PyObjectFields + \ + (("ob_keys", PyObject),) +cpython_struct("PyDictObject", PyDictObjectFields, PyDictObjectStruct) + + at bootstrap_function +def init_dictobject(space): + "Type description of PyDictObject" + make_typedescr(space.w_dict.layout.typedef, + basestruct=PyDictObject.TO, + attach=dict_attach, + dealloc=dict_dealloc, + 
realize=dict_realize) + +def dict_attach(space, py_obj, w_obj, w_userdata=None): + """ + Fills a newly allocated PyDictObject with the given dict object. + """ + py_dict = rffi.cast(PyDictObject, py_obj) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + # Problems: if this dict is a typedict, we may have unbound GetSetProperty + # functions in the dict. The corresponding PyGetSetDescrObject must be + # bound to a class, but the actual w_type will be unavailable later on. + # Solution: use the w_userdata argument when assigning a PyTypeObject's + # tp_dict slot to pass a w_type in, and force creation of the pair here + if not space.is_w(w_userdata, space.gettypefor(GetSetProperty)): + # do not do this for type dict of GetSetProperty, that would recurse + w_vals = space.call_method(space.w_dict, "values", w_obj) + vals = space.listview(w_vals) + for w_v in vals: + if isinstance(w_v, GetSetProperty): + pyobj = as_pyobj(space, w_v, w_userdata) + # refcnt will be REFCNT_FROM_PYPY, no need to inc or dec + +def dict_realize(space, py_obj): + """ + Creates the dict in the interpreter + """ + w_obj = space.newdict() + track_reference(space, py_obj, w_obj) + + at cpython_api([PyObject], lltype.Void, header=None) +def dict_dealloc(space, py_obj): + py_dict = rffi.cast(PyDictObject, py_obj) + decref(space, py_dict.c_ob_keys) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + _dealloc(space, py_obj) @cpython_api([], PyObject) def PyDict_New(space): @@ -181,9 +236,9 @@ } The dictionary p should not be mutated during iteration. It is safe - (since Python 2.1) to modify the values of the keys as you iterate over the - dictionary, but only so long as the set of keys does not change. For - example: + (since Python 2.1) to modify the values but not the keys as you iterate + over the dictionary, the keys must not change. 
+ For example: PyObject *key, *value; Py_ssize_t pos = 0; @@ -199,34 +254,32 @@ } Py_DECREF(o); }""" + if w_dict is None: return 0 - # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably - # work, but we can't work out how to not leak it if iteration does - # not complete. Alternatively, we could add some RPython-only - # dict-iterator method to move forward by N steps. - - w_dict.ensure_object_strategy() # make sure both keys and values can - # be borrwed - try: - w_iter = space.call_method(space.w_dict, "iteritems", w_dict) - pos = ppos[0] - while pos: - space.call_method(w_iter, "next") - pos -= 1 - - w_item = space.call_method(w_iter, "next") - w_key, w_value = space.fixedview(w_item, 2) - if pkey: - pkey[0] = as_pyobj(space, w_key) - if pvalue: - pvalue[0] = as_pyobj(space, w_value) - ppos[0] += 1 - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise + pos = ppos[0] + py_obj = as_pyobj(space, w_dict) + py_dict = rffi.cast(PyDictObject, py_obj) + if pos == 0: + # Store the current keys in the PyDictObject. 
+ decref(space, py_dict.c_ob_keys) + w_keys = space.call_method(space.w_dict, "keys", w_dict) + py_dict.c_ob_keys = create_ref(space, w_keys) + Py_IncRef(space, py_dict.c_ob_keys) + else: + w_keys = from_ref(space, py_dict.c_ob_keys) + ppos[0] += 1 + if pos >= space.len_w(w_keys): + decref(space, py_dict.c_ob_keys) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) return 0 + w_key = space.listview(w_keys)[pos] + w_value = space.getitem(w_dict, w_key) + if pkey: + pkey[0] = as_pyobj(space, w_key) + if pvalue: + pvalue[0] = as_pyobj(space, w_value) return 1 @specialize.memo() diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -22,7 +22,7 @@ attach=float_attach, realize=float_realize) -def float_attach(space, py_obj, w_obj): +def float_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyFloatObject with the given float object. The value must not be modified. 
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -30,7 +30,7 @@ dealloc=frame_dealloc, realize=frame_realize) -def frame_attach(space, py_obj, w_obj): +def frame_attach(space, py_obj, w_obj, w_userdata=None): "Fills a newly allocated PyFrameObject with a frame object" frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -51,7 +51,7 @@ PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) -def function_attach(space, py_obj, w_obj): +def function_attach(space, py_obj, w_obj, w_userdata=None): py_func = rffi.cast(PyFunctionObject, py_obj) assert isinstance(w_obj, Function) py_func.c_func_name = make_ref(space, space.wrap(w_obj.name)) @@ -63,7 +63,7 @@ from pypy.module.cpyext.object import _dealloc _dealloc(space, py_obj) -def code_attach(space, py_obj, w_obj): +def code_attach(space, py_obj, w_obj, w_userdata=None): py_code = rffi.cast(PyCodeObject, py_obj) assert isinstance(w_obj, PyCode) py_code.c_co_name = make_ref(space, space.wrap(w_obj.co_name)) diff --git a/pypy/module/cpyext/include/dictobject.h b/pypy/module/cpyext/include/dictobject.h --- a/pypy/module/cpyext/include/dictobject.h +++ b/pypy/module/cpyext/include/dictobject.h @@ -7,6 +7,10 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *ob_keys; /* a private place to put keys during PyDict_Next */ +} PyDictObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -24,7 +24,7 @@ attach=int_attach, realize=int_realize) -def int_attach(space, py_obj, w_obj): +def 
int_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyIntObject with the given int object. The value must not be modified. diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -44,7 +44,7 @@ attach=cfunction_attach, dealloc=cfunction_dealloc) -def cfunction_attach(space, py_obj, w_obj): +def cfunction_attach(space, py_obj, w_obj, w_userdata=None): assert isinstance(w_obj, W_PyCFunctionObject) py_func = rffi.cast(PyCFunctionObject, py_obj) py_func.c_m_ml = w_obj.ml diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -61,7 +61,7 @@ pyobj.c_ob_type = pytype return pyobj - def attach(self, space, pyobj, w_obj): + def attach(self, space, pyobj, w_obj, w_userdata=None): pass def realize(self, space, obj): @@ -111,8 +111,8 @@ return tp_dealloc.api_func if tp_attach: - def attach(self, space, pyobj, w_obj): - tp_attach(space, pyobj, w_obj) + def attach(self, space, pyobj, w_obj, w_userdata=None): + tp_attach(space, pyobj, w_obj, w_userdata) if tp_realize: def realize(self, space, ref): @@ -152,7 +152,7 @@ class InvalidPointerException(Exception): pass -def create_ref(space, w_obj): +def create_ref(space, w_obj, w_userdata=None): """ Allocates a PyObject, and fills its fields with info from the given interpreter object. @@ -173,7 +173,7 @@ assert py_obj.c_ob_refcnt > rawrefcount.REFCNT_FROM_PYPY py_obj.c_ob_refcnt -= 1 # - typedescr.attach(space, py_obj, w_obj) + typedescr.attach(space, py_obj, w_obj, w_userdata) return py_obj def track_reference(space, py_obj, w_obj): @@ -228,7 +228,7 @@ assert isinstance(w_type, W_TypeObject) return get_typedescr(w_type.layout.typedef).realize(space, ref) -def as_pyobj(space, w_obj): +def as_pyobj(space, w_obj, w_userdata=None): """ Returns a 'PyObject *' representing the given intepreter object. 
This doesn't give a new reference, but the returned 'PyObject *' @@ -240,7 +240,7 @@ assert not is_pyobj(w_obj) py_obj = rawrefcount.from_obj(PyObject, w_obj) if not py_obj: - py_obj = create_ref(space, w_obj) + py_obj = create_ref(space, w_obj, w_userdata) return py_obj else: return lltype.nullptr(PyObject.TO) @@ -269,14 +269,14 @@ return hop.inputconst(lltype.Bool, hop.s_result.const) @specialize.ll() -def make_ref(space, obj): +def make_ref(space, obj, w_userdata=None): """Increment the reference counter of the PyObject and return it. Can be called with either a PyObject or a W_Root. """ if is_pyobj(obj): pyobj = rffi.cast(PyObject, obj) else: - pyobj = as_pyobj(space, obj) + pyobj = as_pyobj(space, obj, w_userdata) if pyobj: assert pyobj.c_ob_refcnt > 0 pyobj.c_ob_refcnt += 1 diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -28,7 +28,7 @@ dealloc=traceback_dealloc) -def traceback_attach(space, py_obj, w_obj): +def traceback_attach(space, py_obj, w_obj, w_userdata=None): py_traceback = rffi.cast(PyTracebackObject, py_obj) traceback = space.interp_w(PyTraceback, w_obj) if traceback.next is None: diff --git a/pypy/module/cpyext/sliceobject.py b/pypy/module/cpyext/sliceobject.py --- a/pypy/module/cpyext/sliceobject.py +++ b/pypy/module/cpyext/sliceobject.py @@ -25,7 +25,7 @@ attach=slice_attach, dealloc=slice_dealloc) -def slice_attach(space, py_obj, w_obj): +def slice_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PySliceObject with the given slice object. The fields must not be modified. 
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP +from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP, PyTypeObjectPtr from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -181,6 +181,27 @@ raises(OperationError, space.call_method, w_proxy, 'clear') assert api.PyDictProxy_Check(w_proxy) + def test_typedict1(self, space, api): + py_type = make_ref(space, space.w_int) + py_dict = rffi.cast(PyTypeObjectPtr, py_type).c_tp_dict + ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + + ppos[0] = 0 + pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + try: + w_copy = space.newdict() + while api.PyDict_Next(py_dict, ppos, pkey, pvalue): + w_key = from_ref(space, pkey[0]) + w_value = from_ref(space, pvalue[0]) + space.setitem(w_copy, w_key, w_value) + finally: + lltype.free(ppos, flavor='raw') + lltype.free(pkey, flavor='raw') + lltype.free(pvalue, flavor='raw') + api.Py_DecRef(py_type) # release borrowed references + # do something with w_copy ? 
+ class AppTestDictObject(AppTestCpythonExtensionBase): def test_dictproxytype(self): module = self.import_extension('foo', [ @@ -225,3 +246,16 @@ d = {"a": 1} raises(AttributeError, module.update, d, [("c", 2)]) + def test_typedict2(self): + module = self.import_extension('foo', [ + ("get_type_dict", "METH_O", + ''' + PyObject* value = args->ob_type->tp_dict; + if (value == NULL) value = Py_None; + Py_INCREF(value); + return value; + '''), + ]) + d = module.get_type_dict(1) + assert d['real'].__get__(1, 1) == 1 + diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -63,7 +63,7 @@ p[i] = lltype.nullptr(PyObject.TO) return py_obj -def tuple_attach(space, py_obj, w_obj): +def tuple_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyTupleObject with the given tuple object. The buffer must not be modified. diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -32,7 +32,7 @@ from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( - PyGetSetDef, PyMemberDef, newfunc, + PyGetSetDef, PyMemberDef, newfunc, getter, setter, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.objspace.std.typeobject import W_TypeObject, find_best_base @@ -61,6 +61,7 @@ self.w_type = w_type doc = set = get = None if doc: + # XXX dead code? 
doc = rffi.charp2str(getset.c_doc) if getset.c_get: get = GettersAndSetters.getter.im_func @@ -73,6 +74,21 @@ def PyDescr_NewGetSet(space, getset, w_type): return space.wrap(W_GetSetPropertyEx(getset, w_type)) +def make_GetSet(space, getsetprop): + py_getsetdef = lltype.malloc(PyGetSetDef, flavor='raw') + doc = getsetprop.doc + if doc: + py_getsetdef.c_doc = rffi.str2charp(doc) + else: + py_getsetdef.c_doc = rffi.cast(rffi.CCHARP, 0) + py_getsetdef.c_name = rffi.str2charp(getsetprop.getname(space)) + # XXX FIXME - actually assign these !!! + py_getsetdef.c_get = rffi.cast(getter, 0) + py_getsetdef.c_set = rffi.cast(setter, 0) + py_getsetdef.c_closure = rffi.cast(rffi.VOIDP, 0) + return py_getsetdef + + class W_MemberDescr(GetSetProperty): name = 'member_descriptor' def __init__(self, member, w_type): @@ -158,7 +174,7 @@ realize=methoddescr_realize, ) -def memberdescr_attach(space, py_obj, w_obj): +def memberdescr_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyMemberDescrObject with the given W_MemberDescr object. The values must not be modified. @@ -177,17 +193,21 @@ track_reference(space, obj, w_obj) return w_obj -def getsetdescr_attach(space, py_obj, w_obj): +def getsetdescr_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyGetSetDescrObject with the given W_GetSetPropertyEx object. The values must not be modified. """ py_getsetdescr = rffi.cast(PyGetSetDescrObject, py_obj) + if isinstance(w_obj, GetSetProperty): + py_getsetdef = make_GetSet(space, w_obj) + assert space.isinstance_w(w_userdata, space.w_type) + w_obj = W_GetSetPropertyEx(py_getsetdef, w_userdata) # XXX assign to d_dname, d_type? assert isinstance(w_obj, W_GetSetPropertyEx) py_getsetdescr.c_d_getset = w_obj.getset -def methoddescr_attach(space, py_obj, w_obj): +def methoddescr_attach(space, py_obj, w_obj, w_userdata=None): py_methoddescr = rffi.cast(PyMethodDescrObject, py_obj) # XXX assign to d_dname, d_type? 
assert isinstance(w_obj, W_PyCFunctionObject) @@ -663,7 +683,7 @@ return rffi.cast(PyObject, heaptype) -def type_attach(space, py_obj, w_type): +def type_attach(space, py_obj, w_type, w_userdata=None): """ Fills a newly allocated PyTypeObject from an existing type. """ @@ -890,7 +910,9 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) - pto.c_tp_dict = make_ref(space, w_dict) + # pass in the w_obj to convert any values that are + # unbound GetSetProperty into bound PyGetSetDescrObject + pto.c_tp_dict = make_ref(space, w_dict, w_obj) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) def PyType_IsSubtype(space, a, b): diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -62,7 +62,7 @@ py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni -def unicode_attach(space, py_obj, w_obj): +def unicode_attach(space, py_obj, w_obj, w_userdata=None): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_length = len(space.unicode_w(w_obj)) From pypy.commits at gmail.com Sun Dec 18 11:00:04 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 18 Dec 2016 08:00:04 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <5856b284.2972c20a.d3693.3754@mx.google.com> Author: Matti Picus Branch: Changeset: r89150:c1f9b348dbb3 Date: 2016-12-18 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/c1f9b348dbb3/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -56,3 +56,11 @@ The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic template instantations, and improved integration with CFFI for better performance. 
It also provides interactive C++ (and bindings to that). + +.. branch: better-PyDict_Next + +Improve the performance of ``PyDict_Next``. When trying ``PyDict_Next`` on a +typedef dict, the test exposed a problem converting a ``GetSetProperty`` to a +``PyGetSetDescrObject``. The other direction seem to be fully implemented. +This branch made a minimal effort to convert the basic fields to avoid +segfaults, but trying to use the ``PyGetSetDescrObject`` will probably fail. From pypy.commits at gmail.com Sun Dec 18 11:52:40 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 08:52:40 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: copy this file from 2.7.13 Message-ID: <5856bed8.4438c20a.b4b80.3560@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89151:52963ea7f4bf Date: 2016-12-18 17:36 +0100 http://bitbucket.org/pypy/pypy/changeset/52963ea7f4bf/ Log: copy this file from 2.7.13 diff --git a/lib_pypy/_ctypes_test.c b/lib_pypy/_ctypes_test.c --- a/lib_pypy/_ctypes_test.c +++ b/lib_pypy/_ctypes_test.c @@ -1,13 +1,9 @@ /* This is a Verbatim copy of _ctypes_test.c from CPython 2.7 */ -/***************************************************************** - This file should be kept compatible with Python 2.3, see PEP 291. - *****************************************************************/ - #include /* - Backwards compatibility: + Backwards compatibility, no longer strictly required: Python2.2 used LONG_LONG instead of PY_LONG_LONG */ #if defined(HAVE_LONG_LONG) && !defined(PY_LONG_LONG) @@ -40,6 +36,24 @@ return func(a*a, b*b, c*c, d*d, e*e); } +/* + * This structure should be the same as in test_callbacks.py and the + * method test_callback_large_struct. See issues 17310 and 20160: the + * structure must be larger than 8 bytes long. 
+ */ + +typedef struct { + unsigned long first; + unsigned long second; + unsigned long third; +} Test; + +EXPORT(void) +_testfunc_cbk_large_struct(Test in, void (*func)(Test)) +{ + func(in); +} + EXPORT(void)testfunc_array(int values[4]) { printf("testfunc_array %d %d %d %d\n", From pypy.commits at gmail.com Sun Dec 18 11:52:42 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 08:52:42 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Fix for test_abstract in ctypes/test/test_frombuffer Message-ID: <5856beda.c9b3c20a.bbe06.3917@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89152:a7b53ded27c6 Date: 2016-12-18 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/a7b53ded27c6/ Log: Fix for test_abstract in ctypes/test/test_frombuffer diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -67,6 +67,8 @@ from_address = cdata_from_address def _sizeofinstances(self): + if self._ffiarray is None: + raise TypeError("abstract class") size, alignment = self._ffiarray.size_alignment(self._length_) return size diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -229,8 +229,10 @@ __metaclass__ = StructOrUnionMeta def __new__(cls, *args, **kwds): + from _ctypes import union self = super(_CData, cls).__new__(cls) - if '_abstract_' in cls.__dict__: + if ('_abstract_' in cls.__dict__ or cls is Structure + or cls is union.Union): raise TypeError("abstract class") if hasattr(cls, '_ffistruct_'): self.__dict__['_buffer'] = self._ffistruct_(autofree=True) From pypy.commits at gmail.com Sun Dec 18 12:18:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 09:18:43 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Use object_h definitions in methodobject.py Message-ID: <5856c4f3.e2acc20a.4f0e7.2f78@mx.google.com> Author: Ronan Lamy Branch: 
rffi-parser Changeset: r89153:e7350a8db95b Date: 2016-12-18 17:17 +0000 http://bitbucket.org/pypy/pypy/changeset/e7350a8db95b/ Log: Use object_h definitions in methodobject.py diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -293,7 +293,11 @@ argtypesw = zip(self.argtypes, [_name.startswith("w_") for _name in self.argnames]) error_value = getattr(self, "error_value", CANNOT_FAIL) - if (isinstance(self.restype, lltype.Ptr) + if isinstance(self.restype, lltype.Typedef): + real_restype = self.restype.OF + else: + real_restype = self.restype + if (isinstance(real_restype, lltype.Ptr) and error_value is not CANNOT_FAIL): assert lltype.typeOf(error_value) == self.restype assert not error_value # only support error=NULL diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -11,22 +11,14 @@ CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, build_type_checkers, cpython_api, cpython_struct, generic_cpy_call, - PyTypeObjectPtr) + PyTypeObjectPtr, object_h) from pypy.module.cpyext.pyobject import ( Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr) PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') -PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) -PyCFunctionKwArgs = lltype.Ptr(lltype.FuncType([PyObject, PyObject, PyObject], - PyObject)) - -PyMethodDef = cpython_struct( - 'PyMethodDef', - [('ml_name', rffi.CONST_CCHARP), - ('ml_meth', PyCFunction_typedef), - ('ml_flags', rffi.INT_real), - ('ml_doc', rffi.CONST_CCHARP), - ]) +PyCFunction = object_h.definitions['PyCFunction'] +PyCFunctionKwArgs = object_h.definitions['PyCFunctionWithKeywords'] +PyMethodDef = object_h.definitions['PyMethodDef'] PyCFunctionObjectStruct = cpython_struct( 
'PyCFunctionObject', From pypy.commits at gmail.com Sun Dec 18 12:29:23 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 09:29:23 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Update to 2.7.13's handling of ``'%d' % x``, which gives completely Message-ID: <5856c773.aaa3c20a.75e9a.3e12@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89155:fc358e88d939 Date: 2016-12-18 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/fc358e88d939/ Log: Update to 2.7.13's handling of ``'%d' % x``, which gives completely different results if x is a subclass of 'long' or a subclass of 'int' diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -547,11 +547,13 @@ # make sure that w_value is a wrapped float return space.float(w_value) -def format_num_helper_generator(fmt, digits): +def format_num_helper_generator(fmt, digits, method, remove_prefix=''): def format_num_helper(space, w_value): - try: + if (not space.isinstance_w(w_value, space.w_int) and + not space.isinstance_w(w_value, space.w_long)): + try: w_value = maybe_int(space, w_value) - except OperationError: + except OperationError: try: w_value = space.long(w_value) except OperationError as operr: @@ -561,17 +563,27 @@ "%s format: a number is required, not %T", fmt, w_value) else: raise - try: + if space.isinstance_w(w_value, space.w_long): + text = space.str_w(space.call_method(w_value, method)) + skip_left = 0 + skip_right = len(text) + if remove_prefix: + if not text.startswith(remove_prefix): + raise oefmt(space.w_ValueError, + "%s format: invalid result of %s (type=%T)", + fmt, method, w_value) + skip_left = len(remove_prefix) + if text.endswith('L'): + skip_right = len(text) - 1 + assert skip_right >= 0 + return text[skip_left : skip_right] + else: value = space.int_w(w_value) return fmt % (value,) - except OperationError as operr: - if not operr.match(space, 
space.w_OverflowError): - raise - num = space.bigint_w(w_value) - return num.format(digits) return func_with_new_name(format_num_helper, 'base%d_num_helper' % len(digits)) -int_num_helper = format_num_helper_generator('%d', '0123456789') -oct_num_helper = format_num_helper_generator('%o', '01234567') -hex_num_helper = format_num_helper_generator('%x', '0123456789abcdef') +int_num_helper = format_num_helper_generator('%d', '0123456789', '__str__') +oct_num_helper = format_num_helper_generator('%o', '01234567', '__oct__', '0') +hex_num_helper = format_num_helper_generator('%x', '0123456789abcdef', + '__hex__', '0x') diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -137,6 +137,57 @@ sl = SubLong(l) assert '%d' % sl == '4800000000' + def test_format_subclass_with_str(self): + import sys + if sys.version_info < (2, 7, 13): + skip("CPython gives SystemError before 2.7.13") + #...and behaves inconsistently in 2.7.13, but we reproduce that + + class SubInt2(int): + def __str__(self): + assert False, "not called" + def __hex__(self): + assert False, "not called" + def __oct__(self): + assert False, "not called" + def __int__(self): + assert False, "not called" + def __long__(self): + assert False, "not called" + sl = SubInt2(123) + assert '%i' % sl == '123' + assert '%u' % sl == '123' + assert '%d' % sl == '123' + assert '%x' % sl == '7b' + assert '%X' % sl == '7B' + assert '%o' % sl == '173' + + class SubLong2(long): + def __str__(self): + return 'Xx' + def __hex__(self): + return extra_stuff + '0xYy' + extra_tail + def __oct__(self): + return extra_stuff + '0Zz' + extra_tail + def __int__(self): + assert False, "not called" + def __long__(self): + assert False, "not called" + sl = SubLong2(123) + extra_stuff = '' + for extra_tail in ['', 'l', 'L']: + x = '%i' % sl + assert x == 'Xx' + assert '%u' % sl == 'Xx' + assert 
'%d' % sl == 'Xx' + assert '%x' % sl == ('Yyl' if extra_tail == 'l' else 'Yy') + assert '%X' % sl == ('YYL' if extra_tail == 'l' else 'YY') + assert '%o' % sl == ('Zzl' if extra_tail == 'l' else 'Zz') + extra_stuff = '??' + raises(ValueError, "'%x' % sl") + raises(ValueError, "'%X' % sl") + raises(ValueError, "'%o' % sl") + def test_format_list(self): l = [1,2] assert '<[1, 2]>' == '<%s>' % l @@ -202,7 +253,8 @@ def __long__(self): return 0L - assert "%x" % IntFails() == '0' + x = "%x" % IntFails() + assert x == '0' def test_formatting_huge_precision(self): prec = 2**31 From pypy.commits at gmail.com Sun Dec 18 12:29:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 09:29:21 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: update version Message-ID: <5856c771.d5091c0a.6942f.d25e@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89154:3570b561f443 Date: 2016-12-18 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/3570b561f443/ Log: update version diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,7 +21,7 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 12 +#define PY_MICRO_VERSION 13 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 12, "final", 42) +CPYTHON_VERSION = (2, 7, 13, "final", 42) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From pypy.commits at gmail.com Sun Dec 18 12:40:46 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 
18 Dec 2016 09:40:46 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Check early that we get a list of strings Message-ID: <5856ca1e.ce181c0a.bb41f.d84f@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89156:b558a727875c Date: 2016-12-18 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/b558a727875c/ Log: Check early that we get a list of strings diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -350,8 +350,13 @@ # bit artificial code but important to not just unwrap w_fromlist # to get a better trace. if it is unwrapped, the immutability of the # tuple is lost + length = space.len_w(w_fromlist) + for i in range(length): + w_name = space.getitem(w_fromlist, space.wrap(i)) + if not space.isinstance_w(w_name, space.w_str): + raise oefmt(space.w_TypeError, + "'fromlist' must be a list of strings, got %T", w_name) if w_path is not None: - length = space.len_w(w_fromlist) if length == 1 and space.eq_w( space.getitem(w_fromlist, space.wrap(0)), space.wrap('*')): diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -351,6 +351,9 @@ o = __import__('sys', [], [], ['']) # CPython accepts this assert sys == o + def test_import_fromlist_must_not_contain_unicodes(self): + raises(TypeError, __import__, 'encodings', None, None, [u'xxx']) + def test_import_relative_back_to_absolute2(self): from pkg import abs_x_y import sys From pypy.commits at gmail.com Sun Dec 18 12:56:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 09:56:00 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: support resizing anonymous mmaps (2.7.12 and pypy previously get a EBADF Message-ID: <5856cdb0.0209c20a.3e8d9.4dfe@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89157:8e961f254b34 Date: 2016-12-18 18:55 +0100 
http://bitbucket.org/pypy/pypy/changeset/8e961f254b34/ Log: support resizing anonymous mmaps (2.7.12 and pypy previously get a EBADF in this case) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -872,3 +872,25 @@ assert str(e) == "cannot mmap an empty file" except BaseException as e: assert False, "unexpected exception: " + str(e) + + def test_resize_past_pos(self): + import os, mmap, sys + if os.name == "nt": + skip("cannot resize anonymous mmaps on Windows") + if sys.version_info < (2, 7, 13): + skip("cannot resize anonymous mmaps before 2.7.13") + m = mmap.mmap(-1, 8192) + m.read(5000) + try: + m.resize(4096) + except SystemError: + skip("resizing not supported") + assert m.tell() == 5000 + assert m.read(14) == '' + assert m.read(-1) == '' + raises(ValueError, m.read_byte) + assert m.readline() == '' + raises(ValueError, m.write_byte, 'b') + raises(ValueError, m.write, 'abc') + assert m.tell() == 5000 + m.close() diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -507,6 +507,8 @@ return rffi.ptradd(self.data, offset) def getslice(self, start, length): + if length < 0: + return '' return rffi.charpsize2str(self.getptr(start), length) def setslice(self, start, newdata): @@ -549,8 +551,9 @@ if not has_mremap: raise RValueError("mmap: resizing not available--no mremap()") - # resize the underlying file first - os.ftruncate(self.fd, self.offset + newsize) + # resize the underlying file first, if there is one + if self.fd >= 0: + os.ftruncate(self.fd, self.offset + newsize) # now resize the mmap newdata = c_mremap(self.getptr(0), self.size, newsize, From pypy.commits at gmail.com Sun Dec 18 13:10:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 10:10:50 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: update sqlite3 Message-ID: 
<5856d12a.876ec20a.e50e2.4522@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89158:79cf94d1d4ec Date: 2016-12-18 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/79cf94d1d4ec/ Log: update sqlite3 diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -368,12 +368,16 @@ def __call__(self, sql): return self._statement_cache.get(sql) - def cursor(self, factory=None): + def _default_cursor_factory(self): + return Cursor(self) + + def cursor(self, factory=_default_cursor_factory): self._check_thread() self._check_closed() - if factory is None: - factory = Cursor cur = factory(self) + if not issubclass(type(cur), Cursor): + raise TypeError("factory must return a cursor, not %s" + % (type(cur).__name__,)) if self.row_factory is not None: cur.row_factory = self.row_factory return cur @@ -414,7 +418,8 @@ if not self._in_transaction: return - self.__do_all_statements(Statement._reset, False) + # the following line is removed for compatibility with 2.7.13: + # self.__do_all_statements(Statement._reset, False) statement_star = _ffi.new('sqlite3_stmt **') ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1, @@ -546,7 +551,7 @@ @_check_thread_wrap @_check_closed_wrap def create_collation(self, name, callback): - name = name.upper() + name = str.upper(name) if not all(c in string.ascii_uppercase + string.digits + '_' for c in name): raise ProgrammingError("invalid character in collation name") From pypy.commits at gmail.com Sun Dec 18 13:19:41 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 10:19:41 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Allow 'sys.maxint' as max_length in calls to zlib.decompressobj().decompress() Message-ID: <5856d33d.c9b3c20a.bbe06.5b73@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89159:036509cd4445 Date: 2016-12-18 19:19 +0100 http://bitbucket.org/pypy/pypy/changeset/036509cd4445/ Log: Allow 'sys.maxint' as 
max_length in calls to zlib.decompressobj().decompress() diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -278,7 +278,7 @@ else: self.unconsumed_tail = tail - @unwrap_spec(data='bufferstr', max_length="c_int") + @unwrap_spec(data='bufferstr', max_length=int) def decompress(self, space, data, max_length=0): """ decompress(data[, max_length]) -- Return a string containing the diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -230,6 +230,14 @@ data = d.unconsumed_tail assert not data + def test_max_length_large(self): + import sys + if sys.version_info < (2, 7, 13): + skip("passing a potentially 64-bit int as max_length is not " + "supported before 2.7.13") + d = self.zlib.decompressobj() + assert d.decompress(self.compressed, sys.maxsize) == self.expanded + def test_buffer(self): """ We should be able to pass buffer objects instead of strings. 
From pypy.commits at gmail.com Sun Dec 18 13:43:59 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 10:43:59 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: ssl fix for test_parse_all_sans Message-ID: <5856d8ef.c6bdc20a.e2a27.5050@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89160:642126fc25ea Date: 2016-12-18 19:43 +0100 http://bitbucket.org/pypy/pypy/changeset/642126fc25ea/ Log: ssl fix for test_parse_all_sans diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -897,10 +897,24 @@ length = libssl_ASN1_STRING_length(as_) w_t = space.newtuple([ v, space.wrap(rffi.charpsize2str(buf, length))]) + elif gntype == GEN_RID: + with lltype.scoped_alloc(rffi.CCHARP.TO, 2048) as buf: + d_rid = libssl_pypy_GENERAL_NAME_rid(name) + length = libssl_i2t_ASN1_OBJECT(buf, 2047, d_rid) + if length < 0: + raise _ssl_seterror(space, None, 0) + elif length >= 2048: + v = "" + else: + v = rffi.charpsize2str(buf, length) + w_t = space.newtuple([ + space.wrap("Registered ID"), + space.call_function(space.w_unicode, + space.wrap(v))]) else: # for everything else, we use the OpenSSL print form if gntype not in (GEN_OTHERNAME, GEN_X400, GEN_EDIPARTY, - GEN_IPADD, GEN_RID): + GEN_IPADD): space.warn(space.wrap("Unknown general name type"), space.w_RuntimeWarning) libssl_BIO_reset(biobuf) @@ -911,6 +925,9 @@ raise _ssl_seterror(space, None, 0) v = rffi.charpsize2str(buf, length) + if ':' not in v: + raise oefmt(space.w_ValueError, + "Invalid value %s", v) v1, v2 = v.split(':', 1) w_t = space.newtuple([space.wrap(v1), space.wrap(v2)]) diff --git a/lib-python/2.7/test/allsans.pem b/pypy/module/_ssl/test/allsans.pem copy from lib-python/2.7/test/allsans.pem copy to pypy/module/_ssl/test/allsans.pem diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ 
b/pypy/module/_ssl/test/test_ssl.py @@ -8,6 +8,8 @@ def setup_class(cls): cls.w_nullbytecert = cls.space.wrap(os.path.join( os.path.dirname(__file__), 'nullbytecert.pem')) + cls.w_allsans = cls.space.wrap(os.path.join( + os.path.dirname(__file__), 'allsans.pem')) def test_init_module(self): import _ssl @@ -121,6 +123,10 @@ ('IP Address', '192.0.2.1'), ('IP Address', '2001:DB8:0:0:0:0:0:1\n')) + def test_decode_all_sans(self): + import _ssl + _ssl._test_decode_cert(self.allsans) + def test_context(self): import _ssl s = _ssl._SSLContext(_ssl.PROTOCOL_TLS) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -55,6 +55,7 @@ # Unnamed structures are not supported by rffi_platform. # So we replace an attribute access with a macro call. '#define pypy_GENERAL_NAME_dirn(name) (name->d.dirn)', + '#define pypy_GENERAL_NAME_rid(name) (name->d.rid)', '#define pypy_GENERAL_NAME_uri(name) (name->d.uniformResourceIdentifier)', '#define pypy_GENERAL_NAME_pop_free(names) (sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free))', '#define pypy_DIST_POINT_fullname(obj) (obj->distpoint->name.fullname)', @@ -436,6 +437,7 @@ save_err=SAVE_ERR) ssl_external('ASN1_TIME_print', [BIO, ASN1_TIME], rffi.INT) ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) +ssl_external('i2t_ASN1_OBJECT', [rffi.CCHARP, rffi.INT, ASN1_OBJECT], rffi.INT) ssl_external('ASN1_item_d2i', [rffi.VOIDP, rffi.CCHARPP, rffi.LONG, ASN1_ITEM], rffi.VOIDP) ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) @@ -475,6 +477,8 @@ ssl_external('GENERAL_NAME_print', [BIO, GENERAL_NAME], rffi.INT) ssl_external('pypy_GENERAL_NAME_dirn', [GENERAL_NAME], X509_NAME, macro=True) +ssl_external('pypy_GENERAL_NAME_rid', [GENERAL_NAME], ASN1_OBJECT, + macro=True) ssl_external('pypy_GENERAL_NAME_uri', [GENERAL_NAME], ASN1_IA5STRING, macro=True) From pypy.commits at gmail.com Sun Dec 18 13:58:42 2016 From: pypy.commits at gmail.com 
(arigo) Date: Sun, 18 Dec 2016 10:58:42 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: fix for socket.ssl().read(0) Message-ID: <5856dc62.c6bdc20a.e2a27.557e@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89161:2106f69132d9 Date: 2016-12-18 19:58 +0100 http://bitbucket.org/pypy/pypy/changeset/2106f69132d9/ Log: fix for socket.ssl().read(0) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -420,6 +420,12 @@ raise oefmt(space.w_ValueError, "size should not be negative") rwbuffer = None + if num_bytes <= 0: + if rwbuffer: + return space.wrap(0) + else: + return space.wrap("") + with rffi.scoped_alloc_buffer(num_bytes) as buf: while True: err = 0 diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -218,6 +218,7 @@ ss.write("hello\n") data = ss.read(10) assert isinstance(data, str) + assert ss.read(0) == '' self.s.close() del ss; gc.collect() From pypy.commits at gmail.com Sun Dec 18 15:28:05 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 12:28:05 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Improve the test, finding even more obscure cases Message-ID: <5856f155.4438c20a.b4b80.86b6@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89162:21d3d5d83974 Date: 2016-12-18 21:07 +0100 http://bitbucket.org/pypy/pypy/changeset/21d3d5d83974/ Log: Improve the test, finding even more obscure cases diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -164,7 +164,7 @@ class SubLong2(long): def __str__(self): - return 'Xx' + return extra_stuff + 'Xx' def __hex__(self): return extra_stuff + '0xYy' + extra_tail def __oct__(self): @@ -174,15 
+174,16 @@ def __long__(self): assert False, "not called" sl = SubLong2(123) - extra_stuff = '' - for extra_tail in ['', 'l', 'L']: - x = '%i' % sl - assert x == 'Xx' - assert '%u' % sl == 'Xx' - assert '%d' % sl == 'Xx' - assert '%x' % sl == ('Yyl' if extra_tail == 'l' else 'Yy') - assert '%X' % sl == ('YYL' if extra_tail == 'l' else 'YY') - assert '%o' % sl == ('Zzl' if extra_tail == 'l' else 'Zz') + for extra_stuff in ['', '-']: + for extra_tail in ['', 'l', 'L']: + m = extra_stuff + x = '%i' % sl + assert x == m+'Xx' + assert '%u' % sl == m+'Xx' + assert '%d' % sl == m+'Xx' + assert '%x' % sl == m+('Yyl' if extra_tail == 'l' else 'Yy') + assert '%X' % sl == m+('YYL' if extra_tail == 'l' else 'YY') + assert '%o' % sl == m+('Zzl' if extra_tail == 'l' else 'Zz') extra_stuff = '??' raises(ValueError, "'%x' % sl") raises(ValueError, "'%X' % sl") From pypy.commits at gmail.com Sun Dec 18 15:28:07 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 12:28:07 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Give up, revert and skip the cpython test, and document in Message-ID: <5856f157.c89cc20a.5a316.89e4@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89163:803a7b9b59bf Date: 2016-12-18 21:27 +0100 http://bitbucket.org/pypy/pypy/changeset/803a7b9b59bf/ Log: Give up, revert and skip the cpython test, and document in cpython_differences.rst diff --git a/lib-python/2.7/test/test_format.py b/lib-python/2.7/test/test_format.py --- a/lib-python/2.7/test/test_format.py +++ b/lib-python/2.7/test/test_format.py @@ -337,8 +337,10 @@ except exc: pass else: - self.fail('%s not raised for %r format of %r' % - (exc.__name__, fmt, result)) + if test_support.check_impl_detail(): + self.fail('%s not raised for %r format of %r' % + (exc.__name__, fmt, result)) + #else (PyPy): at least it didn't explode, good enough def test_main(): diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- 
a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -478,6 +478,16 @@ from the Makefile used to build the interpreter. PyPy should bake the values in during compilation, but does not do that yet. +* ``"%d" % x`` and ``"%x" % x`` and similar constructs, where ``x`` is + an instance of a subclass of ``long`` that overrides the special + methods ``__str__`` or ``__hex__`` or ``__oct__``: PyPy doesn't call + the special methods; CPython does---but only if it is a subclass of + ``long``, not ``int``. CPython's behavior is really messy: e.g. for + ``%x`` it calls ``__hex__()``, which is supposed to return a string + like ``-0x123L``; then the ``0x`` and the final ``L`` are removed, and + the rest is kept. If you return an unexpected string from + ``__hex__()`` you get an exception (or a crash before CPython 2.7.13). + .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -547,7 +547,7 @@ # make sure that w_value is a wrapped float return space.float(w_value) -def format_num_helper_generator(fmt, digits, method, remove_prefix=''): +def format_num_helper_generator(fmt, digits): def format_num_helper(space, w_value): if (not space.isinstance_w(w_value, space.w_int) and not space.isinstance_w(w_value, space.w_long)): @@ -563,27 +563,17 @@ "%s format: a number is required, not %T", fmt, w_value) else: raise - if space.isinstance_w(w_value, space.w_long): - text = space.str_w(space.call_method(w_value, method)) - skip_left = 0 - skip_right = len(text) - if remove_prefix: - if not text.startswith(remove_prefix): - raise oefmt(space.w_ValueError, - "%s format: invalid result of %s (type=%T)", - fmt, method, w_value) - skip_left = len(remove_prefix) - if 
text.endswith('L'): - skip_right = len(text) - 1 - assert skip_right >= 0 - return text[skip_left : skip_right] - else: + try: value = space.int_w(w_value) return fmt % (value,) + except OperationError as operr: + if not operr.match(space, space.w_OverflowError): + raise + num = space.bigint_w(w_value) + return num.format(digits) return func_with_new_name(format_num_helper, 'base%d_num_helper' % len(digits)) -int_num_helper = format_num_helper_generator('%d', '0123456789', '__str__') -oct_num_helper = format_num_helper_generator('%o', '01234567', '__oct__', '0') -hex_num_helper = format_num_helper_generator('%x', '0123456789abcdef', - '__hex__', '0x') +int_num_helper = format_num_helper_generator('%d', '0123456789') +oct_num_helper = format_num_helper_generator('%o', '01234567') +hex_num_helper = format_num_helper_generator('%x', '0123456789abcdef') diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -138,11 +138,6 @@ assert '%d' % sl == '4800000000' def test_format_subclass_with_str(self): - import sys - if sys.version_info < (2, 7, 13): - skip("CPython gives SystemError before 2.7.13") - #...and behaves inconsistently in 2.7.13, but we reproduce that - class SubInt2(int): def __str__(self): assert False, "not called" @@ -162,6 +157,12 @@ assert '%X' % sl == '7B' assert '%o' % sl == '173' + skip("the rest of this test is serious nonsense imho, changed " + "only on 2.7.13, and is different on 3.x anyway. We could " + "reproduce it by writing lengthy logic, then get again the " + "reasonable performance by special-casing the exact type " + "'long'. And all for 2.7.13 only. 
Let's give up.") + class SubLong2(long): def __str__(self): return extra_stuff + 'Xx' From pypy.commits at gmail.com Sun Dec 18 15:35:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 12:35:39 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: Handle const pointers (we probably only care about 'const char *') Message-ID: <5856f31b.4f831c0a.1a3b7.1a0e@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89164:94988b4687ff Date: 2016-12-18 20:34 +0000 http://bitbucket.org/pypy/pypy/changeset/94988b4687ff/ Log: Handle const pointers (we probably only care about 'const char *') diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -693,9 +693,9 @@ self.structs.update(other.structs) self.includes.append(other) - def add_typedef(self, name, obj): + def add_typedef(self, name, obj, quals): assert name not in self.definitions - tp = self.convert_type(obj) + tp = self.convert_type(obj, quals) if isinstance(tp, DelayedStruct): self.realize_struct(tp, name) tp = self.structs[obj] = tp.TYPE @@ -728,7 +728,7 @@ if name in self._TYPES: self._TYPES[name].become(TYPE) - def convert_type(self, obj): + def convert_type(self, obj, quals=0): if isinstance(obj, model.PrimitiveType): return cname_to_lltype(obj.name) elif isinstance(obj, model.StructType): @@ -746,7 +746,11 @@ if isinstance(TO, lltype.ContainerType): return lltype.Ptr(TO) else: - return rffi.CArrayPtr(TO) + if obj.quals & model.Q_CONST: + return lltype.Ptr(lltype.Array( + TO, hints={'nolength': True, 'render_as_const': True})) + else: + return rffi.CArrayPtr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: raise NotImplementedError @@ -773,7 +777,7 @@ continue if name.startswith('typedef '): name = name[8:] - src.add_typedef(name, obj) + src.add_typedef(name, obj, quals) elif name.startswith('macro '): name = name[6:] src.add_macro(name, obj) diff --git 
a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -63,3 +63,11 @@ hdr2 = parse_source(cdef2, includes=[hdr1]) assert 'Object' in hdr2.definitions assert 'Type' not in hdr2.definitions + +def test_const(): + cdef = """ + typedef struct { + const char * const foo; + } bar; + """ + hdr = parse_source(cdef) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -702,9 +702,11 @@ heaptype = rffi.cast(PyHeapTypeObject, pto) heaptype.c_ht_name = make_ref(space, w_typename) from pypy.module.cpyext.bytesobject import PyString_AsString - pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) + pto.c_tp_name = rffi.cast( + rffi.CONST_CCHARP, PyString_AsString(space, heaptype.c_ht_name)) else: - pto.c_tp_name = rffi.str2charp(w_type.name) + pto.c_tp_name = rffi.cast( + rffi.CONST_CCHARP, rffi.str2charp(w_type.name)) # uninitialized fields: # c_tp_print # XXX implement From pypy.commits at gmail.com Sun Dec 18 16:29:22 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 13:29:22 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: tweak the error message to pass the cpython test Message-ID: <5856ffb2.4c9d1c0a.74f50.2cfe@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89166:b0bc60afe38f Date: 2016-12-18 21:31 +0100 http://bitbucket.org/pypy/pypy/changeset/b0bc60afe38f/ Log: tweak the error message to pass the cpython test diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -355,7 +355,7 @@ w_name = space.getitem(w_fromlist, space.wrap(i)) if not space.isinstance_w(w_name, space.w_str): raise oefmt(space.w_TypeError, - "'fromlist' must be a list of strings, got %T", w_name) + "'fromlist' items must be str, not 
%T", w_name) if w_path is not None: if length == 1 and space.eq_w( space.getitem(w_fromlist, space.wrap(0)), From pypy.commits at gmail.com Sun Dec 18 16:29:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 13:29:24 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Revert the change to sqlite that occurred in 2.7.13, and document it as Message-ID: <5856ffb4.c4251c0a.6f1d.3360@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89167:23c8b64a1b80 Date: 2016-12-18 22:28 +0100 http://bitbucket.org/pypy/pypy/changeset/23c8b64a1b80/ Log: Revert the change to sqlite that occurred in 2.7.13, and document it as a known difference with CPython 2.7.13. diff --git a/lib-python/2.7/sqlite3/test/regression.py b/lib-python/2.7/sqlite3/test/regression.py --- a/lib-python/2.7/sqlite3/test/regression.py +++ b/lib-python/2.7/sqlite3/test/regression.py @@ -24,6 +24,7 @@ import datetime import unittest import sqlite3 as sqlite +from test import test_support class RegressionTests(unittest.TestCase): def setUp(self): @@ -350,7 +351,10 @@ self.assertRaises(ValueError, cur.execute, " \0select 2") self.assertRaises(ValueError, cur.execute, "select 2\0") + @test_support.impl_detail(pypy=False) def CheckCommitCursorReset(self): + # This test is for logic added in 2.7.13 which PyPy doesn't + # implement. See http://bugs.python.org/issue29006 """ Connection.commit() did reset cursors, which made sqlite3 to return rows multiple times when fetched from cursors diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -418,8 +418,15 @@ if not self._in_transaction: return - # the following line is removed for compatibility with 2.7.13: - # self.__do_all_statements(Statement._reset, False) + # The following line is a KNOWN DIFFERENCE with CPython 2.7.13. 
+ # More precisely, the corresponding line was removed in the + # version 2.7.13 of CPython, but this is causing troubles for + # PyPy (and potentially for CPython too): + # + # http://bugs.python.org/issue29006 + # + # So for now, we keep this line. + self.__do_all_statements(Statement._reset, False) statement_star = _ffi.new('sqlite3_stmt **') ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1, diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -488,6 +488,11 @@ the rest is kept. If you return an unexpected string from ``__hex__()`` you get an exception (or a crash before CPython 2.7.13). +* The ``sqlite`` module was updated on 2.7.13 to no longer reset all + cursors when there is a commit. This causes troubles for PyPy (and + potentially for CPython too), and so for now we didn't port this change: + see http://bugs.python.org/issue29006. + .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. 
_`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ From pypy.commits at gmail.com Sun Dec 18 16:56:32 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 13:56:32 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: fix version here too Message-ID: <58570610.2854c20a.5e529.ae51@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89168:47bb335b62e8 Date: 2016-12-18 22:45 +0100 http://bitbucket.org/pypy/pypy/changeset/47bb335b62e8/ Log: fix version here too diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -26,7 +26,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.12" +#define PY_VERSION "2.7.13" /* PyPy version as a string */ #define PYPY_VERSION "5.7.0-alpha0" From pypy.commits at gmail.com Sun Dec 18 16:56:34 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 13:56:34 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: decompressobj().flush() also accepts sys.maxint now Message-ID: <58570612.8b9a1c0a.e728f.3276@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89169:bf66e022e49c Date: 2016-12-18 22:55 +0100 http://bitbucket.org/pypy/pypy/changeset/bf66e022e49c/ Log: decompressobj().flush() also accepts sys.maxint now diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -313,7 +313,7 @@ data as possible. 
""" if w_length is not None: - length = space.c_int_w(w_length) + length = space.int_w(w_length) if length <= 0: raise oefmt(space.w_ValueError, "length must be greater than zero") diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -135,7 +135,6 @@ """ decompressor = self.zlib.decompressobj() bytes = decompressor.decompress(self.compressed) - raises(OverflowError, decompressor.flush, 2**31) bytes += decompressor.flush() assert bytes == self.expanded @@ -165,10 +164,8 @@ raises(ValueError, zlib.decompressobj().flush, 0) raises(ValueError, zlib.decompressobj().flush, -1) raises(TypeError, zlib.decompressobj().flush, None) - raises(OverflowError, zlib.decompressobj().flush, 2**31) raises(ValueError, zlib.decompressobj().decompress, b'abc', -1) raises(TypeError, zlib.decompressobj().decompress, b'abc', None) - raises(OverflowError, zlib.decompressobj().decompress, b'abc', 2**31) raises(TypeError, self.zlib.decompress, self.compressed, None) raises(OverflowError, self.zlib.decompress, self.compressed, 2**31) From pypy.commits at gmail.com Sun Dec 18 17:01:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 14:01:50 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: Add a link from pypy.org to the bitbucket issues page Message-ID: <5857074e.c515c20a.a3d17.9329@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r836:a9c19b179793 Date: 2016-12-18 23:01 +0100 http://bitbucket.org/pypy/pypy.org/changeset/a9c19b179793/ Log: Add a link from pypy.org to the bitbucket issues page diff --git a/archive.html b/archive.html --- a/archive.html +++ b/archive.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html 
@@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/howtohelp.html b/howtohelp.html --- a/howtohelp.html +++ b/howtohelp.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/source/_layouts/site.genshi b/source/_layouts/site.genshi --- a/source/_layouts/site.genshi +++ b/source/_layouts/site.genshi @@ -11,6 +11,7 @@ ('Performance', 'performance.html'), ('Dev Documentation', 'http://doc.pypy.org'), ('Blog', 'http://morepypy.blogspot.com'), + ('Bug/Issues', 'https://bitbucket.org/pypy/pypy/issues?status=new&status=open'), ('People', 'people.html'), ('Contact', 'contact.html'), ('Py3k donations', 'py3donate.html'), diff --git a/sponsor.html b/sponsor.html --- a/sponsor.html +++ b/sponsor.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/success.html b/success.html --- a/success.html +++ b/success.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/tmdonate.html 
b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact diff --git a/tmdonate2.html b/tmdonate2.html --- a/tmdonate2.html +++ b/tmdonate2.html @@ -53,6 +53,8 @@ | Blog | + Bug/Issues + | People | Contact From pypy.commits at gmail.com Sun Dec 18 17:29:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 14:29:24 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: No clue if it's a good approach, but trying it out: allow cursors to Message-ID: <58570dc4.4438c20a.b4b80.ac6c@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89170:5d96f9ed88fe Date: 2016-12-18 23:28 +0100 http://bitbucket.org/pypy/pypy/changeset/5d96f9ed88fe/ Log: No clue if it's a good approach, but trying it out: allow cursors to linger like 2.7.13, but if they cause SQLITE_LOCKED, reset them all and try again the operation. diff --git a/lib-python/2.7/sqlite3/test/regression.py b/lib-python/2.7/sqlite3/test/regression.py --- a/lib-python/2.7/sqlite3/test/regression.py +++ b/lib-python/2.7/sqlite3/test/regression.py @@ -351,10 +351,7 @@ self.assertRaises(ValueError, cur.execute, " \0select 2") self.assertRaises(ValueError, cur.execute, "select 2\0") - @test_support.impl_detail(pypy=False) def CheckCommitCursorReset(self): - # This test is for logic added in 2.7.13 which PyPy doesn't - # implement. 
See http://bugs.python.org/issue29006 """ Connection.commit() did reset cursors, which made sqlite3 to return rows multiple times when fetched from cursors diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -363,6 +363,14 @@ if cursor is not None: cursor._reset = True + def _reset_all_statements(self): + total = 0 + for weakref in self.__statements: + statement = weakref() + if statement is not None: + total += statement._reset() + return total + @_check_thread_wrap @_check_closed_wrap def __call__(self, sql): @@ -418,16 +426,6 @@ if not self._in_transaction: return - # The following line is a KNOWN DIFFERENCE with CPython 2.7.13. - # More precisely, the corresponding line was removed in the - # version 2.7.13 of CPython, but this is causing troubles for - # PyPy (and potentially for CPython too): - # - # http://bugs.python.org/issue29006 - # - # So for now, we keep this line. - self.__do_all_statements(Statement._reset, False) - statement_star = _ffi.new('sqlite3_stmt **') ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1, statement_star, _ffi.NULL) @@ -827,7 +825,26 @@ self.__statement._set_params(params) # Actually execute the SQL statement - ret = _lib.sqlite3_step(self.__statement._statement) + + # NOTE: if we get SQLITE_LOCKED, it's probably because + # one of the cursors created previously is still alive + # and not reset and the operation we're trying to do + # makes Sqlite unhappy about that. In that case, we + # automatically reset all cursors and try again. This + # is not what CPython does! It is a workaround for a + # new feature of 2.7.13. Previously, all cursors would + # be reset at commit(), which makes it unlikely to have + # cursors lingering around. Since 2.7.13, cursors stay + # around instead. This causes problems here---at least: + # this is the only place shown by pysqlite tests, and I + # can only hope there is no other. 
+ + while True: + ret = _lib.sqlite3_step(self.__statement._statement) + if (ret == _lib.SQLITE_LOCKED and + self.__connection._reset_all_statements()): + continue + break if ret == _lib.SQLITE_ROW: if multiple: @@ -1057,6 +1074,8 @@ if self._in_use and self._statement: _lib.sqlite3_reset(self._statement) self._in_use = False + return 1 + return 0 if sys.version_info[0] < 3: def __check_decodable(self, param): diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -488,11 +488,6 @@ the rest is kept. If you return an unexpected string from ``__hex__()`` you get an exception (or a crash before CPython 2.7.13). -* The ``sqlite`` module was updated on 2.7.13 to no longer reset all - cursors when there is a commit. This causes troubles for PyPy (and - potentially for CPython too), and so for now we didn't port this change: - see http://bugs.python.org/issue29006. - .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. 
_`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ From pypy.commits at gmail.com Sun Dec 18 18:31:37 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 18 Dec 2016 15:31:37 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: simplify code a bit Message-ID: <58571c59.c11d1c0a.5313b.54b0@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89171:55980e9a374e Date: 2016-12-19 00:30 +0100 http://bitbucket.org/pypy/pypy/changeset/55980e9a374e/ Log: simplify code a bit diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -364,12 +364,7 @@ cursor._reset = True def _reset_all_statements(self): - total = 0 - for weakref in self.__statements: - statement = weakref() - if statement is not None: - total += statement._reset() - return total + self.__do_all_statements(Statement._reset, False) @_check_thread_wrap @_check_closed_wrap @@ -839,12 +834,10 @@ # this is the only place shown by pysqlite tests, and I # can only hope there is no other. 
- while True: + ret = _lib.sqlite3_step(self.__statement._statement) + if ret == _lib.SQLITE_LOCKED: + self.__connection._reset_all_statements() ret = _lib.sqlite3_step(self.__statement._statement) - if (ret == _lib.SQLITE_LOCKED and - self.__connection._reset_all_statements()): - continue - break if ret == _lib.SQLITE_ROW: if multiple: @@ -1074,8 +1067,6 @@ if self._in_use and self._statement: _lib.sqlite3_reset(self._statement) self._in_use = False - return 1 - return 0 if sys.version_info[0] < 3: def __check_decodable(self, param): From pypy.commits at gmail.com Sun Dec 18 19:07:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 18 Dec 2016 16:07:46 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser: const-correctness for tp_doc and tp_name Message-ID: <585724d2.a285c20a.d98b9.c084@mx.google.com> Author: Ronan Lamy Branch: rffi-parser Changeset: r89172:e3991a2b6927 Date: 2016-12-19 00:07 +0000 http://bitbucket.org/pypy/pypy/changeset/e3991a2b6927/ Log: const-correctness for tp_doc and tp_name diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -10,10 +10,8 @@ flags = pyobj.c_ob_type.c_tp_flags if (flags & Py_TPFLAGS_HAVE_NEWBUFFER and as_buffer.c_bf_getbuffer): return 1 - name = rffi.charp2str(pyobj.c_ob_type.c_tp_name) - if name in ('str', 'bytes'): + name = rffi.charp2str(rffi.cast(rffi.CCHARP, pyobj.c_ob_type.c_tp_name)) + if name in ('str', 'bytes'): # XXX remove once wrapper of __buffer__ -> bf_getbuffer works return 1 - return 0 - - + return 0 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -322,7 +322,8 @@ wrapper_func_kwds, doc, func_voidp, offset=offset) dict_w[method_name] = space.wrap(w_obj) if pto.c_tp_doc: - dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc)) + dict_w['__doc__'] = space.newbytes( + 
rffi.charp2str(rffi.cast(rffi.CCHARP, pto.c_tp_doc))) if pto.c_tp_new: add_tp_new_wrapper(space, dict_w, pto) @@ -456,7 +457,7 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, self) convert_member_defs(space, dict_w, pto.c_tp_members, self) - name = rffi.charp2str(pto.c_tp_name) + name = rffi.charp2str(rffi.cast(rffi.CCHARP, pto.c_tp_name)) flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if flag_heaptype: minsize = rffi.sizeof(PyHeapTypeObject.TO) @@ -475,7 +476,8 @@ not (pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_slice)): self.flag_map_or_seq = 'M' if pto.c_tp_doc: - self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc)) + self.w_doc = space.newbytes( + rffi.charp2str(rffi.cast(rffi.CCHARP, pto.c_tp_doc))) @bootstrap_function def init_typeobject(space): @@ -752,7 +754,7 @@ try: w_obj = _type_realize(space, py_obj) finally: - name = rffi.charp2str(pto.c_tp_name) + name = rffi.charp2str(rffi.cast(rffi.CCHARP, pto.c_tp_name)) pto.c_tp_flags &= ~Py_TPFLAGS_READYING pto.c_tp_flags |= Py_TPFLAGS_READY return w_obj @@ -858,7 +860,7 @@ base = pto.c_tp_base base_pyo = rffi.cast(PyObject, pto.c_tp_base) if base and not base.c_tp_flags & Py_TPFLAGS_READY: - name = rffi.charp2str(base.c_tp_name) + name = rffi.charp2str(rffi.cast(rffi.CCHARP, base.c_tp_name)) type_realize(space, base_pyo) if base and not pto.c_ob_type: # will be filled later pto.c_ob_type = base.c_ob_type From pypy.commits at gmail.com Sun Dec 18 23:21:34 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 18 Dec 2016 20:21:34 -0800 (PST) Subject: [pypy-commit] pypy issue2444: translation fixes, redo self.pyobj to avoid check_graph_of_del_does_not_call_too_much() error Message-ID: <5857604e.c9b3c20a.bbe06.fb2c@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89173:bfce98836768 Date: 2016-12-18 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/bfce98836768/ Log: translation fixes, redo self.pyobj to avoid check_graph_of_del_does_not_call_too_much() error diff --git 
a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -14,7 +14,7 @@ ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc, getbufferproc, releasebufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, Py_DecRef +from pypy.module.cpyext.pyobject import make_ref, decref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State @@ -99,7 +99,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): @@ -110,7 +110,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_ternaryfunc(space, w_self, w_args, func): @@ -132,7 +132,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) arg3 = space.w_None if len(args_w) > 1: arg3 = args_w[1] @@ -329,6 +329,7 @@ self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.pyobj = make_ref(space, w_obj) self.format = format if not shape: self.shape = [size] @@ -344,6 +345,12 @@ self.releasebufferproc = releasebuffer def releasebuffer(self): + if self.pyobj: + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return 
if self.releasebufferproc: func_target = rffi.cast(releasebufferproc, self.releasebufferproc) with lltype.scoped_alloc(Py_buffer) as pybuf: @@ -354,8 +361,8 @@ pybuf.c_shape[i] = self.shape[i] pybuf.c_strides[i] = self.strides[i] pybuf.c_format = rffi.str2charp(self.format) - generic_cpy_call(self.space, func_target, self.w_obj, pybuf) - self.releasebufferproc = None + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + self.releasebufferproc = rffi.cast(rffi.VOIDP, 0) def getlength(self): return self.size @@ -400,7 +407,11 @@ func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type - releasebuffer = py_type.c_tp_as_buffer and py_type.c_tp_as_buffer.c_bf_releasebuffer + releasebuffer = rffi.cast(rffi.VOIDP, 0) + need_finalizer = False + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + need_finalizer = True with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) @@ -408,14 +419,19 @@ space.fromcache(State).check_and_raise_exception(always=True) buf = CPyBuffer(space, ptr[0], size, w_self, releasebuffer=releasebuffer) - fq.register_finalizer(buf) + if need_finalizer: + fq.register_finalizer(buf) return space.newbuffer(buf) def wrap_getwritebuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type - releasebuffer = py_type.c_tp_as_buffer and py_type.c_tp_as_buffer.c_bf_releasebuffer + releasebuffer = rffi.cast(rffi.VOIDP, 0) + need_finalizer = False + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + need_finalizer = True with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) @@ -423,14 +439,19 @@ 
space.fromcache(State).check_and_raise_exception(always=True) buf = CPyBuffer(space, ptr[0], size, w_self, readonly=False, releasebuffer=releasebuffer) - fq.register_finalizer(buf) + if need_finalizer: + fq.register_finalizer(buf) return space.newbuffer(buf) def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type - releasebuffer = py_type.c_tp_as_buffer and py_type.c_tp_as_buffer.c_bf_releasebuffer + releasebuffer = rffi.cast(rffi.VOIDP, 0) + need_finalizer = False + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + need_finalizer = True with lltype.scoped_alloc(Py_buffer) as pybuf: _flags = 0 if space.len_w(w_args) > 0: @@ -456,7 +477,8 @@ itemsize=pybuf.c_itemsize, readonly=widen(pybuf.c_readonly), releasebuffer = releasebuffer) - fq.register_finalizer(buf) + if need_finalizer: + fq.register_finalizer(buf) return space.newbuffer(buf) def get_richcmp_func(OP_CONST): From pypy.commits at gmail.com Sun Dec 18 23:21:36 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 18 Dec 2016 20:21:36 -0800 (PST) Subject: [pypy-commit] pypy issue2444: merge default into branch Message-ID: <58576050.212dc20a.22a2c.eddf@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89174:b3b99a26e2b9 Date: 2016-12-18 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/b3b99a26e2b9/ Log: merge default into branch diff too long, truncating to 2000 out of 15848 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -190,6 +190,12 @@ "make sure that all calls go through space.call_args", default=False), + BoolOption("disable_entrypoints", + "Disable external entry points, notably 
the" + " cpyext module and cffi's embedding mode.", + default=False, + requires=[("objspace.usemodules.cpyext", False)]), + OptionDescription("std", "Standard Object Space Options", [ BoolOption("withtproxy", "support transparent proxies", default=True), diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors +------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz @@ -307,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,145 +1,61 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++ (11, 14, etc.). +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a `backend`_, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. 
_CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. _llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ - -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. 
-For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. -As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html +.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. 
+ $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _ROOT: http://root.cern.ch/ +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. 
- -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. -Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). +If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. -First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. 
+Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -153,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -209,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -311,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -370,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared 
-I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -666,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. +For example:: $ cat MyTemplate.h #include @@ -686,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. -The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. 
-One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. -Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -764,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. 
The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. The latter makes for easier code writing if the classes passed to the @@ -775,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. -The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. 
-Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. -If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. -The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. 
-In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. +Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -871,5 +669,4 @@ .. 
toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. - -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. 
-However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -76,11 +76,14 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling +.. toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. 
toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -45,3 +45,22 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). + +.. branch: better-PyDict_Next + +Improve the performance of ``PyDict_Next``. When trying ``PyDict_Next`` on a +typedef dict, the test exposed a problem converting a ``GetSetProperty`` to a +``PyGetSetDescrObject``. The other direction seem to be fully implemented. +This branch made a minimal effort to convert the basic fields to avoid +segfaults, but trying to use the ``PyGetSetDescrObject`` will probably fail. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,12 +83,18 @@ return 1 return exitcode + return entry_point, get_additional_entrypoints(space, w_initstdio) + + +def get_additional_entrypoints(space, w_initstdio): # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype + if space.config.objspace.disable_entrypoints: + return {} + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -188,11 +194,11 @@ return -1 return 0 - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + return {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -428,6 +428,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -247,16 +247,15 @@ def descr_function_repr(self): return self.getrepr(self.space, 'function %s' % (self.name,)) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r 
and %r" % ( identifier, previous, self)) @@ -264,10 +263,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -671,10 +671,10 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code - find = staticmethod(find) + return Function.find(space, identifier).code def signature(self): return self.sig diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,6 +4,7 @@ import sys from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg +from rpython.rlib.debug import ll_assert_not_none from rpython.rlib.jit import hint from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint @@ -298,7 +299,13 @@ # stack manipulation helpers def pushvalue(self, w_object): depth = self.valuestackdepth - self.locals_cells_stack_w[depth] = w_object + self.locals_cells_stack_w[depth] = ll_assert_not_none(w_object) + self.valuestackdepth = depth + 1 + + def pushvalue_none(self): + depth = self.valuestackdepth + # the entry is already None, and remains None + assert self.locals_cells_stack_w[depth] is None self.valuestackdepth = depth + 1 def _check_stack_index(self, index): @@ -311,6 +318,9 @@ return index >= stackstart def popvalue(self): + return 
ll_assert_not_none(self.popvalue_maybe_none()) + + def popvalue_maybe_none(self): depth = self.valuestackdepth - 1 assert self._check_stack_index(depth) assert depth >= 0 @@ -385,6 +395,9 @@ def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). + return ll_assert_not_none(self.peekvalue_maybe_none(index_from_top)) + + def peekvalue_maybe_none(self, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) @@ -396,7 +409,7 @@ index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) assert index >= 0 - self.locals_cells_stack_w[index] = w_object + self.locals_cells_stack_w[index] = ll_assert_not_none(w_object) @jit.unroll_safe def dropvaluesuntil(self, finaldepth): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,6 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi VERSION = "1.9.1" @@ -68,9 +68,14 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL - def startup(self, space): - from pypy.module._cffi_backend import embedding - embedding.glob.space = space + def __init__(self, space, *args): + MixedModule.__init__(self, space, *args) + # + if not space.config.objspace.disable_entrypoints: + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): @@ -85,11 +90,3 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % 
_value - - -# write this entrypoint() here, to make sure it is registered early enough - at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], - c_name='pypy_init_embedded_cffi_module') -def pypy_init_embedded_cffi_module(version, init_struct): - from pypy.module._cffi_backend import embedding - return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,4 +1,5 @@ import os +from rpython.rlib import entrypoint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -46,6 +47,8 @@ glob = Global() + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') def pypy_init_embedded_cffi_module(version, init_struct): # called from __init__.py name = "?" diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -77,7 +77,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) @@ -86,7 +86,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -169,8 +169,8 @@ } def setup_method(self, method): - # 
https://www.verisign.net/ - ADDR = "www.verisign.net", 443 + # https://gmail.com/ + ADDR = "gmail.com", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml -I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = 
backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( "cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, 
releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + "cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, 
C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return _c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ 
compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG -C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ -1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = 
pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open 
file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. 
(char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - space.setattr(w_self, space.wrap('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = 
tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, 
cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.wrap(i)) - activate_branch(space, w_b) - space.call_method(w_branch, "SetStatus", space.wrap(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.wrap('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it for 
reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data - w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.wrap('_'+attr), space.wrap(cdata)) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, 
w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) - - def iter_w(self): - return self.space.wrap(self) - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - 
_method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.wrap("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.wrap("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, lltype 
+from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return 
rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... +# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise 
OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem 
import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information 'is_namespace' : ([c_scope], c_int), + 'is_template' : ([c_ccharp], c_int), + 'is_abstract' : ([c_type], c_int), 'is_enum' : ([c_ccharp], c_int), # type/class reflection information @@ -216,8 +249,14 @@ 'strtoull' : ([c_ccharp], c_ullong), 'free' : ([c_voidp], c_void), - 'charp2stdstring' : ([c_ccharp], c_object), + 'charp2stdstring' : ([c_ccharp, c_size_t], c_object), + #stdstring2charp actually takes an size_t* as last parameter, but this will do + 'stdstring2charp' : ([c_object, c_voidp], c_ccharp), 'stdstring2stdstring' : ([c_object], c_object), + + 'stdvector_valuetype' : ([c_ccharp], c_ccharp), + 'stdvector_valuesize' : ([c_ccharp], c_size_t), + } # size/offset are backend-specific but fixed after load @@ -277,87 +316,99 @@ ptr = w_cdata.unsafe_escaping_ptr() return rffi.cast(rffi.VOIDP, ptr) +def _cdata_to_ccharp(space, w_cdata): + ptr = _cdata_to_ptr(space, w_cdata) # see above ... something better? 
+ return rffi.cast(rffi.CCHARP, ptr) + def c_load_dictionary(name): return libffi.CDLL(name) # name to opaque C++ scope representation ------------------------------------ def c_num_scopes(space, cppscope): - return space.int_w(call_capi(space, 'num_scopes', [_Arg(h=cppscope.handle)])) + return space.int_w(call_capi(space, 'num_scopes', [_ArgH(cppscope.handle)])) def c_scope_name(space, cppscope, iscope): - args = [_Arg(h=cppscope.handle), _Arg(l=iscope)] + args = [_ArgH(cppscope.handle), _ArgL(iscope)] return charp2str_free(space, call_capi(space, 'scope_name', args)) def c_resolve_name(space, name): - return charp2str_free(space, call_capi(space, 'resolve_name', [_Arg(s=name)])) + return charp2str_free(space, call_capi(space, 'resolve_name', [_ArgS(name)])) def c_get_scope_opaque(space, name): - return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_Arg(s=name)]))) -def c_get_template(space, name): - return rffi.cast(C_TYPE, space.uint_w(call_capi(space, 'get_template', [_Arg(s=name)]))) + return rffi.cast(C_SCOPE, space.uint_w(call_capi(space, 'get_scope', [_ArgS(name)]))) def c_actual_class(space, cppclass, cppobj): - args = [_Arg(h=cppclass.handle), _Arg(h=cppobj)] From pypy.commits at gmail.com Sun Dec 18 23:21:38 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 18 Dec 2016 20:21:38 -0800 (PST) Subject: [pypy-commit] pypy issue2444: failing test for leak in PyMemoryView_GET_BUFFER Message-ID: <58576052.d5091c0a.6942f.84b2@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89175:041bc115e0bd Date: 2016-12-19 06:20 +0200 http://bitbucket.org/pypy/pypy/changeset/041bc115e0bd/ Log: failing test for leak in PyMemoryView_GET_BUFFER diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -173,5 +173,8 @@ view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) 
rffi.setintfield(view, 'c_readonly', 1) isstr = True + # XXX leaks the view object and never decrefs the view.c_obj + # In cpython the view is a field of the PyMemoryViewObject + # and view.obj is decrefed in memory_dealloc return view diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -190,11 +190,18 @@ { Py_buffer* view = NULL; PyObject* obj = PyTuple_GetItem(args, 0); + int before_cnt = obj->ob_refcnt; PyObject* memoryview = PyMemoryView_FromObject(obj); if (memoryview == NULL) return PyInt_FromLong(-1); view = PyMemoryView_GET_BUFFER(memoryview); Py_DECREF(memoryview); + if (obj->ob_refcnt != before_cnt) + { + PyErr_SetString(PyExc_RuntimeError, + "leaking view->obj from PyMemoryView_GET_BUFFER"); + return NULL; + } return PyInt_FromLong(view->len); } From pypy.commits at gmail.com Mon Dec 19 03:17:42 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 19 Dec 2016 00:17:42 -0800 (PST) Subject: [pypy-commit] pypy py3.5: #2452 check that input of compress/decompress does not overflow 32 bit Message-ID: <585797a6.4673c20a.fafa7.3893@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89176:a1d41e7ebbb6 Date: 2016-12-19 09:16 +0100 http://bitbucket.org/pypy/pypy/changeset/a1d41e7ebbb6/ Log: #2452 check that input of compress/decompress does not overflow 32 bit diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -8,6 +8,7 @@ from rpython.rlib import rzlib +UINT_MAX = 2**32-1 @unwrap_spec(string='bufferstr', start='truncatedint_w') def crc32(space, string, start = rzlib.CRC32_DEFAULT_START): @@ -51,6 +52,8 @@ Optional arg level is the compression level, in 1-9. 
""" + if len(string) > UINT_MAX: + raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: try: stream = rzlib.deflateInit(level) @@ -73,6 +76,8 @@ Optional arg wbits is the window buffer size. Optional arg bufsize is only for compatibility with CPython and is ignored. """ + if len(string) > UINT_MAX: + raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: try: stream = rzlib.inflateInit(wbits) @@ -147,6 +152,8 @@ Call the flush() method to clear these buffers. """ + if len(data) > UINT_MAX: + raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: self.lock() try: @@ -277,10 +284,12 @@ unconsumed_tail attribute. """ if max_length == 0: - max_length = sys.maxint + max_length = UINT_MAX elif max_length < 0: raise oefmt(space.w_ValueError, "max_length must be greater than zero") + elif len(data) > UINT_MAX: + raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: self.lock() try: From pypy.commits at gmail.com Mon Dec 19 05:23:42 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 02:23:42 -0800 (PST) Subject: [pypy-commit] pypy default: The first half of the changes to weakref.py are finally fixed in CPython Message-ID: <5857b52e.ce941c0a.d731b.0655@mx.google.com> Author: Armin Rigo Branch: Changeset: r89177:a07c1410d491 Date: 2016-12-19 11:22 +0100 http://bitbucket.org/pypy/pypy/changeset/a07c1410d491/ Log: The first half of the changes to weakref.py are finally fixed in CPython too (issue #19542, will be in 2.7.14). Adapt the code to be identical. 
diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -213,10 +213,10 @@ if o is None: if args: return args[0] - raise KeyError, key + else: + raise KeyError, key else: return o - # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: @@ -230,7 +230,6 @@ return default else: return o - # The logic above was fixed in PyPy def update(*args, **kwargs): if not args: From pypy.commits at gmail.com Mon Dec 19 06:06:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 03:06:14 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: A bit random, but it is enough to reset all "other" statements and leave Message-ID: <5857bf26.c5371c0a.f6936.1293@mx.google.com> Author: Armin Rigo Branch: stdlib-2.7.13 Changeset: r89178:2aa05052bb18 Date: 2016-12-19 12:05 +0100 http://bitbucket.org/pypy/pypy/changeset/2aa05052bb18/ Log: A bit random, but it is enough to reset all "other" statements and leave the statement we're currently trying to run un-reset here. 
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -363,8 +363,11 @@ if cursor is not None: cursor._reset = True - def _reset_all_statements(self): - self.__do_all_statements(Statement._reset, False) + def _reset_other_statements(self, excepted): + for weakref in self.__statements: + statement = weakref() + if statement is not None and statement is not excepted: + statement._reset() @_check_thread_wrap @_check_closed_wrap @@ -836,7 +839,7 @@ ret = _lib.sqlite3_step(self.__statement._statement) if ret == _lib.SQLITE_LOCKED: - self.__connection._reset_all_statements() + self.__connection._reset_other_statements(self.__statement) ret = _lib.sqlite3_step(self.__statement._statement) if ret == _lib.SQLITE_ROW: From pypy.commits at gmail.com Mon Dec 19 08:38:49 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 19 Dec 2016 05:38:49 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: remove str unwrap_specs in _winreg module Message-ID: <5857e2e9.4dd41c0a.c02f0.4a97@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89179:8d63ef70f9e6 Date: 2016-12-19 14:38 +0100 http://bitbucket.org/pypy/pypy/changeset/8d63ef70f9e6/ Log: remove str unwrap_specs in _winreg module diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -149,7 +149,7 @@ if ret != 0: raiseWindowsError(space, ret, 'RegFlushKey') - at unwrap_spec(subkey=str, filename=str) + at unwrap_spec(subkey="text", filename="text") def LoadKey(space, w_hkey, subkey, filename): """LoadKey(key, sub_key, file_name) - Creates a subkey under the specified key and stores registration information from a specified file into that subkey. 
@@ -173,7 +173,7 @@ if ret != 0: raiseWindowsError(space, ret, 'RegLoadKey') - at unwrap_spec(filename=str) + at unwrap_spec(filename="text") def SaveKey(space, w_hkey, filename): """SaveKey(key, file_name) - Saves the specified key, and all its subkeys to the specified file. @@ -192,7 +192,7 @@ if ret != 0: raiseWindowsError(space, ret, 'RegSaveKey') - at unwrap_spec(typ=int, value=str) + at unwrap_spec(typ=int, value="text") def SetValue(space, w_hkey, w_subkey, typ, value): """SetValue(key, sub_key, type, value) - Associates a value with a specified key. @@ -388,7 +388,7 @@ else: # REG_BINARY and all other types return space.newbytes(rffi.charpsize2str(buf, buflen)) - at unwrap_spec(value_name=str, typ=int) + at unwrap_spec(value_name="text", typ=int) def SetValueEx(space, w_hkey, value_name, w_reserved, typ, w_value): """SetValueEx(key, value_name, reserved, type, value) - Stores data in the value field of an open registry key. @@ -469,7 +469,7 @@ space.newint(intmask(retType[0])), ]) - at unwrap_spec(subkey=str) + at unwrap_spec(subkey="text") def CreateKey(space, w_hkey, subkey): """key = CreateKey(key, sub_key) - Creates or opens the specified key. @@ -489,7 +489,7 @@ raiseWindowsError(space, ret, 'CreateKey') return W_HKEY(space, rethkey[0]) - at unwrap_spec(subkey=str, res=int, sam=rffi.r_uint) + at unwrap_spec(subkey="text", res=int, sam=rffi.r_uint) def CreateKeyEx(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_WRITE): """key = CreateKey(key, sub_key) - Creates or opens the specified key. @@ -511,7 +511,7 @@ raiseWindowsError(space, ret, 'CreateKeyEx') return W_HKEY(space, rethkey[0]) - at unwrap_spec(subkey=str) + at unwrap_spec(subkey="text") def DeleteKey(space, w_hkey, subkey): """DeleteKey(key, sub_key) - Deletes the specified key. 
@@ -528,7 +528,7 @@ if ret != 0: raiseWindowsError(space, ret, 'RegDeleteKey') - at unwrap_spec(subkey=str) + at unwrap_spec(subkey="text") def DeleteValue(space, w_hkey, subkey): """DeleteValue(key, value) - Removes a named value from a registry key. @@ -539,7 +539,7 @@ if ret != 0: raiseWindowsError(space, ret, 'RegDeleteValue') - at unwrap_spec(subkey=str, res=int, sam=rffi.r_uint) + at unwrap_spec(subkey="text", res=int, sam=rffi.r_uint) def OpenKey(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_READ): """key = OpenKey(key, sub_key, res = 0, sam = KEY_READ) - Opens the specified key. @@ -727,7 +727,7 @@ raise oefmt(space.w_NotImplementedError, "not implemented on this platform") - at unwrap_spec(subkey=str) + at unwrap_spec(subkey="text") def DeleteKeyEx(space, w_key, subkey): """DeleteKeyEx(key, sub_key, sam, res) - Deletes the specified key. From pypy.commits at gmail.com Mon Dec 19 08:47:44 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 19 Dec 2016 05:47:44 -0800 (PST) Subject: [pypy-commit] pypy space-newtext: merge default Message-ID: <5857e500.c4811c0a.16ab2.5386@mx.google.com> Author: Carl Friedrich Bolz Branch: space-newtext Changeset: r89180:8e2bcfb77e80 Date: 2016-12-19 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/8e2bcfb77e80/ Log: merge default diff too long, truncating to 2000 out of 3192 lines diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -213,10 +213,10 @@ if o is None: if args: return args[0] - raise KeyError, key + else: + raise KeyError, key else: return o - # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: @@ -230,7 +230,6 @@ return default else: return o - # The logic above was fixed in PyPy def update(*args, **kwargs): if not args: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -190,6 +190,12 @@ "make sure that all calls 
go through space.call_args", default=False), + BoolOption("disable_entrypoints", + "Disable external entry points, notably the" + " cpyext module and cffi's embedding mode.", + default=False, + requires=[("objspace.usemodules.cpyext", False)]), + OptionDescription("std", "Standard Object Space Options", [ BoolOption("withtproxy", "support transparent proxies", default=True), diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -56,3 +56,11 @@ The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic template instantations, and improved integration with CFFI for better performance. It also provides interactive C++ (and bindings to that). + +.. branch: better-PyDict_Next + +Improve the performance of ``PyDict_Next``. When trying ``PyDict_Next`` on a +typedef dict, the test exposed a problem converting a ``GetSetProperty`` to a +``PyGetSetDescrObject``. The other direction seem to be fully implemented. +This branch made a minimal effort to convert the basic fields to avoid +segfaults, but trying to use the ``PyGetSetDescrObject`` will probably fail. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,12 +83,18 @@ return 1 return exitcode + return entry_point, get_additional_entrypoints(space, w_initstdio) + + +def get_additional_entrypoints(space, w_initstdio): # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype + if space.config.objspace.disable_entrypoints: + return {} + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -188,11 +194,11 @@ return -1 return 0 - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + return {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,6 +4,7 @@ import sys from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg +from rpython.rlib.debug import ll_assert_not_none from rpython.rlib.jit import hint from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint @@ -298,7 +299,13 @@ # stack manipulation helpers def pushvalue(self, w_object): depth = self.valuestackdepth - self.locals_cells_stack_w[depth] = w_object + self.locals_cells_stack_w[depth] = ll_assert_not_none(w_object) + self.valuestackdepth = depth + 1 + + def pushvalue_none(self): + depth = self.valuestackdepth + # the entry is already None, and remains None + assert self.locals_cells_stack_w[depth] is None self.valuestackdepth = depth + 1 def _check_stack_index(self, index): @@ -311,6 +318,9 @@ return index >= stackstart def popvalue(self): + return 
ll_assert_not_none(self.popvalue_maybe_none()) + + def popvalue_maybe_none(self): depth = self.valuestackdepth - 1 assert self._check_stack_index(depth) assert depth >= 0 @@ -385,6 +395,9 @@ def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). + return ll_assert_not_none(self.peekvalue_maybe_none(index_from_top)) + + def peekvalue_maybe_none(self, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) @@ -396,7 +409,7 @@ index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) assert index >= 0 - self.locals_cells_stack_w[index] = w_object + self.locals_cells_stack_w[index] = ll_assert_not_none(w_object) @jit.unroll_safe def dropvaluesuntil(self, finaldepth): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,6 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi VERSION = "1.9.1" @@ -68,9 +68,14 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL - def startup(self, space): - from pypy.module._cffi_backend import embedding - embedding.glob.space = space + def __init__(self, space, *args): + MixedModule.__init__(self, space, *args) + # + if not space.config.objspace.disable_entrypoints: + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): @@ -85,11 +90,3 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % 
_value - - -# write this entrypoint() here, to make sure it is registered early enough - at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], - c_name='pypy_init_embedded_cffi_module') -def pypy_init_embedded_cffi_module(version, init_struct): - from pypy.module._cffi_backend import embedding - return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,4 +1,5 @@ import os +from rpython.rlib import entrypoint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -46,6 +47,8 @@ glob = Global() + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') def pypy_init_embedded_cffi_module(version, init_struct): # called from __init__.py name = "?" diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -169,8 +169,8 @@ } def setup_method(self, method): - # https://www.verisign.net/ - ADDR = "www.verisign.net", 443 + # https://gmail.com/ + ADDR = "gmail.com", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -602,7 +602,7 @@ GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - PyDictObject PyClassObject'''.split(): + PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ 
b/pypy/module/cpyext/bufferobject.py @@ -31,7 +31,7 @@ dealloc=buffer_dealloc, realize=buffer_realize) -def buffer_attach(space, py_obj, w_obj): +def buffer_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyBufferObject with the given (str) buffer object. """ diff --git a/pypy/module/cpyext/bytearrayobject.py b/pypy/module/cpyext/bytearrayobject.py --- a/pypy/module/cpyext/bytearrayobject.py +++ b/pypy/module/cpyext/bytearrayobject.py @@ -7,7 +7,7 @@ PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, make_typedescr, get_typedescr, Py_IncRef) # Type PyByteArrayObject represents a mutable array of bytes. # The Python API is that of a sequence; diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -73,7 +73,7 @@ py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED return py_str -def bytes_attach(space, py_obj, w_obj): +def bytes_attach(space, py_obj, w_obj, w_userdata=None): """ Copy RPython string object contents to a PyBytesObject. The c_ob_sval must not be modified. diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -29,7 +29,7 @@ attach=complex_attach, realize=complex_realize) -def complex_attach(space, py_obj, w_obj): +def complex_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyComplexObject with the given complex object. The value must not be modified. 
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -1,11 +1,66 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.objectmodel import specialize +from pypy.interpreter.error import OperationError +from pypy.objspace.std.classdict import ClassDictStrategy +from pypy.interpreter.typedef import GetSetProperty from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, - Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_pyobj + Py_ssize_tP, CONST_STRING, PyObjectFields, cpython_struct, + bootstrap_function) +from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, as_pyobj, + make_typedescr, track_reference, create_ref, from_ref, decref, + Py_IncRef) +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import specialize + +PyDictObjectStruct = lltype.ForwardReference() +PyDictObject = lltype.Ptr(PyDictObjectStruct) +PyDictObjectFields = PyObjectFields + \ + (("ob_keys", PyObject),) +cpython_struct("PyDictObject", PyDictObjectFields, PyDictObjectStruct) + + at bootstrap_function +def init_dictobject(space): + "Type description of PyDictObject" + make_typedescr(space.w_dict.layout.typedef, + basestruct=PyDictObject.TO, + attach=dict_attach, + dealloc=dict_dealloc, + realize=dict_realize) + +def dict_attach(space, py_obj, w_obj, w_userdata=None): + """ + Fills a newly allocated PyDictObject with the given dict object. + """ + py_dict = rffi.cast(PyDictObject, py_obj) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + # Problems: if this dict is a typedict, we may have unbound GetSetProperty + # functions in the dict. 
The corresponding PyGetSetDescrObject must be + # bound to a class, but the actual w_type will be unavailable later on. + # Solution: use the w_userdata argument when assigning a PyTypeObject's + # tp_dict slot to pass a w_type in, and force creation of the pair here + if not space.is_w(w_userdata, space.gettypefor(GetSetProperty)): + # do not do this for type dict of GetSetProperty, that would recurse + w_vals = space.call_method(space.w_dict, "values", w_obj) + vals = space.listview(w_vals) + for w_v in vals: + if isinstance(w_v, GetSetProperty): + pyobj = as_pyobj(space, w_v, w_userdata) + # refcnt will be REFCNT_FROM_PYPY, no need to inc or dec + +def dict_realize(space, py_obj): + """ + Creates the dict in the interpreter + """ + w_obj = space.newdict() + track_reference(space, py_obj, w_obj) + + at cpython_api([PyObject], lltype.Void, header=None) +def dict_dealloc(space, py_obj): + py_dict = rffi.cast(PyDictObject, py_obj) + decref(space, py_dict.c_ob_keys) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + _dealloc(space, py_obj) @cpython_api([], PyObject) def PyDict_New(space): @@ -181,9 +236,9 @@ } The dictionary p should not be mutated during iteration. It is safe - (since Python 2.1) to modify the values of the keys as you iterate over the - dictionary, but only so long as the set of keys does not change. For - example: + (since Python 2.1) to modify the values but not the keys as you iterate + over the dictionary, the keys must not change. + For example: PyObject *key, *value; Py_ssize_t pos = 0; @@ -199,34 +254,32 @@ } Py_DECREF(o); }""" + if w_dict is None: return 0 - # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably - # work, but we can't work out how to not leak it if iteration does - # not complete. Alternatively, we could add some RPython-only - # dict-iterator method to move forward by N steps. 
- - w_dict.ensure_object_strategy() # make sure both keys and values can - # be borrwed - try: - w_iter = space.call_method(space.w_dict, "iteritems", w_dict) - pos = ppos[0] - while pos: - space.call_method(w_iter, "next") - pos -= 1 - - w_item = space.call_method(w_iter, "next") - w_key, w_value = space.fixedview(w_item, 2) - if pkey: - pkey[0] = as_pyobj(space, w_key) - if pvalue: - pvalue[0] = as_pyobj(space, w_value) - ppos[0] += 1 - except OperationError as e: - if not e.match(space, space.w_StopIteration): - raise + pos = ppos[0] + py_obj = as_pyobj(space, w_dict) + py_dict = rffi.cast(PyDictObject, py_obj) + if pos == 0: + # Store the current keys in the PyDictObject. + decref(space, py_dict.c_ob_keys) + w_keys = space.call_method(space.w_dict, "keys", w_dict) + py_dict.c_ob_keys = create_ref(space, w_keys) + Py_IncRef(space, py_dict.c_ob_keys) + else: + w_keys = from_ref(space, py_dict.c_ob_keys) + ppos[0] += 1 + if pos >= space.len_w(w_keys): + decref(space, py_dict.c_ob_keys) + py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) return 0 + w_key = space.listview(w_keys)[pos] + w_value = space.getitem(w_dict, w_key) + if pkey: + pkey[0] = as_pyobj(space, w_key) + if pvalue: + pvalue[0] = as_pyobj(space, w_value) return 1 @specialize.memo() diff --git a/pypy/module/cpyext/floatobject.py b/pypy/module/cpyext/floatobject.py --- a/pypy/module/cpyext/floatobject.py +++ b/pypy/module/cpyext/floatobject.py @@ -22,7 +22,7 @@ attach=float_attach, realize=float_realize) -def float_attach(space, py_obj, w_obj): +def float_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyFloatObject with the given float object. The value must not be modified. 
diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -30,7 +30,7 @@ dealloc=frame_dealloc, realize=frame_realize) -def frame_attach(space, py_obj, w_obj): +def frame_attach(space, py_obj, w_obj, w_userdata=None): "Fills a newly allocated PyFrameObject with a frame object" frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -51,7 +51,7 @@ PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method) PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) -def function_attach(space, py_obj, w_obj): +def function_attach(space, py_obj, w_obj, w_userdata=None): py_func = rffi.cast(PyFunctionObject, py_obj) assert isinstance(w_obj, Function) py_func.c_func_name = make_ref(space, space.newtext(w_obj.name)) @@ -63,7 +63,7 @@ from pypy.module.cpyext.object import _dealloc _dealloc(space, py_obj) -def code_attach(space, py_obj, w_obj): +def code_attach(space, py_obj, w_obj, w_userdata=None): py_code = rffi.cast(PyCodeObject, py_obj) assert isinstance(w_obj, PyCode) py_code.c_co_name = make_ref(space, space.newtext(w_obj.co_name)) diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,6 +1,6 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( - generic_cpy_call, cpython_api, PyObject, CONST_STRING) + generic_cpy_call, cpython_api, PyObject, CONST_STRING, CANNOT_FAIL) from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError from pypy.interpreter.module import Module @@ -124,3 +124,22 @@ w_mod = importing.add_module(space, w_name) space.setattr(w_mod, space.newtext('__file__'), 
space.newtext(pathname)) return importing.exec_code_module(space, w_mod, code, w_name) + + at cpython_api([], lltype.Void, error=CANNOT_FAIL) +def _PyImport_AcquireLock(space): + """Locking primitive to prevent parallel imports of the same module + in different threads to return with a partially loaded module. + These calls are serialized by the global interpreter lock.""" + try: + space.call_method(space.getbuiltinmodule('imp'), 'acquire_lock') + except OperationError as e: + e.write_unraisable(space, "_PyImport_AcquireLock") + + at cpython_api([], rffi.INT_real, error=CANNOT_FAIL) +def _PyImport_ReleaseLock(space): + try: + space.call_method(space.getbuiltinmodule('imp'), 'release_lock') + return 1 + except OperationError as e: + e.write_unraisable(space, "_PyImport_ReleaseLock") + return -1 diff --git a/pypy/module/cpyext/include/dictobject.h b/pypy/module/cpyext/include/dictobject.h --- a/pypy/module/cpyext/include/dictobject.h +++ b/pypy/module/cpyext/include/dictobject.h @@ -7,6 +7,10 @@ extern "C" { #endif +typedef struct { + PyObject_HEAD + PyObject *ob_keys; /* a private place to put keys during PyDict_Next */ +} PyDictObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -24,7 +24,7 @@ attach=int_attach, realize=int_realize) -def int_attach(space, py_obj, w_obj): +def int_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyIntObject with the given int object. The value must not be modified. 
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -44,7 +44,7 @@ attach=cfunction_attach, dealloc=cfunction_dealloc) -def cfunction_attach(space, py_obj, w_obj): +def cfunction_attach(space, py_obj, w_obj, w_userdata=None): assert isinstance(w_obj, W_PyCFunctionObject) py_func = rffi.cast(PyCFunctionObject, py_obj) py_func.c_m_ml = w_obj.ml diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -61,7 +61,7 @@ pyobj.c_ob_type = pytype return pyobj - def attach(self, space, pyobj, w_obj): + def attach(self, space, pyobj, w_obj, w_userdata=None): pass def realize(self, space, obj): @@ -111,8 +111,8 @@ return tp_dealloc.api_func if tp_attach: - def attach(self, space, pyobj, w_obj): - tp_attach(space, pyobj, w_obj) + def attach(self, space, pyobj, w_obj, w_userdata=None): + tp_attach(space, pyobj, w_obj, w_userdata) if tp_realize: def realize(self, space, ref): @@ -152,7 +152,7 @@ class InvalidPointerException(Exception): pass -def create_ref(space, w_obj): +def create_ref(space, w_obj, w_userdata=None): """ Allocates a PyObject, and fills its fields with info from the given interpreter object. @@ -173,7 +173,7 @@ assert py_obj.c_ob_refcnt > rawrefcount.REFCNT_FROM_PYPY py_obj.c_ob_refcnt -= 1 # - typedescr.attach(space, py_obj, w_obj) + typedescr.attach(space, py_obj, w_obj, w_userdata) return py_obj def track_reference(space, py_obj, w_obj): @@ -228,7 +228,7 @@ assert isinstance(w_type, W_TypeObject) return get_typedescr(w_type.layout.typedef).realize(space, ref) -def as_pyobj(space, w_obj): +def as_pyobj(space, w_obj, w_userdata=None): """ Returns a 'PyObject *' representing the given intepreter object. 
This doesn't give a new reference, but the returned 'PyObject *' @@ -240,7 +240,7 @@ assert not is_pyobj(w_obj) py_obj = rawrefcount.from_obj(PyObject, w_obj) if not py_obj: - py_obj = create_ref(space, w_obj) + py_obj = create_ref(space, w_obj, w_userdata) return py_obj else: return lltype.nullptr(PyObject.TO) @@ -269,14 +269,14 @@ return hop.inputconst(lltype.Bool, hop.s_result.const) @specialize.ll() -def make_ref(space, obj): +def make_ref(space, obj, w_userdata=None): """Increment the reference counter of the PyObject and return it. Can be called with either a PyObject or a W_Root. """ if is_pyobj(obj): pyobj = rffi.cast(PyObject, obj) else: - pyobj = as_pyobj(space, obj) + pyobj = as_pyobj(space, obj, w_userdata) if pyobj: assert pyobj.c_ob_refcnt > 0 pyobj.c_ob_refcnt += 1 diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -28,7 +28,7 @@ dealloc=traceback_dealloc) -def traceback_attach(space, py_obj, w_obj): +def traceback_attach(space, py_obj, w_obj, w_userdata=None): py_traceback = rffi.cast(PyTracebackObject, py_obj) traceback = space.interp_w(PyTraceback, w_obj) if traceback.next is None: diff --git a/pypy/module/cpyext/sliceobject.py b/pypy/module/cpyext/sliceobject.py --- a/pypy/module/cpyext/sliceobject.py +++ b/pypy/module/cpyext/sliceobject.py @@ -25,7 +25,7 @@ attach=slice_attach, dealloc=slice_dealloc) -def slice_attach(space, py_obj, w_obj): +def slice_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PySliceObject with the given slice object. The fields must not be modified. 
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP +from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP, PyTypeObjectPtr from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -181,6 +181,27 @@ raises(OperationError, space.call_method, w_proxy, 'clear') assert api.PyDictProxy_Check(w_proxy) + def test_typedict1(self, space, api): + py_type = make_ref(space, space.w_int) + py_dict = rffi.cast(PyTypeObjectPtr, py_type).c_tp_dict + ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') + + ppos[0] = 0 + pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw') + try: + w_copy = space.newdict() + while api.PyDict_Next(py_dict, ppos, pkey, pvalue): + w_key = from_ref(space, pkey[0]) + w_value = from_ref(space, pvalue[0]) + space.setitem(w_copy, w_key, w_value) + finally: + lltype.free(ppos, flavor='raw') + lltype.free(pkey, flavor='raw') + lltype.free(pvalue, flavor='raw') + api.Py_DecRef(py_type) # release borrowed references + # do something with w_copy ? 
+ class AppTestDictObject(AppTestCpythonExtensionBase): def test_dictproxytype(self): module = self.import_extension('foo', [ @@ -225,3 +246,16 @@ d = {"a": 1} raises(AttributeError, module.update, d, [("c", 2)]) + def test_typedict2(self): + module = self.import_extension('foo', [ + ("get_type_dict", "METH_O", + ''' + PyObject* value = args->ob_type->tp_dict; + if (value == NULL) value = Py_None; + Py_INCREF(value); + return value; + '''), + ]) + d = module.get_type_dict(1) + assert d['real'].__get__(1, 1) == 1 + diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py --- a/pypy/module/cpyext/test/test_import.py +++ b/pypy/module/cpyext/test/test_import.py @@ -37,6 +37,14 @@ stat = api.PyImport_ReloadModule(stat) assert space.getattr(stat, space.wrap("S_IMODE")) + def test_lock(self, space, api): + # "does not crash" + api._PyImport_AcquireLock() + api._PyImport_AcquireLock() + api._PyImport_ReleaseLock() + api._PyImport_ReleaseLock() + + class AppTestImportLogic(AppTestCpythonExtensionBase): def test_import_logic(self): import sys, os diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -63,7 +63,7 @@ p[i] = lltype.nullptr(PyObject.TO) return py_obj -def tuple_attach(space, py_obj, w_obj): +def tuple_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyTupleObject with the given tuple object. The buffer must not be modified. 
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -32,7 +32,7 @@ from pypy.module.cpyext.state import State from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne from pypy.module.cpyext.typeobjectdefs import ( - PyGetSetDef, PyMemberDef, newfunc, + PyGetSetDef, PyMemberDef, newfunc, getter, setter, PyNumberMethods, PyMappingMethods, PySequenceMethods, PyBufferProcs) from pypy.objspace.std.typeobject import W_TypeObject, find_best_base @@ -61,6 +61,7 @@ self.w_type = w_type doc = set = get = None if doc: + # XXX dead code? doc = rffi.charp2str(getset.c_doc) if getset.c_get: get = GettersAndSetters.getter.im_func @@ -73,6 +74,21 @@ def PyDescr_NewGetSet(space, getset, w_type): return W_GetSetPropertyEx(getset, w_type) +def make_GetSet(space, getsetprop): + py_getsetdef = lltype.malloc(PyGetSetDef, flavor='raw') + doc = getsetprop.doc + if doc: + py_getsetdef.c_doc = rffi.str2charp(doc) + else: + py_getsetdef.c_doc = rffi.cast(rffi.CCHARP, 0) + py_getsetdef.c_name = rffi.str2charp(getsetprop.getname(space)) + # XXX FIXME - actually assign these !!! + py_getsetdef.c_get = rffi.cast(getter, 0) + py_getsetdef.c_set = rffi.cast(setter, 0) + py_getsetdef.c_closure = rffi.cast(rffi.VOIDP, 0) + return py_getsetdef + + class W_MemberDescr(GetSetProperty): name = 'member_descriptor' def __init__(self, member, w_type): @@ -160,7 +176,7 @@ realize=methoddescr_realize, ) -def memberdescr_attach(space, py_obj, w_obj): +def memberdescr_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyMemberDescrObject with the given W_MemberDescr object. The values must not be modified. 
@@ -179,17 +195,21 @@ track_reference(space, obj, w_obj) return w_obj -def getsetdescr_attach(space, py_obj, w_obj): +def getsetdescr_attach(space, py_obj, w_obj, w_userdata=None): """ Fills a newly allocated PyGetSetDescrObject with the given W_GetSetPropertyEx object. The values must not be modified. """ py_getsetdescr = rffi.cast(PyGetSetDescrObject, py_obj) + if isinstance(w_obj, GetSetProperty): + py_getsetdef = make_GetSet(space, w_obj) + assert space.isinstance_w(w_userdata, space.w_type) + w_obj = W_GetSetPropertyEx(py_getsetdef, w_userdata) # XXX assign to d_dname, d_type? assert isinstance(w_obj, W_GetSetPropertyEx) py_getsetdescr.c_d_getset = w_obj.getset -def methoddescr_attach(space, py_obj, w_obj): +def methoddescr_attach(space, py_obj, w_obj, w_userdata=None): py_methoddescr = rffi.cast(PyMethodDescrObject, py_obj) # XXX assign to d_dname, d_type? assert isinstance(w_obj, W_PyCFunctionObject) @@ -665,7 +685,7 @@ return rffi.cast(PyObject, heaptype) -def type_attach(space, py_obj, w_type): +def type_attach(space, py_obj, w_type, w_userdata=None): """ Fills a newly allocated PyTypeObject from an existing type. 
""" @@ -892,7 +912,9 @@ if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) - pto.c_tp_dict = make_ref(space, w_dict) + # pass in the w_obj to convert any values that are + # unbound GetSetProperty into bound PyGetSetDescrObject + pto.c_tp_dict = make_ref(space, w_dict, w_obj) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) def PyType_IsSubtype(space, a, b): diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -62,7 +62,7 @@ py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni -def unicode_attach(space, py_obj, w_obj): +def unicode_attach(space, py_obj, w_obj, w_userdata=None): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_length = len(space.unicode_w(w_obj)) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -66,7 +66,7 @@ w_SystemExit = W_TypeObject("SystemExit") w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_VisibleDeprecationWarning = W_TypeObject("VisibleDeprecationWarning") - w_None = None + w_None = W_Root() w_bool = W_TypeObject("bool") w_int = W_TypeObject("int") diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -80,14 +80,14 @@ if w_value is None: w_value = space.getattr(w_obj, w_name) f.pushvalue(w_value) - f.pushvalue(None) + f.pushvalue_none() @jit.unroll_safe def CALL_METHOD(f, oparg, *ignored): # opargs contains the arg, and kwarg count, excluding the implicit 'self' n_args = oparg & 0xff n_kwargs = (oparg >> 8) & 0xff - w_self = f.peekvalue(n_args + (2 * n_kwargs)) + w_self = f.peekvalue_maybe_none(n_args + (2 * n_kwargs)) n = n_args + (w_self is not 
None) if not n_kwargs: @@ -115,7 +115,7 @@ arguments, keywords, keywords_w, None, None, methodcall=w_self is not None) if w_self is None: - f.popvalue() # removes w_self, which is None + f.popvalue_maybe_none() # removes w_self, which is None w_callable = f.popvalue() if f.get_is_being_profiled() and function.is_builtin_code(w_callable): w_result = f.space.call_args_and_c_profile(f, w_callable, args) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -231,6 +231,12 @@ v = graph.getreturnvar() if v.annotation is None: self.setbinding(v, s_ImpossibleValue) + v = graph.exceptblock.inputargs[1] + if v.annotation is not None and v.annotation.can_be_none(): + raise annmodel.AnnotatorError( + "%r is found by annotation to possibly raise None, " + "but the None was not suppressed by the flow space" % + (graph,)) def validate(self): """Check that the annotation results are valid""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -484,6 +484,9 @@ def __init__(self, classdefs): self.classdefs = classdefs + def can_be_none(self): + return False + def as_SomeInstance(self): return unionof(*[SomeInstance(cdef) for cdef in self.classdefs]) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4652,6 +4652,17 @@ assert ('string formatting requires a constant string/unicode' in str(e.value)) + def test_cannot_raise_none(self): + def f(x): + s = None + if x > 5: + s = ValueError() + raise s + a = self.RPythonAnnotator() + a.build_types(f, [int]) + s_exc = a.binding(graphof(a, f).exceptblock.inputargs[1]) + assert not s_exc.can_be_none() + def g(n): return [0, 1, 2, n] diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py 
--- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -597,6 +597,9 @@ Returns an FSException object whose w_value is an instance of w_type. """ + from rpython.rlib.debug import ll_assert_not_none + + check_not_none = False w_is_type = op.isinstance(w_arg1, const(type)).eval(self) if self.guessbool(w_is_type): # this is for all cases of the form (Class, something) @@ -608,6 +611,7 @@ if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)): # raise Type, Instance: let etype be the exact type of value w_value = w_arg2 + check_not_none = True else: # raise Type, X: assume X is the constructor argument w_value = op.simple_call(w_arg1, w_arg2).eval(self) @@ -618,6 +622,10 @@ "separate value") raise Raise(const(exc)) w_value = w_arg1 + check_not_none = True + if check_not_none: + w_value = op.simple_call(const(ll_assert_not_none), + w_value).eval(self) w_type = op.type(w_value).eval(self) return FSException(w_type, w_value) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -283,6 +283,12 @@ def rewrite_op_jit_record_exact_class(self, op): return SpaceOperation("record_exact_class", [op.args[0], op.args[1]], None) + def rewrite_op_debug_assert_not_none(self, op): + if isinstance(op.args[0], Variable): + return SpaceOperation('assert_not_none', [op.args[0]], None) + else: + return [] + def rewrite_op_cast_bool_to_int(self, op): pass def rewrite_op_cast_bool_to_uint(self, op): pass def rewrite_op_cast_char_to_int(self, op): pass diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -402,7 +402,7 @@ self.encoding_test(f, [65], """ raise $<* struct object> - """) + """, transform=True) def test_exc_raise_2(self): def g(i): @@ -466,6 +466,14 @@ int_return $True """, 
transform=True) + def test_assert_disappears(self): + def f(i): + assert i > 5 + return i + self.encoding_test(f, [7], """ + int_return %i0 + """) + def test_int_floordiv_ovf_zer(self): def f(i, j): assert i >= 0 diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -563,6 +563,10 @@ ll_assert((i & 1) == 1, "bhimpl_cast_int_to_ptr: not an odd int") return lltype.cast_int_to_ptr(llmemory.GCREF, i) + @arguments("r") + def bhimpl_assert_not_none(a): + assert a + @arguments("r", "i") def bhimpl_record_exact_class(a, b): pass diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -5,6 +5,7 @@ from rpython.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize +from rpython.rlib.debug import fatalerror from rpython.jit.metainterp.history import check_descr from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr @@ -321,6 +322,10 @@ def do_keepalive(cpu, _, x): pass +def do_assert_not_none(cpu, _, box): + if not box.getref_base(): + fatalerror("found during JITting: ll_assert_not_none() failed") + # ____________________________________________________________ diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -230,7 +230,8 @@ opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and - opnum != rop.INSTANCE_PTR_NE): + opnum != rop.INSTANCE_PTR_NE and + opnum != rop.ASSERT_NOT_NONE): for box in argboxes: self._escape_box(box) @@ -263,7 +264,8 @@ opnum == rop.SETFIELD_RAW or opnum == rop.SETARRAYITEM_RAW 
or opnum == rop.SETINTERIORFIELD_RAW or - opnum == rop.RAW_STORE): + opnum == rop.RAW_STORE or + opnum == rop.ASSERT_NOT_NONE): return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or @@ -371,7 +373,7 @@ def class_now_known(self, box): if isinstance(box, Const): return - self._set_flag(box, HF_KNOWN_CLASS) + self._set_flag(box, HF_KNOWN_CLASS | HF_KNOWN_NULLITY) def is_nullity_known(self, box): if isinstance(box, Const): @@ -401,7 +403,8 @@ def new(self, box): assert isinstance(box, RefFrontendOp) self.update_version(box) - add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED) + add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED + | HF_KNOWN_NULLITY) def new_array(self, box, lengthbox): self.new(box) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -499,6 +499,9 @@ box = self.get_box_replacement(op.getarg(0)) self.make_constant(box, CONST_0) + def optimize_ASSERT_NOT_NONE(self, op): + self.make_nonnull(op.getarg(0)) + def optimize_RECORD_EXACT_CLASS(self, op): opinfo = self.getptrinfo(op.getarg(0)) expectedclassbox = op.getarg(1) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -42,6 +42,9 @@ # but it's a bit hard to implement robustly if heap.py is also run pass + def optimize_ASSERT_NOT_NONE(self, op): + pass + def optimize_RECORD_EXACT_CLASS(self, op): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ 
-5595,6 +5595,19 @@ """ self.optimize_loop(ops, expected) + def test_assert_not_none(self): + ops = """ + [p0] + assert_not_none(p0) + guard_nonnull(p0) [] + finish() + """ + expected = """ + [p0] + finish() + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -275,12 +275,18 @@ def opimpl_ptr_iszero(self, box): return self.execute(rop.PTR_EQ, box, history.CONST_NULL) + @arguments("box") + def opimpl_assert_not_none(self, box): + if self.metainterp.heapcache.is_nullity_known(box): + return + self.execute(rop.ASSERT_NOT_NONE, box) + self.metainterp.heapcache.nullity_now_known(box) + @arguments("box", "box") def opimpl_record_exact_class(self, box, clsbox): from rpython.rtyper.lltypesystem import llmemory if self.metainterp.heapcache.is_class_known(box): return - adr = clsbox.getaddr() self.execute(rop.RECORD_EXACT_CLASS, box, clsbox) self.metainterp.heapcache.class_now_known(box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1143,6 +1143,7 @@ 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5/n', 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr + 'ASSERT_NOT_NONE/1/n', # [objptr] 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', 'SAVE_EXCEPTION/0/r', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3510,6 +3510,7 @@ self.check_resops(call_f=1) def test_look_inside_iff_virtual(self): + from rpython.rlib.debug import ll_assert_not_none # There's no good reason for this to be look_inside_iff, but it's a test! 
@look_inside_iff(lambda arg, n: isvirtual(arg)) def f(arg, n): @@ -3529,7 +3530,7 @@ if n == 0: i += f(a, n) else: - i += f(A(2), n) + i += f(ll_assert_not_none(A(2)), n) res = self.meta_interp(main, [0], enable_opts='') assert res == main(0) self.check_resops(call_i=1, getfield_gc_i=0) @@ -4585,3 +4586,30 @@ assert res == -42 res = self.interp_operations(f, [0, 200]) assert res == 205 + + def test_ll_assert_not_none(self): + # the presence of ll_assert_not_none(), even in cases where it + # doesn't influence the annotation, is a hint for the JIT + from rpython.rlib.debug import ll_assert_not_none + class X: + pass + class Y(X): + pass + def g(x, check): + if check: + x = ll_assert_not_none(x) + return isinstance(x, Y) + @dont_look_inside + def make(i): + if i == 1: + return X() + if i == 2: + return Y() + return None + def f(a, b, check): + return g(make(a), check) + g(make(b), check) * 10 + res = self.interp_operations(f, [1, 2, 1]) + assert res == 10 + self.check_operations_history(guard_nonnull=0, guard_nonnull_class=0, + guard_class=2, + assert_not_none=2) # before optimization diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -83,6 +83,19 @@ assert not h.is_nullity_known(box1) assert not h.is_nullity_known(box2) + def test_known_nullity_more_cases(self): + h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + h.class_now_known(box1) + assert h.is_nullity_known(box1) + + h.new(box2) + assert h.is_nullity_known(box2) + + h.reset() + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) def test_nonstandard_virtualizable(self): h = HeapCache() diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -11,7 +11,8 @@ # Expose these here (public interface) from rpython.rtyper.debug import ( - ll_assert, 
FatalError, fatalerror, fatalerror_notb, debug_print_traceback) + ll_assert, FatalError, fatalerror, fatalerror_notb, debug_print_traceback, + ll_assert_not_none) class DebugLog(list): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1141,6 +1141,9 @@ """ Assure the JIT that value is an instance of cls. This is a precise class check, like a guard_class. + + See also debug.ll_assert_not_none(x), which asserts that x is not None + and also assures the JIT that it is the case. """ assert type(value) is cls diff --git a/rpython/rtyper/debug.py b/rpython/rtyper/debug.py --- a/rpython/rtyper/debug.py +++ b/rpython/rtyper/debug.py @@ -20,6 +20,23 @@ hop.exception_cannot_occur() hop.genop('debug_assert', vlist) +def ll_assert_not_none(x): + """assert x is not None""" + assert x is not None, "ll_assert_not_none(%r)" % (x,) + return x + +class Entry(ExtRegistryEntry): + _about_ = ll_assert_not_none + + def compute_result_annotation(self, s_x): + return s_x.nonnoneify() + + def specialize_call(self, hop): + [v0] = hop.inputargs(hop.args_r[0]) + hop.exception_cannot_occur() + hop.genop('debug_assert_not_none', [v0]) + return v0 + class FatalError(Exception): pass diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -521,6 +521,10 @@ if not x: raise LLAssertFailure(msg) + def op_debug_assert_not_none(self, x): + if not x: + raise LLAssertFailure("ll_assert_not_none() failed") + def op_debug_fatalerror(self, ll_msg, ll_exc=None): msg = ''.join(ll_msg.chars) if ll_exc is None: diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -78,7 +78,8 @@ def is_pure(self, args_v): if self.canfold: # canfold => pure operation return True - if self is llop.debug_assert: # debug_assert is pure enough + 
if (self is llop.debug_assert or # debug_assert is pure enough + self is llop.debug_assert_not_none): return True # reading from immutable if self is llop.getfield or self is llop.getarrayitem: @@ -552,6 +553,7 @@ 'debug_offset': LLOp(canrun=True), 'debug_flush': LLOp(canrun=True), 'debug_assert': LLOp(tryfold=True), + 'debug_assert_not_none': LLOp(tryfold=True), 'debug_fatalerror': LLOp(canrun=True), 'debug_llinterpcall': LLOp(canraise=(Exception,)), # Python func call 'res=arg[0](*arg[1:])' diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py deleted file mode 100644 --- a/rpython/translator/backendopt/mallocv.py +++ /dev/null @@ -1,1055 +0,0 @@ -from rpython.flowspace.model import Variable, Constant, Block, Link -from rpython.flowspace.model import SpaceOperation, copygraph -from rpython.flowspace.model import checkgraph -from rpython.translator.backendopt.support import log -from rpython.translator.simplify import join_blocks -from rpython.translator.unsimplify import varoftype -from rpython.rtyper.lltypesystem.lltype import getfunctionptr -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lloperation import llop - - -def virtualize_mallocs(translator, graphs, verbose=False): - newgraphs = graphs[:] - mallocv = MallocVirtualizer(newgraphs, translator.rtyper, verbose) - while mallocv.remove_mallocs_once(): - pass - for graph in newgraphs: - checkgraph(graph) - join_blocks(graph) - assert newgraphs[:len(graphs)] == graphs - del newgraphs[:len(graphs)] - translator.graphs.extend(newgraphs) - -# ____________________________________________________________ - - -class MallocTypeDesc(object): - - def __init__(self, MALLOCTYPE): - if not isinstance(MALLOCTYPE, lltype.GcStruct): - raise CannotRemoveThisType - self.MALLOCTYPE = MALLOCTYPE - self.check_no_destructor() - self.names_and_types = [] - self.name2index = {} - self.name2subtype = {} - self.initialize_type(MALLOCTYPE) - 
#self.immutable_struct = MALLOCTYPE._hints.get('immutable') - - def check_no_destructor(self): - STRUCT = self.MALLOCTYPE - try: - rttiptr = lltype.getRuntimeTypeInfo(STRUCT) - except ValueError: - return # ok - destr_ptr = getattr(rttiptr._obj, 'destructor_funcptr', None) - if destr_ptr: - raise CannotRemoveThisType - - def initialize_type(self, TYPE): - fieldnames = TYPE._names - firstname, FIRSTTYPE = TYPE._first_struct() - if FIRSTTYPE is not None: - self.initialize_type(FIRSTTYPE) - fieldnames = fieldnames[1:] - for name in fieldnames: - FIELDTYPE = TYPE._flds[name] - if isinstance(FIELDTYPE, lltype.ContainerType): - raise CannotRemoveThisType("inlined substructure") - self.name2index[name] = len(self.names_and_types) - self.names_and_types.append((name, FIELDTYPE)) - self.name2subtype[name] = TYPE - - -class SpecNode(object): - pass - - -class RuntimeSpecNode(SpecNode): - - def __init__(self, name, TYPE): - self.name = name - self.TYPE = TYPE - - def newvar(self): - v = Variable(self.name) - v.concretetype = self.TYPE - return v - - def getfrozenkey(self, memo): - return 'R' - - def accumulate_nodes(self, rtnodes, vtnodes): - rtnodes.append(self) - - def copy(self, memo, flagreadonly): - return RuntimeSpecNode(self.name, self.TYPE) - - def bind_rt_nodes(self, memo, newnodes_iter): - return newnodes_iter.next() - - -class VirtualSpecNode(SpecNode): - - def __init__(self, typedesc, fields, readonly=False): - self.typedesc = typedesc - self.fields = fields # list of SpecNodes - self.readonly = readonly - - def getfrozenkey(self, memo): - if self in memo: - return memo[self] - else: - memo[self] = len(memo) - result = [self.typedesc, self.readonly] - for subnode in self.fields: - result.append(subnode.getfrozenkey(memo)) - return tuple(result) - - def accumulate_nodes(self, rtnodes, vtnodes): - if self in vtnodes: - return - vtnodes[self] = True - for subnode in self.fields: - subnode.accumulate_nodes(rtnodes, vtnodes) - - def copy(self, memo, flagreadonly): - if 
self in memo: - return memo[self] - readonly = self.readonly or self in flagreadonly - newnode = VirtualSpecNode(self.typedesc, [], readonly) - memo[self] = newnode - for subnode in self.fields: - newnode.fields.append(subnode.copy(memo, flagreadonly)) - return newnode - - def bind_rt_nodes(self, memo, newnodes_iter): - if self in memo: - return memo[self] - newnode = VirtualSpecNode(self.typedesc, [], self.readonly) - memo[self] = newnode - for subnode in self.fields: - newnode.fields.append(subnode.bind_rt_nodes(memo, newnodes_iter)) - return newnode - - -class VirtualFrame(object): - - def __init__(self, sourceblock, nextopindex, - allnodes, callerframe=None, calledgraphs={}): - if isinstance(allnodes, dict): - self.varlist = vars_alive_through_op(sourceblock, nextopindex) - self.nodelist = [allnodes[v] for v in self.varlist] - else: - assert nextopindex == 0 - self.varlist = sourceblock.inputargs - self.nodelist = allnodes[:] - self.sourceblock = sourceblock - self.nextopindex = nextopindex - self.callerframe = callerframe - self.calledgraphs = calledgraphs - - def get_nodes_in_use(self): - return dict(zip(self.varlist, self.nodelist)) - - def shallowcopy(self): - newframe = VirtualFrame.__new__(VirtualFrame) - newframe.varlist = self.varlist - newframe.nodelist = self.nodelist - newframe.sourceblock = self.sourceblock - newframe.nextopindex = self.nextopindex - newframe.callerframe = self.callerframe - newframe.calledgraphs = self.calledgraphs - return newframe - - def copy(self, memo, flagreadonly={}): - newframe = self.shallowcopy() - newframe.nodelist = [node.copy(memo, flagreadonly) - for node in newframe.nodelist] - if newframe.callerframe is not None: - newframe.callerframe = newframe.callerframe.copy(memo, - flagreadonly) - return newframe - - def enum_call_stack(self): - frame = self - while frame is not None: - yield frame - frame = frame.callerframe - - def getfrozenkey(self): - memo = {} - key = [] - for frame in self.enum_call_stack(): - 
key.append(frame.sourceblock) - key.append(frame.nextopindex) - for node in frame.nodelist: - key.append(node.getfrozenkey(memo)) - return tuple(key) - - def find_all_nodes(self): - rtnodes = [] - vtnodes = {} - for frame in self.enum_call_stack(): - for node in frame.nodelist: - node.accumulate_nodes(rtnodes, vtnodes) - return rtnodes, vtnodes - - def find_rt_nodes(self): - rtnodes, vtnodes = self.find_all_nodes() - return rtnodes - - def find_vt_nodes(self): - rtnodes, vtnodes = self.find_all_nodes() - return vtnodes - - -def copynodes(nodelist, flagreadonly={}): - memo = {} - return [node.copy(memo, flagreadonly) for node in nodelist] - -def find_all_nodes(nodelist): - rtnodes = [] - vtnodes = {} - for node in nodelist: - node.accumulate_nodes(rtnodes, vtnodes) - return rtnodes, vtnodes - -def is_trivial_nodelist(nodelist): - for node in nodelist: - if not isinstance(node, RuntimeSpecNode): - return False - return True - -def bind_rt_nodes(srcnodelist, newnodes_list): - """Return srcnodelist with all RuntimeNodes replaced by nodes - coming from newnodes_list. 
- """ - memo = {} - newnodes_iter = iter(newnodes_list) - result = [node.bind_rt_nodes(memo, newnodes_iter) for node in srcnodelist] - rest = list(newnodes_iter) - assert rest == [], "too many nodes in newnodes_list" - return result - - -class CannotVirtualize(Exception): - pass - -class ForcedInline(Exception): - pass - -class CannotRemoveThisType(Exception): - pass - -# ____________________________________________________________ - - -class MallocVirtualizer(object): - - def __init__(self, graphs, rtyper, verbose=False): - self.graphs = graphs - self.rtyper = rtyper - self.excdata = rtyper.exceptiondata - self.graphbuilders = {} - self.specialized_graphs = {} - self.specgraphorigin = {} - self.inline_and_remove = {} # {graph: op_to_remove} - self.inline_and_remove_seen = {} # set of (graph, op_to_remove) - self.malloctypedescs = {} - self.count_virtualized = 0 - self.verbose = verbose - self.EXCTYPE_to_vtable = self.build_obscure_mapping() - - def build_obscure_mapping(self): - result = {} - for rinstance in self.rtyper.instance_reprs.values(): - result[rinstance.lowleveltype.TO] = rinstance.rclass.getvtable() - return result - - def report_result(self, progress): - if progress: - log.mallocv('removed %d mallocs so far' % self.count_virtualized) - else: - log.mallocv('done') - - def enum_all_mallocs(self, graph): - for block in graph.iterblocks(): - for op in block.operations: - if op.opname == 'malloc': - MALLOCTYPE = op.result.concretetype.TO - try: - self.getmalloctypedesc(MALLOCTYPE) - except CannotRemoveThisType: - pass - else: - yield (block, op) - elif op.opname == 'direct_call': - graph = graph_called_by(op) - if graph in self.inline_and_remove: - yield (block, op) - - def remove_mallocs_once(self): - self.flush_failed_specializations() - prev = self.count_virtualized - count_inline_and_remove = len(self.inline_and_remove) - for graph in self.graphs: - seen = {} - while True: - for block, op in self.enum_all_mallocs(graph): - if op.result not in seen: - 
seen[op.result] = True - if self.try_remove_malloc(graph, block, op): - break # graph mutated, restart enum_all_mallocs() - else: - break # enum_all_mallocs() exhausted, graph finished - progress1 = self.count_virtualized - prev - progress2 = len(self.inline_and_remove) - count_inline_and_remove - progress = progress1 or bool(progress2) - self.report_result(progress) - return progress - - def flush_failed_specializations(self): - for key, (mode, specgraph) in self.specialized_graphs.items(): - if mode == 'fail': - del self.specialized_graphs[key] - - def fixup_except_block(self, exceptblock): - # hack: this block's inputargs may be missing concretetypes... - e1, v1 = exceptblock.inputargs - e1.concretetype = self.excdata.lltype_of_exception_type - v1.concretetype = self.excdata.lltype_of_exception_value - - def getmalloctypedesc(self, MALLOCTYPE): - try: - dsc = self.malloctypedescs[MALLOCTYPE] - except KeyError: - dsc = self.malloctypedescs[MALLOCTYPE] = MallocTypeDesc(MALLOCTYPE) - return dsc - - def try_remove_malloc(self, graph, block, op): - if (graph, op) in self.inline_and_remove_seen: - return False # no point in trying again - graphbuilder = GraphBuilder(self, graph) - if graph in self.graphbuilders: - graphbuilder.initialize_from_old_builder(self.graphbuilders[graph]) - graphbuilder.start_from_a_malloc(graph, block, op.result) - try: - graphbuilder.propagate_specializations() - except CannotVirtualize as e: - self.logresult(op, 'failed', e) - return False - except ForcedInline as e: - self.logresult(op, 'forces inlining', e) - self.inline_and_remove[graph] = op - self.inline_and_remove_seen[graph, op] = True - return False - else: - self.logresult(op, 'removed') - graphbuilder.finished_removing_malloc() - self.graphbuilders[graph] = graphbuilder - self.count_virtualized += 1 - return True - - def logresult(self, op, msg, exc=None): # only for nice log outputs - if self.verbose: - if exc is None: - exc = '' - else: - exc = ': %s' % (exc,) - chain = [] - 
while True: - chain.append(str(op.result)) - if op.opname != 'direct_call': - break - fobj = op.args[0].value._obj - op = self.inline_and_remove[fobj.graph] - log.mallocv('%s %s%s' % ('->'.join(chain), msg, exc)) - elif exc is None: - log.dot() - - def get_specialized_graph(self, graph, nodelist): - assert len(graph.getargs()) == len(nodelist) - if is_trivial_nodelist(nodelist): - return 'trivial', graph - if graph in self.specgraphorigin: - orggraph, orgnodelist = self.specgraphorigin[graph] - nodelist = bind_rt_nodes(orgnodelist, nodelist) - graph = orggraph - virtualframe = VirtualFrame(graph.startblock, 0, nodelist) - key = virtualframe.getfrozenkey() - try: - return self.specialized_graphs[key] - except KeyError: - self.build_specialized_graph(graph, key, nodelist) - return self.specialized_graphs[key] - - def build_specialized_graph(self, graph, key, nodelist): - graph2 = copygraph(graph) - virtualframe = VirtualFrame(graph2.startblock, 0, nodelist) - graphbuilder = GraphBuilder(self, graph2) - specblock = graphbuilder.start_from_virtualframe(virtualframe) - specgraph = graph2 - specgraph.name += '_mallocv' - specgraph.startblock = specblock - self.specialized_graphs[key] = ('call', specgraph) - try: - graphbuilder.propagate_specializations() - except ForcedInline as e: - if self.verbose: - log.mallocv('%s inlined: %s' % (graph.name, e)) - self.specialized_graphs[key] = ('inline', None) - except CannotVirtualize as e: - if self.verbose: - log.mallocv('%s failing: %s' % (graph.name, e)) - self.specialized_graphs[key] = ('fail', None) - else: - self.graphbuilders[specgraph] = graphbuilder - self.specgraphorigin[specgraph] = graph, nodelist - self.graphs.append(specgraph) - - -class GraphBuilder(object): - - def __init__(self, mallocv, graph): - self.mallocv = mallocv - self.graph = graph - self.specialized_blocks = {} - self.pending_specializations = [] - - def initialize_from_old_builder(self, oldbuilder): - 
self.specialized_blocks.update(oldbuilder.specialized_blocks) - - def start_from_virtualframe(self, startframe): - spec = BlockSpecializer(self) - spec.initialize_renamings(startframe) - self.pending_specializations.append(spec) - return spec.specblock - - def start_from_a_malloc(self, graph, block, v_result): - assert v_result in [op.result for op in block.operations] - nodelist = [] - for v in block.inputargs: - nodelist.append(RuntimeSpecNode(v, v.concretetype)) - trivialframe = VirtualFrame(block, 0, nodelist) - spec = BlockSpecializer(self, v_result) - spec.initialize_renamings(trivialframe, keep_inputargs=True) - self.pending_specializations.append(spec) - self.pending_patch = (block, spec.specblock) - - def finished_removing_malloc(self): - (srcblock, specblock) = self.pending_patch - srcblock.inputargs = specblock.inputargs - srcblock.operations = specblock.operations - srcblock.exitswitch = specblock.exitswitch - srcblock.recloseblock(*specblock.exits) - - def create_outgoing_link(self, currentframe, targetblock, - nodelist, renamings, v_expand_malloc=None): - assert len(nodelist) == len(targetblock.inputargs) - # - if is_except(targetblock): - v_expand_malloc = None - while currentframe.callerframe is not None: - currentframe = currentframe.callerframe - newlink = self.handle_catch(currentframe, nodelist, renamings) - if newlink: - return newlink - else: - targetblock = self.exception_escapes(nodelist, renamings) - assert len(nodelist) == len(targetblock.inputargs) - - if (currentframe.callerframe is None and - is_trivial_nodelist(nodelist)): - # there is no more VirtualSpecNodes being passed around, - # so we can stop specializing - rtnodes = nodelist - specblock = targetblock - else: - if is_return(targetblock): - v_expand_malloc = None - newframe = self.return_to_caller(currentframe, nodelist[0]) - else: - targetnodes = dict(zip(targetblock.inputargs, nodelist)) - newframe = VirtualFrame(targetblock, 0, targetnodes, - 
callerframe=currentframe.callerframe, - calledgraphs=currentframe.calledgraphs) - rtnodes = newframe.find_rt_nodes() - specblock = self.get_specialized_block(newframe, v_expand_malloc) - - linkargs = [renamings[rtnode] for rtnode in rtnodes] - return Link(linkargs, specblock) - - def return_to_caller(self, currentframe, retnode): - callerframe = currentframe.callerframe - if callerframe is None: - raise ForcedInline("return block") - nodelist = callerframe.nodelist - callerframe = callerframe.shallowcopy() - callerframe.nodelist = [] - for node in nodelist: - if isinstance(node, FutureReturnValue): - node = retnode - callerframe.nodelist.append(node) - return callerframe - - def handle_catch(self, catchingframe, nodelist, renamings): - if not self.has_exception_catching(catchingframe): - return None - [exc_node, exc_value_node] = nodelist - v_exc_type = renamings.get(exc_node) - if isinstance(v_exc_type, Constant): - exc_type = v_exc_type.value - elif isinstance(exc_value_node, VirtualSpecNode): - EXCTYPE = exc_value_node.typedesc.MALLOCTYPE - exc_type = self.mallocv.EXCTYPE_to_vtable[EXCTYPE] - else: - raise CannotVirtualize("raising non-constant exc type") - excdata = self.mallocv.excdata - assert catchingframe.sourceblock.exits[0].exitcase is None - for catchlink in catchingframe.sourceblock.exits[1:]: - if excdata.fn_exception_match(exc_type, catchlink.llexitcase): - # Match found. Follow this link. 
- mynodes = catchingframe.get_nodes_in_use() - for node, attr in zip(nodelist, - ['last_exception', 'last_exc_value']): - v = getattr(catchlink, attr) - if isinstance(v, Variable): - mynodes[v] = node - # - nodelist = [] - for v in catchlink.args: - if isinstance(v, Variable): - node = mynodes[v] - else: - node = getconstnode(v, renamings) - nodelist.append(node) - return self.create_outgoing_link(catchingframe, - catchlink.target, - nodelist, renamings) - else: - # No match at all, propagate the exception to the caller - return None - - def has_exception_catching(self, catchingframe): - if not catchingframe.sourceblock.canraise: - return False - else: - operations = catchingframe.sourceblock.operations - assert 1 <= catchingframe.nextopindex <= len(operations) - return catchingframe.nextopindex == len(operations) - - def exception_escapes(self, nodelist, renamings): - # the exception escapes - if not is_trivial_nodelist(nodelist): - # start of hacks to help handle_catch() - [exc_node, exc_value_node] = nodelist - v_exc_type = renamings.get(exc_node) - if isinstance(v_exc_type, Constant): - # cannot improve: handle_catch() would already be happy - # by seeing the exc_type as a constant - pass - elif isinstance(exc_value_node, VirtualSpecNode): - # can improve with a strange hack: we pretend that - # the source code jumps to a block that itself allocates - # the exception, sets all fields, and raises it by - # passing a constant type. 
- typedesc = exc_value_node.typedesc - return self.get_exc_reconstruction_block(typedesc) - else: - # cannot improve: handle_catch() will have no clue about - # the exception type - pass - raise CannotVirtualize("except block") - targetblock = self.graph.exceptblock - self.mallocv.fixup_except_block(targetblock) - return targetblock - - def get_exc_reconstruction_block(self, typedesc): - exceptblock = self.graph.exceptblock - self.mallocv.fixup_except_block(exceptblock) - TEXC = exceptblock.inputargs[0].concretetype - TVAL = exceptblock.inputargs[1].concretetype - # - v_ignored_type = varoftype(TEXC) - v_incoming_value = varoftype(TVAL) - block = Block([v_ignored_type, v_incoming_value]) - # - c_EXCTYPE = Constant(typedesc.MALLOCTYPE, lltype.Void) - v = varoftype(lltype.Ptr(typedesc.MALLOCTYPE)) - c_flavor = Constant({'flavor': 'gc'}, lltype.Void) - op = SpaceOperation('malloc', [c_EXCTYPE, c_flavor], v) - block.operations.append(op) - # - for name, FIELDTYPE in typedesc.names_and_types: - EXACTPTR = lltype.Ptr(typedesc.name2subtype[name]) - c_name = Constant(name) - c_name.concretetype = lltype.Void - # - v_in = varoftype(EXACTPTR) - op = SpaceOperation('cast_pointer', [v_incoming_value], v_in) - block.operations.append(op) - # - v_field = varoftype(FIELDTYPE) - op = SpaceOperation('getfield', [v_in, c_name], v_field) - block.operations.append(op) - # - v_out = varoftype(EXACTPTR) - op = SpaceOperation('cast_pointer', [v], v_out) - block.operations.append(op) - # - v0 = varoftype(lltype.Void) - op = SpaceOperation('setfield', [v_out, c_name, v_field], v0) - block.operations.append(op) - # - v_exc_value = varoftype(TVAL) - op = SpaceOperation('cast_pointer', [v], v_exc_value) - block.operations.append(op) - # - exc_type = self.mallocv.EXCTYPE_to_vtable[typedesc.MALLOCTYPE] - c_exc_type = Constant(exc_type, TEXC) - block.closeblock(Link([c_exc_type, v_exc_value], exceptblock)) - return block - - def get_specialized_block(self, virtualframe, v_expand_malloc=None): - 
key = virtualframe.getfrozenkey() - specblock = self.specialized_blocks.get(key) - if specblock is None: - orgblock = virtualframe.sourceblock - assert len(orgblock.exits) != 0 - spec = BlockSpecializer(self, v_expand_malloc) - spec.initialize_renamings(virtualframe) - self.pending_specializations.append(spec) - specblock = spec.specblock - self.specialized_blocks[key] = specblock - return specblock - - def propagate_specializations(self): - while self.pending_specializations: - spec = self.pending_specializations.pop() - spec.specialize_operations() - spec.follow_exits() - - -class BlockSpecializer(object): - - def __init__(self, graphbuilder, v_expand_malloc=None): - self.graphbuilder = graphbuilder - self.v_expand_malloc = v_expand_malloc - self.specblock = Block([]) - - def initialize_renamings(self, virtualframe, keep_inputargs=False): - # we make a copy of the original 'virtualframe' because the - # specialize_operations() will mutate some of its content. - virtualframe = virtualframe.copy({}) - self.virtualframe = virtualframe - self.nodes = virtualframe.get_nodes_in_use() - self.renamings = {} # {RuntimeSpecNode(): Variable()} - if keep_inputargs: - assert virtualframe.varlist == virtualframe.sourceblock.inputargs - specinputargs = [] - for i, rtnode in enumerate(virtualframe.find_rt_nodes()): - if keep_inputargs: - v = virtualframe.varlist[i] - assert v.concretetype == rtnode.TYPE - else: - v = rtnode.newvar() - self.renamings[rtnode] = v - specinputargs.append(v) - self.specblock.inputargs = specinputargs - - def setnode(self, v, node): - assert v not in self.nodes - self.nodes[v] = node - - def getnode(self, v): - if isinstance(v, Variable): - return self.nodes[v] From pypy.commits at gmail.com Mon Dec 19 09:42:46 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 06:42:46 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.13: Close branch, ready to merge Message-ID: <5857f1e6.2aa9c20a.32025.d8c6@mx.google.com> Author: Armin Rigo Branch: 
stdlib-2.7.13 Changeset: r89181:26449ba13c77 Date: 2016-12-19 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/26449ba13c77/ Log: Close branch, ready to merge From pypy.commits at gmail.com Mon Dec 19 09:42:49 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 06:42:49 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge stdlib-2.7.13 Message-ID: <5857f1e9.581d1c0a.853b9.65bc@mx.google.com> Author: Armin Rigo Branch: Changeset: r89182:85878999f8f4 Date: 2016-12-19 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/85878999f8f4/ Log: hg merge stdlib-2.7.13 Update to CPython 2.7.13 diff too long, truncating to 2000 out of 21893 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. 
Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. 
The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, 
CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Mon Dec 19 09:42:51 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 06:42:51 -0800 (PST) Subject: [pypy-commit] pypy default: Document merge Message-ID: <5857f1eb.e337c20a.c90e9.de86@mx.google.com> Author: Armin Rigo Branch: Changeset: r89183:8fb31ee71d77 Date: 2016-12-19 15:42 +0100 http://bitbucket.org/pypy/pypy/changeset/8fb31ee71d77/ Log: Document merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -64,3 +64,7 @@ ``PyGetSetDescrObject``. The other direction seem to be fully implemented. This branch made a minimal effort to convert the basic fields to avoid segfaults, but trying to use the ``PyGetSetDescrObject`` will probably fail. + +.. branch: stdlib-2.7.13 + +Updated the implementation to match CPython 2.7.13 instead of 2.7.12. 
From pypy.commits at gmail.com Mon Dec 19 10:54:39 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 07:54:39 -0800 (PST) Subject: [pypy-commit] pypy default: Fix rzlib to support arbitrary large input strings (> 2**32 on 64-bit) Message-ID: <585802bf.c4811c0a.16ab2.87c0@mx.google.com> Author: Armin Rigo Branch: Changeset: r89184:10018e2abec8 Date: 2016-12-19 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/10018e2abec8/ Log: Fix rzlib to support arbitrary large input strings (> 2**32 on 64-bit) diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -76,6 +76,10 @@ DEF_MEM_LEVEL = MAX_MEM_LEVEL OUTPUT_BUFFER_SIZE = 32*1024 +INPUT_BUFFER_MAX = 2047*1024*1024 +# Note: we assume that zlib never outputs less than OUTPUT_BUFFER_SIZE +# from an input of INPUT_BUFFER_MAX bytes. This should be true by a +# large margin (I think zlib never compresses by more than ~1000x). class ComplexCConfig: @@ -366,10 +370,10 @@ """Common code for compress() and decompress(). 
""" # Prepare the input buffer for the stream - assert data is not None # XXX seems to be sane assumption, however not for sure + assert data is not None with rffi.scoped_nonmovingbuffer(data) as inbuf: stream.c_next_in = rffi.cast(Bytefp, inbuf) - rffi.setintfield(stream, 'c_avail_in', len(data)) + end_inbuf = rffi.ptradd(stream.c_next_in, len(data)) # Prepare the output buffer with lltype.scoped_alloc(rffi.CCHARP.TO, OUTPUT_BUFFER_SIZE) as outbuf: @@ -379,6 +383,11 @@ result = StringBuilder() while True: + avail_in = ptrdiff(end_inbuf, stream.c_next_in) + if avail_in > INPUT_BUFFER_MAX: + avail_in = INPUT_BUFFER_MAX + rffi.setintfield(stream, 'c_avail_in', avail_in) + stream.c_next_out = rffi.cast(Bytefp, outbuf) bufsize = OUTPUT_BUFFER_SIZE if max_length < bufsize: @@ -388,7 +397,9 @@ bufsize = max_length max_length -= bufsize rffi.setintfield(stream, 'c_avail_out', bufsize) + err = cfunc(stream, flush) + if err == Z_NEED_DICT and zdict is not None: inflateSetDictionary(stream, zdict) # repeat the call to inflate @@ -422,6 +433,9 @@ # When decompressing, if the compressed stream of data was truncated, # then the zlib simply returns Z_OK and waits for more. If it is # complete it returns Z_STREAM_END. - return (result.build(), - err, - rffi.cast(lltype.Signed, stream.c_avail_in)) + avail_in = ptrdiff(end_inbuf, stream.c_next_in) + return (result.build(), err, avail_in) + +def ptrdiff(p, q): + x = rffi.cast(lltype.Unsigned, p) - rffi.cast(lltype.Unsigned, q) + return rffi.cast(lltype.Signed, x) diff --git a/rpython/rlib/test/test_rzlib.py b/rpython/rlib/test/test_rzlib.py --- a/rpython/rlib/test/test_rzlib.py +++ b/rpython/rlib/test/test_rzlib.py @@ -3,7 +3,7 @@ Tests for the rzlib module. 
""" -import py +import py, sys from rpython.rlib import rzlib from rpython.rlib.rarithmetic import r_uint from rpython.rlib import clibffi # for side effect of testing lib_c_name on win32 @@ -274,3 +274,36 @@ def test_zlibVersion(): runtime_version = rzlib.zlibVersion() assert runtime_version[0] == rzlib.ZLIB_VERSION[0] + +def test_translate_and_large_input(): + from rpython.translator.c.test.test_genc import compile + + def f(i): + bytes = "s" * i + for j in range(3): + stream = rzlib.deflateInit() + bytes = rzlib.compress(stream, bytes, rzlib.Z_FINISH) + rzlib.deflateEnd(stream) + return bytes + + fc = compile(f, [int]) + + test_list = [1, 2, 3, 5, 8, 87, 876, 8765, 87654, 876543, 8765432, + 127329129] # up to ~128MB + if sys.maxint > 2**32: + test_list.append(2971215073) # 3GB (greater than INPUT_BUFFER_MAX) + for a in test_list: + print 'Testing compression of "s" * %d' % a + z = zlib.compressobj() + count = a + pieces = [] + while count > 1024*1024: + pieces.append(z.compress("s" * (1024*1024))) + count -= 1024*1024 + pieces.append(z.compress("s" * count)) + pieces.append(z.flush(zlib.Z_FINISH)) + expected = ''.join(pieces) + del pieces + expected = zlib.compress(expected) + expected = zlib.compress(expected) + assert fc(a) == expected From pypy.commits at gmail.com Mon Dec 19 11:13:24 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 08:13:24 -0800 (PST) Subject: [pypy-commit] pypy default: zlib.crc32() and zlib.adler32(): support >2**32 strings Message-ID: <58580724.c89cc20a.5a316.0a57@mx.google.com> Author: Armin Rigo Branch: Changeset: r89185:f5d06c5fc573 Date: 2016-12-19 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/f5d06c5fc573/ Log: zlib.crc32() and zlib.adler32(): support >2**32 strings diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -172,6 +172,18 @@ # ____________________________________________________________ +def _crc_or_adler(string, start, function): + 
with rffi.scoped_nonmovingbuffer(string) as bytes: + remaining = len(string) + checksum = start + ptr = rffi.cast(Bytefp, bytes) + while remaining > 0: + count = min(remaining, 32*1024*1024) + checksum = function(checksum, ptr, count) + ptr = rffi.ptradd(ptr, count) + remaining -= count + return checksum + CRC32_DEFAULT_START = 0 def crc32(string, start=CRC32_DEFAULT_START): @@ -179,13 +191,18 @@ Compute the CRC32 checksum of the string, possibly with the given start value, and return it as a unsigned 32 bit integer. """ - with rffi.scoped_nonmovingbuffer(string) as bytes: - checksum = _crc32(start, rffi.cast(Bytefp, bytes), len(string)) - return checksum - + return _crc_or_adler(string, start, _crc32) ADLER32_DEFAULT_START = 1 +def adler32(string, start=ADLER32_DEFAULT_START): + """ + Compute the Adler-32 checksum of the string, possibly with the given + start value, and return it as a unsigned 32 bit integer. + """ + return _crc_or_adler(string, start, _adler32) + + def deflateSetDictionary(stream, string): with rffi.scoped_nonmovingbuffer(string) as buf: err = _deflateSetDictionary(stream, rffi.cast(Bytefp, buf), len(string)) @@ -200,16 +217,6 @@ elif err == Z_DATA_ERROR: raise RZlibError("The given dictionary doesn't match the expected one") - -def adler32(string, start=ADLER32_DEFAULT_START): - """ - Compute the Adler-32 checksum of the string, possibly with the given - start value, and return it as a unsigned 32 bit integer. 
- """ - with rffi.scoped_nonmovingbuffer(string) as bytes: - checksum = _adler32(start, rffi.cast(Bytefp, bytes), len(string)) - return checksum - def zlibVersion(): """Return the runtime version of zlib library""" return rffi.charp2str(_zlibVersion()) diff --git a/rpython/rlib/test/test_rzlib.py b/rpython/rlib/test/test_rzlib.py --- a/rpython/rlib/test/test_rzlib.py +++ b/rpython/rlib/test/test_rzlib.py @@ -278,20 +278,29 @@ def test_translate_and_large_input(): from rpython.translator.c.test.test_genc import compile - def f(i): + def f(i, check): bytes = "s" * i - for j in range(3): - stream = rzlib.deflateInit() - bytes = rzlib.compress(stream, bytes, rzlib.Z_FINISH) - rzlib.deflateEnd(stream) - return bytes + if check == 1: + for j in range(3): + stream = rzlib.deflateInit() + bytes = rzlib.compress(stream, bytes, rzlib.Z_FINISH) + rzlib.deflateEnd(stream) + return bytes + if check == 2: + return str(rzlib.adler32(bytes)) + if check == 3: + return str(rzlib.crc32(bytes)) + return '?' - fc = compile(f, [int]) + fc = compile(f, [int, int]) test_list = [1, 2, 3, 5, 8, 87, 876, 8765, 87654, 876543, 8765432, 127329129] # up to ~128MB if sys.maxint > 2**32: - test_list.append(2971215073) # 3GB (greater than INPUT_BUFFER_MAX) + test_list.append(4305704715) # 4.01GB + # XXX should we have a way to say "I don't have enough RAM, + # don't run this"? 
+ for a in test_list: print 'Testing compression of "s" * %d' % a z = zlib.compressobj() @@ -306,4 +315,17 @@ del pieces expected = zlib.compress(expected) expected = zlib.compress(expected) - assert fc(a) == expected + assert fc(a, 1) == expected + + print 'Testing adler32 and crc32 of "s" * %d' % a + def compute(function, start): + count = a + while count > 0: + count1 = min(count, 1024*1024) + start = function("s" * count1, start) + count -= count1 + return start + expected_adler32 = compute(zlib.adler32, 1) & (2**32-1) + expected_crc32 = compute(zlib.crc32, 0) & (2**32-1) + assert fc(a, 2) == str(expected_adler32) + assert fc(a, 3) == str(expected_crc32) From pypy.commits at gmail.com Mon Dec 19 11:50:25 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 08:50:25 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Backed out changeset a1d41e7ebbb6: should be fixed in rzlib in "default" Message-ID: <58580fd1.12ad1c0a.593aa.a399@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89186:cafc329793d4 Date: 2016-12-19 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/cafc329793d4/ Log: Backed out changeset a1d41e7ebbb6: should be fixed in rzlib in "default" diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -8,7 +8,6 @@ from rpython.rlib import rzlib -UINT_MAX = 2**32-1 @unwrap_spec(string='bufferstr', start='truncatedint_w') def crc32(space, string, start = rzlib.CRC32_DEFAULT_START): @@ -52,8 +51,6 @@ Optional arg level is the compression level, in 1-9. """ - if len(string) > UINT_MAX: - raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: try: stream = rzlib.deflateInit(level) @@ -76,8 +73,6 @@ Optional arg wbits is the window buffer size. Optional arg bufsize is only for compatibility with CPython and is ignored. 
""" - if len(string) > UINT_MAX: - raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: try: stream = rzlib.inflateInit(wbits) @@ -152,8 +147,6 @@ Call the flush() method to clear these buffers. """ - if len(data) > UINT_MAX: - raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: self.lock() try: @@ -284,12 +277,10 @@ unconsumed_tail attribute. """ if max_length == 0: - max_length = UINT_MAX + max_length = sys.maxint elif max_length < 0: raise oefmt(space.w_ValueError, "max_length must be greater than zero") - elif len(data) > UINT_MAX: - raise oefmt(space.w_OverflowError, "Size does not fit in an unsigned int") try: self.lock() try: From pypy.commits at gmail.com Mon Dec 19 11:50:27 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 08:50:27 -0800 (PST) Subject: [pypy-commit] pypy default: Rename the PyPy-specific 'ob_keys' member of PyDictObject to '_tmpkeys', Message-ID: <58580fd3.8a29c20a.c1a15.0964@mx.google.com> Author: Armin Rigo Branch: Changeset: r89187:12ee90f51eb9 Date: 2016-12-19 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/12ee90f51eb9/ Log: Rename the PyPy-specific 'ob_keys' member of PyDictObject to '_tmpkeys', otherwise we might think it comes from CPython diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -16,7 +16,7 @@ PyDictObjectStruct = lltype.ForwardReference() PyDictObject = lltype.Ptr(PyDictObjectStruct) PyDictObjectFields = PyObjectFields + \ - (("ob_keys", PyObject),) + (("_tmpkeys", PyObject),) cpython_struct("PyDictObject", PyDictObjectFields, PyDictObjectStruct) @bootstrap_function @@ -33,7 +33,7 @@ Fills a newly allocated PyDictObject with the given dict object. 
""" py_dict = rffi.cast(PyDictObject, py_obj) - py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + py_dict.c__tmpkeys = lltype.nullptr(PyObject.TO) # Problems: if this dict is a typedict, we may have unbound GetSetProperty # functions in the dict. The corresponding PyGetSetDescrObject must be # bound to a class, but the actual w_type will be unavailable later on. @@ -58,8 +58,8 @@ @cpython_api([PyObject], lltype.Void, header=None) def dict_dealloc(space, py_obj): py_dict = rffi.cast(PyDictObject, py_obj) - decref(space, py_dict.c_ob_keys) - py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + decref(space, py_dict.c__tmpkeys) + py_dict.c__tmpkeys = lltype.nullptr(PyObject.TO) _dealloc(space, py_obj) @cpython_api([], PyObject) @@ -263,16 +263,16 @@ py_dict = rffi.cast(PyDictObject, py_obj) if pos == 0: # Store the current keys in the PyDictObject. - decref(space, py_dict.c_ob_keys) + decref(space, py_dict.c__tmpkeys) w_keys = space.call_method(space.w_dict, "keys", w_dict) - py_dict.c_ob_keys = create_ref(space, w_keys) - Py_IncRef(space, py_dict.c_ob_keys) + py_dict.c__tmpkeys = create_ref(space, w_keys) + Py_IncRef(space, py_dict.c__tmpkeys) else: - w_keys = from_ref(space, py_dict.c_ob_keys) + w_keys = from_ref(space, py_dict.c__tmpkeys) ppos[0] += 1 if pos >= space.len_w(w_keys): - decref(space, py_dict.c_ob_keys) - py_dict.c_ob_keys = lltype.nullptr(PyObject.TO) + decref(space, py_dict.c__tmpkeys) + py_dict.c__tmpkeys = lltype.nullptr(PyObject.TO) return 0 w_key = space.listview(w_keys)[pos] w_value = space.getitem(w_dict, w_key) diff --git a/pypy/module/cpyext/include/dictobject.h b/pypy/module/cpyext/include/dictobject.h --- a/pypy/module/cpyext/include/dictobject.h +++ b/pypy/module/cpyext/include/dictobject.h @@ -9,7 +9,7 @@ typedef struct { PyObject_HEAD - PyObject *ob_keys; /* a private place to put keys during PyDict_Next */ + PyObject *_tmpkeys; /* a private place to put keys during PyDict_Next */ } PyDictObject; #ifdef __cplusplus From pypy.commits at 
gmail.com Mon Dec 19 12:05:56 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 09:05:56 -0800 (PST) Subject: [pypy-commit] pypy py3.5: hg merge default Message-ID: <58581374.913fc20a.7301f.10dc@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89188:afa1ccecab71 Date: 2016-12-19 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/afa1ccecab71/ Log: hg merge default diff too long, truncating to 2000 out of 25107 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. 
""" - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. 
@@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 
0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Mon Dec 19 12:47:35 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 19 Dec 2016 09:47:35 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <58581d37.ce181c0a.bb41f.bed7@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r837:8c0df436f374 Date: 2016-12-19 18:47 +0100 http://bitbucket.org/pypy/pypy.org/changeset/8c0df436f374/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -29,7 +29,7 @@ frozenset object @@ -1271,7 +1271,7 @@ return self.space.unicode_w(w_item) def wrap(self, item): - return self.space.wrap(item) + return self.space.newunicode(item) def iter(self, w_set): return UnicodeIteratorImplementation(self.space, self, w_set) @@ -1312,7 +1312,7 @@ return self.space.int_w(w_item) def wrap(self, item): - return self.space.wrap(item) + return self.space.newint(item) def iter(self, w_set): return IntegerIteratorImplementation(self.space, self, w_set) @@ -1475,7 +1475,7 @@ def next_entry(self): for key in self.iterator: - return self.space.wrap(key) + return self.space.newunicode(key) else: return None @@ -1490,7 +1490,7 @@ def next_entry(self): # note that this 'for' loop only runs once, at most for key in self.iterator: - return self.space.wrap(key) + return self.space.newint(key) else: return None @@ -1501,8 +1501,8 @@ self.iterator = d.iterkeys() def next_entry(self): - for key in self.iterator: - return self.space.wrap(key) + for w_key in self.iterator: + return w_key else: return None @@ -1527,7 +1527,7 @@ self.iterimplementation = iterimplementation def descr_length_hint(self, space): - return 
space.wrap(self.iterimplementation.length()) + return space.newint(self.iterimplementation.length()) def descr_iter(self, space): return self diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -105,7 +105,7 @@ return w_obj def descr_repr(self, space): - return space.wrap("slice(%s, %s, %s)" % ( + return space.newtext("slice(%s, %s, %s)" % ( space.str_w(space.repr(self.w_start)), space.str_w(space.repr(self.w_stop)), space.str_w(space.repr(self.w_step)))) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -14,6 +14,16 @@ def make_specialised_class(typetuple): assert type(typetuple) == tuple + wraps = [] + for typ in typetuple: + if typ == int: + wraps.append(lambda space, x: space.newint(x)) + elif typ == float: + wraps.append(lambda space, x: space.newfloat(x)) + elif typ == object: + wraps.append(lambda space, w_x: w_x) + else: + assert 0 typelen = len(typetuple) iter_n = unrolling_iterable(range(typelen)) @@ -46,8 +56,7 @@ list_w = [None] * typelen for i in iter_n: value = getattr(self, 'value%s' % i) - if typetuple[i] != object: - value = self.space.wrap(value) + value = wraps[i](self.space, value) list_w[i] = value return list_w @@ -84,7 +93,7 @@ z -= 1 mult += 82520 + z + z x += 97531 - return space.wrap(intmask(x)) + return space.newint(intmask(x)) def descr_eq(self, space, w_other): if not isinstance(w_other, W_AbstractTupleObject): @@ -95,8 +104,7 @@ for i in iter_n: myval = getattr(self, 'value%s' % i) otherval = w_other.getitem(space, i) - if typetuple[i] != object: - myval = space.wrap(myval) + myval = wraps[i](self.space, myval) if not space.eq_w(myval, otherval): return space.w_False return space.w_True @@ -125,8 +133,7 @@ for i in iter_n: if index == i: value = getattr(self, 'value%s' % i) 
- if typetuple[i] != object: - value = space.wrap(value) + value = wraps[i](self.space, value) return value raise oefmt(space.w_IndexError, "tuple index out of range") @@ -169,10 +176,10 @@ # faster to move the decision out of the loop. @specialize.arg(1) -def _build_zipped_spec(space, Cls, lst1, lst2): +def _build_zipped_spec(space, Cls, lst1, lst2, wrap1, wrap2): length = min(len(lst1), len(lst2)) - return [Cls(space, space.wrap(lst1[i]), - space.wrap(lst2[i])) for i in range(length)] + return [Cls(space, wrap1(lst1[i]), + wrap2(lst2[i])) for i in range(length)] def _build_zipped_spec_oo(space, w_list1, w_list2): strat1 = w_list1.strategy @@ -198,15 +205,18 @@ if intlist1 is not None: intlist2 = w_list2.getitems_int() if intlist2 is not None: - lst_w = _build_zipped_spec(space, Cls_ii, intlist1, intlist2) + lst_w = _build_zipped_spec( + space, Cls_ii, intlist1, intlist2, + space.newint, space.newint) return space.newlist(lst_w) else: floatlist1 = w_list1.getitems_float() if floatlist1 is not None: floatlist2 = w_list2.getitems_float() if floatlist2 is not None: - lst_w = _build_zipped_spec(space, Cls_ff, floatlist1, - floatlist2) + lst_w = _build_zipped_spec( + space, Cls_ff, floatlist1, floatlist2, space.newfloat, + space.newfloat) return space.newlist(lst_w) lst_w = _build_zipped_spec_oo(space, w_list1, w_list2) diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -41,7 +41,7 @@ return StringBuffer(self.force()) def descr_len(self, space): - return space.wrap(self.length) + return space.newint(self.length) def descr_add(self, space, w_other): try: diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -73,7 +73,7 @@ return chr(char) def descr_len(self, space): - return space.wrap(self._len()) + return space.newint(self._len()) def 
descr_iter(self, space): from pypy.objspace.std.iterobject import W_StringIterObject @@ -188,7 +188,7 @@ else: res = count(value, sub, start, end) assert res >= 0 - return space.wrap(res) + return space.newint(res) def descr_decode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import ( @@ -257,7 +257,7 @@ res = find(value, sub, start, end) if ofs is not None and res >= 0: res -= ofs - return space.wrap(res) + return space.newint(res) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) @@ -269,7 +269,7 @@ res = rfind(value, sub, start, end) if ofs is not None and res >= 0: res -= ofs - return space.wrap(res) + return space.newint(res) def descr_index(self, space, w_sub, w_start=None, w_end=None): value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) @@ -285,7 +285,7 @@ "substring not found in " + self._KIND2 + ".index") if ofs is not None: res -= ofs - return space.wrap(res) + return space.newint(res) def descr_rindex(self, space, w_sub, w_start=None, w_end=None): value, start, end, ofs = self._convert_idx_params(space, w_start, w_end) @@ -301,7 +301,7 @@ "substring not found in " + self._KIND2 + ".rindex") if ofs is not None: res -= ofs - return space.wrap(res) + return space.newint(res) @specialize.arg(2) def _is_generic(self, space, func_name): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1148,6 +1148,7 @@ return string.encode('utf-8') assert isinstance(string, str) return string + bytes_w = str_w def bytes_w(self, string): assert isinstance(string, str) @@ -1165,6 +1166,7 @@ if isinstance(obj, str): return obj.decode('ascii') return obj + newtext = newbytes = wrap def newbytes(self, obj): return obj diff --git a/pypy/objspace/std/test/test_stdobjspace.py 
b/pypy/objspace/std/test/test_stdobjspace.py --- a/pypy/objspace/std/test/test_stdobjspace.py +++ b/pypy/objspace/std/test/test_stdobjspace.py @@ -13,8 +13,8 @@ self.space.wrap(0)) def test_utf8(self): - assert self.space.isinstance_w(self.space.newutf8("abc"), self.space.w_unicode) - assert self.space.eq_w(self.space.newutf8("üöä"), self.space.newunicode(u"üöä")) + assert self.space.isinstance_w(self.space.newtext("abc"), self.space.w_unicode) + assert self.space.eq_w(self.space.newtext("üöä"), self.space.newunicode(u"üöä")) def test_str_w_non_str(self): raises(OperationError,self.space.str_w,self.space.wrap(None)) diff --git a/pypy/objspace/std/transparent.py b/pypy/objspace/std/transparent.py --- a/pypy/objspace/std/transparent.py +++ b/pypy/objspace/std/transparent.py @@ -39,8 +39,8 @@ def setup(space): """Add proxy functions to the __pypy__ module.""" w___pypy__ = space.getbuiltinmodule("__pypy__") - space.setattr(w___pypy__, space.wrap('tproxy'), space.wrap(app_proxy)) - space.setattr(w___pypy__, space.wrap('get_tproxy_controller'), + space.setattr(w___pypy__, space.newtext('tproxy'), space.wrap(app_proxy)) + space.setattr(w___pypy__, space.newtext('get_tproxy_controller'), space.wrap(app_proxy_controller)) diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -53,7 +53,7 @@ return None # empty tuple: base value 258 uid = (258 << IDTAG_SHIFT) | IDTAG_SPECIAL - return space.wrap(uid) + return space.newint(uid) def __repr__(self): """representation for debugging purposes""" @@ -102,11 +102,11 @@ def descr_repr(self, space): items = self.tolist() if len(items) == 1: - return space.wrap(u"(" + space.unicode_w(space.repr(items[0])) + - u",)") + return space.newunicode( + u"(" + space.unicode_w(space.repr(items[0])) + u",)") tmp = u", ".join([space.unicode_w(space.repr(item)) for item in items]) - return space.wrap(u"(" + tmp + u")") + return 
space.newunicode(u"(" + tmp + u")") def descr_hash(self, space): raise NotImplementedError @@ -213,7 +213,7 @@ for w_item in self.tolist(): if space.eq_w(w_item, w_obj): count += 1 - return space.wrap(count) + return space.newint(count) @unwrap_spec(w_start=WrappedDefault(0), w_stop=WrappedDefault(sys.maxint)) @jit.look_inside_iff(lambda self, _1, _2, _3, _4: _unroll_condition(self)) @@ -226,7 +226,7 @@ for i in range(start, min(stop, length)): w_item = self.tolist()[i] if space.eq_w(w_item, w_obj): - return space.wrap(i) + return space.newint(i) raise oefmt(space.w_ValueError, "tuple.index(x): x not in tuple") W_AbstractTupleObject.typedef = TypeDef( @@ -296,7 +296,7 @@ z -= 1 mult += 82520 + z + z x += 97531 - return space.wrap(intmask(x)) + return space.newint(intmask(x)) def _descr_hash_jitdriver(self, space): mult = 1000003 @@ -310,7 +310,7 @@ z -= 1 mult += 82520 + z + z x += 97531 - return space.wrap(intmask(x)) + return space.newint(intmask(x)) def descr_eq(self, space, w_other): if not isinstance(w_other, W_AbstractTupleObject): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -35,7 +35,7 @@ self.intvalue = intvalue def unwrap_cell(self, space): - return space.wrap(self.intvalue) + return space.newint(self.intvalue) def __repr__(self): return "" % (self.intvalue, ) @@ -344,7 +344,7 @@ if name == "__del__" and name not in self.dict_w: msg = ("a __del__ method added to an existing type will not be " "called") - space.warn(space.wrap(msg), space.w_RuntimeWarning) + space.warn(space.newtext(msg), space.w_RuntimeWarning) version_tag = self.version_tag() if version_tag is not None: w_curr = self._pure_getdictvalue_no_unwrapping( @@ -540,7 +540,7 @@ mod = self.name[:dot] else: mod = "builtins" - return space.wrap(mod) + return space.newtext(mod) def getname(self, space): if self.is_heaptype(): @@ -651,9 +651,9 @@ else: mod = space.unicode_w(w_mod) if mod 
is not None and mod != u'builtins': - return space.wrap(u"" % (mod, self.getqualname(space))) + return space.newunicode(u"" % (mod, self.getqualname(space))) else: - return space.wrap(u"" % (self.name.decode('utf-8'))) + return space.newtext("" % (self.name,)) def descr_getattribute(self, space, w_name): name = space.str_w(w_name) @@ -712,8 +712,8 @@ w_winner = _calculate_metaclass(space, w_typetype, bases_w) if not space.is_w(w_winner, w_typetype): - newfunc = space.getattr(w_winner, space.wrap('__new__')) - if not space.is_w(newfunc, space.getattr(space.w_type, space.wrap('__new__'))): + newfunc = space.getattr(w_winner, space.newtext('__new__')) + if not space.is_w(newfunc, space.getattr(space.w_type, space.newtext('__new__'))): return space.call_function(newfunc, w_winner, w_name, w_bases, w_dict) w_typetype = w_winner @@ -766,13 +766,13 @@ def _check(space, w_type, msg="descriptor is for 'type'"): if not isinstance(w_type, W_TypeObject): - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise OperationError(space.w_TypeError, space.newtext(msg)) return w_type def descr_get__name__(space, w_type): w_type = _check(space, w_type) - return space.wrap(w_type.getname(space)) + return space.newtext(w_type.getname(space)) def descr_set__name__(space, w_type, w_value): w_type = _check(space, w_type) @@ -886,7 +886,7 @@ def descr__doc(space, w_type): if space.is_w(w_type, space.w_type): - return space.wrap("""type(object) -> the object's type + return space.newtext("""type(object) -> the object's type type(name, bases, dict) -> a new type""") w_type = _check(space, w_type) if not w_type.is_heaptype(): @@ -920,7 +920,7 @@ flags |= _CPYTYPE if w_type.flag_abstract: flags |= _ABSTRACT - return space.wrap(flags) + return space.newint(flags) def descr_get__module(space, w_type): w_type = _check(space, w_type) @@ -1230,7 +1230,7 @@ caller = space.getexecutioncontext().gettopframe_nohidden() if caller is not None: w_globals = caller.get_w_globals() - w_name = 
space.finditem(w_globals, space.wrap('__name__')) + w_name = space.finditem(w_globals, space.newtext('__name__')) if w_name is not None: w_self.dict_w['__module__'] = w_name diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -74,7 +74,7 @@ else: base = 257 # empty unicode string: base value 257 uid = (base << IDTAG_SHIFT) | IDTAG_SPECIAL - return space.wrap(uid) + return space.newint(uid) def unicode_w(self, space): return self._value @@ -116,7 +116,7 @@ raise oefmt(space.w_TypeError, "ord() expected a character, but string of length %d " "found", len(self._value)) - return space.wrap(ord(self._value[0])) + return space.newint(ord(self._value[0])) def _new(self, value): return W_UnicodeObject(value) @@ -327,7 +327,7 @@ chars = self._value size = len(chars) s = _repr_function(chars, size, "strict") - return space.wrap(s) + return space.newtext(s) def descr_str(self, space): if space.is_w(space.type(self), space.w_unicode): @@ -337,7 +337,7 @@ def descr_hash(self, space): x = compute_hash(self._value) - return space.wrap(x) + return space.newint(x) def descr_eq(self, space, w_other): try: @@ -426,11 +426,11 @@ selfvalue = self._value w_sys = space.getbuiltinmodule('sys') maxunicode = space.int_w(space.getattr(w_sys, - space.wrap("maxunicode"))) + space.newtext("maxunicode"))) result = [] for unichar in selfvalue: try: - w_newval = space.getitem(w_table, space.wrap(ord(unichar))) + w_newval = space.getitem(w_table, space.newint(ord(unichar))) except OperationError as e: if e.match(space, space.w_LookupError): result.append(unichar) @@ -464,8 +464,8 @@ l = space.listview_unicode(w_list) if l is not None: if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) + return space.newunicode(l[0]) + return space.newunicode(self._val(space).join(l)) return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, 
space, w_obj): @@ -600,11 +600,11 @@ def wrap_encode_error(space, ue): raise OperationError(space.w_UnicodeEncodeError, space.newtuple([ - space.wrap(ue.encoding), - space.wrap(ue.object), - space.wrap(ue.start), - space.wrap(ue.end), - space.wrap(ue.reason)])) + space.newtext(ue.encoding), + space.newbytes(ue.object), + space.newint(ue.start), + space.newint(ue.end), + space.newtext(ue.reason)])) def decode_object(space, w_obj, encoding, errors): @@ -620,11 +620,11 @@ eh = unicodehelper.decode_error_handler(space) u = str_decode_ascii( # try again, to get the error right s, len(s), None, final=True, errorhandler=eh)[0] - return space.wrap(u) + return space.newunicode(u) if encoding == 'utf-8': s = space.charbuf_w(w_obj) eh = unicodehelper.decode_error_handler(space) - return space.wrap(str_decode_utf_8( + return space.newunicode(str_decode_utf_8( s, len(s), None, final=True, errorhandler=eh)[0]) from pypy.module._codecs.interp_codecs import decode_text diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -1,6 +1,6 @@ from rpython.rlib.rstring import InvalidBaseError -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter import gateway IDTAG_SHIFT = 4 @@ -53,12 +53,10 @@ def wrap_parsestringerror(space, e, w_source): if isinstance(e, InvalidBaseError): - w_msg = space.wrap(e.msg) + raise OperationError(space.w_ValueError, space.newtext(e.msg)) else: - w_msg = space.wrap(u'%s: %s' % (unicode(e.msg), - space.unicode_w(space.repr(w_source)))) - return OperationError(space.w_ValueError, w_msg) - + raise oefmt(space.w_ValueError, '%s: %R', + e.msg, w_source) app = gateway.applevel(r''' def _classdir(klass): From pypy.commits at gmail.com Tue Dec 20 07:00:10 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 04:00:10 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: hg merge py3.5 Message-ID: 
<58591d4a.e576c20a.37a92.7916@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89198:10264e569a8c Date: 2016-12-20 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/10264e569a8c/ Log: hg merge py3.5 diff too long, truncating to 2000 out of 26231 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. 
""" - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. 
@@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 
0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Tue Dec 20 09:30:06 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 06:30:06 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: merge default Message-ID: <5859406e.113cc20a.1104c.c479@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89201:e5ac7ec7ab17 Date: 2016-12-20 15:29 +0100 http://bitbucket.org/pypy/pypy/changeset/e5ac7ec7ab17/ Log: merge default diff too long, truncating to 2000 out of 16301 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info --- a/lib_pypy/greenlet.egg-info +++ b/lib_pypy/greenlet.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: greenlet -Version: 0.4.10 +Version: 0.4.11 Summary: Lightweight in-process concurrent programming Home-page: https://github.com/python-greenlet/greenlet Author: Ralf Schmitt (for CPython), PyPy team diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,7 +1,7 @@ import sys import _continuation -__version__ = "0.4.10" +__version__ = "0.4.11" # ____________________________________________________________ # Exceptions diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -190,6 +190,12 @@ "make sure that all calls go through space.call_args", default=False), + BoolOption("disable_entrypoints", + "Disable external entry points, notably the" + " cpyext module and cffi's embedding mode.", + default=False, + 
requires=[("objspace.usemodules.cpyext", False)]), + OptionDescription("std", "Standard Object Space Options", [ BoolOption("withtproxy", "support transparent proxies", default=True), diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -1,3 +1,9 @@ +#encoding utf-8 + +Contributors +------------ +:: + Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz @@ -307,7 +313,7 @@ Mads Kiilerich Antony Lee Jason Madden - Daniel Neuh�user + Daniel Neuhäuser reubano at gmail.com Yaroslav Fedevych Jim Hunziker diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -1,145 +1,61 @@ cppyy: C++ bindings for PyPy ============================ -The cppyy module creates, at run-time, Python-side classes and functions for -C++, by querying a C++ reflection system. -The default system used is `Reflex`_, which extracts the needed information -from C++ header files. -Another current backend is based on `CINT`_, and yet another, more important -one for the medium- to long-term will be based on `cling`_. -The latter sits on top of `llvm`_'s `clang`_, and will therefore allow the use -of C++11. -The work on the cling backend has so far been done only for CPython, but -bringing it to PyPy is a lot less work than developing it in the first place. +The cppyy module delivers dynamic Python-C++ bindings. +It is designed for automation, high performance, scale, interactivity, and +handling all of modern C++ (11, 14, etc.). +It is based on `Cling`_ which, through `LLVM`_/`clang`_, provides C++ +reflection and interactivity. +Reflection information is extracted from C++ header files. +Cppyy itself is built into PyPy (an alternative exists for CPython), but +it requires a `backend`_, installable through pip, to interface with Cling. -.. _Reflex: https://root.cern.ch/how/how-use-reflex -.. _CINT: https://root.cern.ch/introduction-cint -.. _cling: https://root.cern.ch/cling -.. 
_llvm: http://llvm.org/ +.. _Cling: https://root.cern.ch/cling +.. _LLVM: http://llvm.org/ .. _clang: http://clang.llvm.org/ - -This document describes the version of cppyy that lives in the main branch of -PyPy. -The development of cppyy happens in the "reflex-support" branch. - - -Motivation ----------- - -To provide bindings to another language in CPython, you program to a -generic C-API that exposes many of the interpreter features. -With PyPy, however, there is no such generic C-API, because several of the -interpreter features (e.g. the memory model) are pluggable and therefore -subject to change. -Furthermore, a generic API does not allow any assumptions about the calls -into another language, forcing the JIT to behave conservatively around these -calls and with the objects that cross language boundaries. -In contrast, cppyy does not expose an API, but expects one to be implemented -by a backend. -It makes strong assumptions about the semantics of the API that it uses and -that in turn allows the JIT to make equally strong assumptions. -This is possible, because the expected API is only for providing C++ language -bindings, and does not provide generic programmability. - -The cppyy module further offers two features, which result in improved -performance as well as better functionality and cross-language integration. -First, cppyy itself is written in RPython and therefore open to optimizations -by the JIT up until the actual point of call into C++. -This means for example, that if variables are already unboxed by the JIT, they -can be passed through directly to C++. -Second, a backend such as Reflex (and cling far more so) adds dynamic features -to C++, thus greatly reducing impedance mismatches between the two languages. -For example, Reflex is dynamic enough to allow writing runtime bindings -generation in python (as opposed to RPython) and this is used to create very -natural "pythonizations" of the bound code. 
-As another example, cling allows automatic instantiations of templates. - -See this description of the `cppyy architecture`_ for further details. - -.. _cppyy architecture: http://morepypy.blogspot.com/2012/06/architecture-of-cppyy.html +.. _backend: https://pypi.python.org/pypi/PyPy-cppyy-backend Installation ------------ -There are two ways of using cppyy, and the choice depends on how pypy-c was -built: the backend can be builtin, or dynamically loadable. -The former has the disadvantage of requiring pypy-c to be linked with external -C++ libraries (e.g. libReflex.so), but has the advantage of being faster in -some cases. -That advantage will disappear over time, however, with improvements in the -JIT. -Therefore, this document assumes that the dynamically loadable backend is -chosen (it is, by default). -See the :doc:`backend documentation `. +This assumes PyPy2.7 v5.7 or later; earlier versions use a Reflex-based cppyy +module, which is no longer supported. +Both the tooling and user-facing Python codes are very backwards compatible, +however. +Further dependencies are cmake (for general build), Python2.7 (for LLVM), and +a modern C++ compiler (one that supports at least C++11). -A standalone version of Reflex that also provides the dynamically loadable -backend is available for `download`_. Note this is currently the only way to -get the dynamically loadable backend, so use this first. +Assuming you have a recent enough version of PyPy installed, use pip to +complete the installation of cppyy:: -That version, as well as any other distribution of Reflex (e.g. the one that -comes with `ROOT`_, which may be part of your Linux distribution as part of -the selection of scientific software) will also work for a build with the -builtin backend. + $ MAKE_NPROCS=4 pypy-c -m pip install --verbose PyPy-cppyy-backend -.. _download: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. 
_ROOT: http://root.cern.ch/ +Set the number of parallel builds ('4' in this example, through the MAKE_NPROCS +environment variable) to a number appropriate for your machine. +The building process may take quite some time as it includes a customized +version of LLVM as part of Cling, which is why --verbose is recommended so that +you can see the build progress. -Besides Reflex, you probably need a version of `gccxml`_ installed, which is -most easily provided by the packager of your system. -If you read up on gccxml, you will probably notice that it is no longer being -developed and hence will not provide C++11 support. -That's why the medium term plan is to move to cling. -Note that gccxml is only needed to generate reflection libraries. -It is not needed to use them. - -.. _gccxml: http://www.gccxml.org - -To install the standalone version of Reflex, after download:: - - $ tar jxf reflex-2014-10-20.tar.bz2 - $ cd reflex-2014-10-20 - $ ./build/autogen - $ ./configure - $ make && make install - -The usual rules apply: /bin needs to be added to the ``PATH`` and -/lib to the ``LD_LIBRARY_PATH`` environment variable. -For convenience, this document will assume that there is a ``REFLEXHOME`` -variable that points to . -If you downloaded or built the whole of ROOT, ``REFLEXHOME`` should be equal -to ``ROOTSYS``. - -The following is optional, and is only to show how pypy-c can be build -:doc:`from source `, for example to get at the main development branch of cppyy. -The :doc:`backend documentation ` has more details on the backend-specific -prerequisites. - -Then run the translation to build ``pypy-c``:: - - $ hg clone https://bitbucket.org/pypy/pypy - $ cd pypy - $ hg up reflex-support # optional - - # This example shows python, but using pypy-c is faster and uses less memory - $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy - -This will build a ``pypy-c`` that includes the cppyy module, and through that, -Reflex support. 
-Of course, if you already have a pre-built version of the ``pypy`` interpreter, -you can use that for the translation rather than ``python``. -If not, you may want :ref:`to obtain a binary distribution ` to speed up the -translation step. +The default installation will be under +$PYTHONHOME/site-packages/cppyy_backend/lib, +which needs to be added to your dynamic loader path (LD_LIBRARY_PATH). +If you need the dictionary and class map generation tools (used in the examples +below), you need to add $PYTHONHOME/site-packages/cppyy_backend/bin to your +executable path (PATH). Basic bindings example ---------------------- -Now test with a trivial example whether all packages are properly installed -and functional. -First, create a C++ header file with some class in it (note that all functions -are made inline for convenience; a real-world example would of course have a -corresponding source file):: +These examples assume that cppyy_backend is pointed to by the environment +variable CPPYYHOME, and that CPPYYHOME/lib is added to LD_LIBRARY_PATH and +CPPYYHOME/bin to PATH. + +Let's first test with a trivial example whether all packages are properly +installed and functional. 
+Create a C++ header file with some class in it (all functions are made inline +for convenience; if you have out-of-line code, link with it as appropriate):: $ cat MyClass.h class MyClass { @@ -153,11 +69,11 @@ int m_myint; }; -Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the -code:: +Then, generate the bindings using ``genreflex`` (installed under +cppyy_backend/bin in site_packages), and compile the code:: $ genreflex MyClass.h - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling Next, make sure that the library can be found through the dynamic lookup path (the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), @@ -209,7 +125,7 @@ For example:: $ genreflex MyClass.h --rootmap=libMyClassDict.rootmap --rootmap-lib=libMyClassDict.so - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$CPPYYHOME/lib -lCling where the first option (``--rootmap``) specifies the output file name, and the second option (``--rootmap-lib``) the name of the reflection library where @@ -311,7 +227,7 @@ Now the reflection info can be generated and compiled:: $ genreflex MyAdvanced.h --selection=MyAdvanced.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyAdvanced_rflx.cpp -o libAdvExDict.so -L$CPPYYHOME/lib -lCling and subsequently be used from PyPy:: @@ -370,7 +286,7 @@ bound using:: $ genreflex example.h --deep --rootmap=libexampleDict.rootmap --rootmap-lib=libexampleDict.so - $ g++ -fPIC -rdynamic -O2 -shared 
-I$REFLEXHOME/include example_rflx.cpp -o libexampleDict.so -L$REFLEXHOME/lib -lReflex + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include example_rflx.cpp -o libexampleDict.so -L$CPPYYHOME/lib -lCling * **abstract classes**: Are represented as python classes, since they are needed to complete the inheritance hierarchies, but will raise an exception @@ -666,13 +582,10 @@ Templates --------- -A bit of special care needs to be taken for the use of templates. -For a templated class to be completely available, it must be guaranteed that -said class is fully instantiated, and hence all executable C++ code is -generated and compiled in. -The easiest way to fulfill that guarantee is by explicit instantiation in the -header file that is handed to ``genreflex``. -The following example should make that clear:: +Templates can be automatically instantiated, assuming the appropriate header +files have been loaded or are accessible to the class loader. +This is the case for example for all of STL. +For example:: $ cat MyTemplate.h #include @@ -686,68 +599,10 @@ int m_i; }; - #ifdef __GCCXML__ - template class std::vector; // explicit instantiation - #endif - -If you know for certain that all symbols will be linked in from other sources, -you can also declare the explicit template instantiation ``extern``. -An alternative is to add an object to an unnamed namespace:: - - namespace { - std::vector vmc; - } // unnamed namespace - -Unfortunately, this is not always enough for gcc. -The iterators of vectors, if they are going to be used, need to be -instantiated as well, as do the comparison operators on those iterators, as -these live in an internal namespace, rather than in the iterator classes. -Note that you do NOT need this iterators to iterator over a vector. -You only need them if you plan to explicitly call e.g. ``begin`` and ``end`` -methods, and do comparisons of iterators. 
-One way to handle this, is to deal with this once in a macro, then reuse that -macro for all ``vector`` classes. -Thus, the header above needs this (again protected with -``#ifdef __GCCXML__``), instead of just the explicit instantiation of the -``vector``:: - - #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \ - template class std::STLTYPE< TTYPE >; \ - template class __gnu_cxx::__normal_iterator >; \ - template class __gnu_cxx::__normal_iterator >;\ - namespace __gnu_cxx { \ - template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \ - const std::STLTYPE< TTYPE >::iterator&); \ - } - - STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass) - -Then, still for gcc, the selection file needs to contain the full hierarchy as -well as the global overloads for comparisons for the iterators:: - - $ cat MyTemplate.xml - - - - - - - - - Run the normal ``genreflex`` and compilation steps:: $ genreflex MyTemplate.h --selection=MyTemplate.xml - $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$REFLEXHOME/lib -lReflex - -Note: this is a dirty corner that clearly could do with some automation, -even if the macro already helps. -Such automation is planned. -In fact, in the Cling world, the backend can perform the template -instantations and generate the reflection info on the fly, and none of the -above will any longer be necessary. + $ g++ -std=c++11 -fPIC -rdynamic -O2 -shared -I$CPPYYHOME/include MyTemplate_rflx.cpp -o libTemplateDict.so -L$CPPYYHOME/lib -lCling Subsequent use should be as expected. Note the meta-class style of "instantiating" the template:: @@ -764,8 +619,6 @@ 1 2 3 >>>> -Other templates work similarly, but are typically simpler, as there are no -similar issues with iterators for e.g. ``std::list``. 
The arguments to the template instantiation can either be a string with the full list of arguments, or the explicit classes. The latter makes for easier code writing if the classes passed to the @@ -775,95 +628,40 @@ The fast lane ------------- -The following is an experimental feature of cppyy. -It mostly works, but there are some known issues (e.g. with return-by-value). -Soon it should be the default mode, however. +By default, cppyy will use direct function pointers through `CFFI`_ whenever +possible. If this causes problems for you, you can disable it by setting the +CPPYY_DISABLE_FASTPATH environment variable. -With a slight modification of Reflex, it can provide function pointers for -C++ methods, and hence allow PyPy to call those pointers directly, rather than -calling C++ through a Reflex stub. +.. _CFFI: https://cffi.readthedocs.io/en/latest/ -The standalone version of Reflex `provided`_ has been patched, but if you get -Reflex from another source (most likely with a ROOT distribution), locate the -file `genreflex-methptrgetter.patch`_ in pypy/module/cppyy and apply it to -the genreflex python scripts found in ``$ROOTSYS/lib``:: - - $ cd $ROOTSYS/lib - $ patch -p2 < genreflex-methptrgetter.patch - -With this patch, ``genreflex`` will have grown the ``--with-methptrgetter`` -option. -Use this option when running ``genreflex``, and add the -``-Wno-pmf-conversions`` option to ``g++`` when compiling. -The rest works the same way: the fast path will be used transparently (which -also means that you can't actually find out whether it is in use, other than -by running a micro-benchmark or a JIT test). - -.. _provided: http://cern.ch/wlav/reflex-2014-10-20.tar.bz2 -.. _genreflex-methptrgetter.patch: https://bitbucket.org/pypy/pypy/src/default/pypy/module/cppyy/genreflex-methptrgetter.patch CPython ------- -Most of the ideas in cppyy come originally from the `PyROOT`_ project. 
-Although PyROOT does not support Reflex directly, it has an alter ego called -"PyCintex" that, in a somewhat roundabout way, does. -If you installed ROOT, rather than just Reflex, PyCintex should be available -immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment -variable. +Most of the ideas in cppyy come originally from the `PyROOT`_ project, which +contains a CPython-based cppyy.py module (with similar dependencies as the +one that comes with PyPy). +A standalone pip-installable version is planned, but for now you can install +ROOT through your favorite distribution installer (available in the science +section). .. _PyROOT: https://root.cern.ch/pyroot -There are a couple of minor differences between PyCintex and cppyy, most to do -with naming. -The one that you will run into directly, is that PyCintex uses a function -called ``loadDictionary`` rather than ``load_reflection_info`` (it has the -same rootmap-based class loader functionality, though, making this point -somewhat moot). -The reason for this is that Reflex calls the shared libraries that contain -reflection info "dictionaries." -However, in python, the name `dictionary` already has a well-defined meaning, -so a more descriptive name was chosen for cppyy. -In addition, PyCintex requires that the names of shared libraries so loaded -start with "lib" in their name. -The basic example above, rewritten for PyCintex thus goes like this:: - - $ python - >>> import PyCintex - >>> PyCintex.loadDictionary("libMyClassDict.so") - >>> myinst = PyCintex.gbl.MyClass(42) - >>> print myinst.GetMyInt() - 42 - >>> myinst.SetMyInt(33) - >>> print myinst.m_myint - 33 - >>> myinst.m_myint = 77 - >>> print myinst.GetMyInt() - 77 - >>> help(PyCintex.gbl.MyClass) # shows that normal python introspection works - -Other naming differences are such things as taking an address of an object. 
-In PyCintex, this is done with ``AddressOf`` whereas in cppyy the choice was -made to follow the naming as in ``ctypes`` and hence use ``addressof`` -(PyROOT/PyCintex predate ``ctypes`` by several years, and the ROOT project -follows camel-case, hence the differences). - -Of course, this is python, so if any of the naming is not to your liking, all -you have to do is provide a wrapper script that you import instead of -importing the ``cppyy`` or ``PyCintex`` modules directly. -In that wrapper script you can rename methods exactly the way you need it. - -In the cling world, all these differences will be resolved. +There are a couple of minor differences between the two versions of cppyy +(the CPython version has a few more features). +Work is on-going to integrate the nightly tests of both to make sure their +feature sets are equalized. Python3 ------- -To change versions of CPython (to Python3, another version of Python, or later -to the `Py3k`_ version of PyPy), the only part that requires recompilation is -the bindings module, be it ``cppyy`` or ``libPyROOT.so`` (in PyCintex). -Although ``genreflex`` is indeed a Python tool, the generated reflection -information is completely independent of Python. +The CPython version of cppyy supports Python3, assuming your packager has +build the backend for it. +The cppyy module has not been tested with the `Py3k`_ version of PyPy. +Note that the generated reflection information (from ``genreflex``) is fully +independent of Python, and does not need to be rebuild when switching versions +or interpreters. .. _Py3k: https://bitbucket.org/pypy/pypy/src/py3k @@ -871,5 +669,4 @@ .. 
toctree:: :hidden: - cppyy_backend cppyy_example diff --git a/pypy/doc/cppyy_backend.rst b/pypy/doc/cppyy_backend.rst deleted file mode 100644 --- a/pypy/doc/cppyy_backend.rst +++ /dev/null @@ -1,45 +0,0 @@ -Backends for cppyy -================== - -The cppyy module needs a backend to provide the C++ reflection information on -which the Python bindings are build. -The backend is called through a C-API, which can be found in the PyPy sources -in: :source:`pypy/module/cppyy/include/capi.h`. -There are two kinds of API calls: querying about reflection information, which -are used during the creation of Python-side constructs, and making the actual -calls into C++. -The objects passed around are all opaque: cppyy does not make any assumptions -about them, other than that the opaque handles can be copied. -Their definition, however, appears in two places: in the C code (in capi.h), -and on the RPython side (in :source:`capi_types.py `), so if they are changed, they -need to be changed on both sides. - -There are two places where selections in the RPython code affect the choice -(and use) of the backend. -The first is in :source:`pypy/module/cppyy/capi/__init__.py`:: - - # choose C-API access method: - from pypy.module.cppyy.capi.loadable_capi import * - #from pypy.module.cppyy.capi.builtin_capi import * - -The default is the loadable C-API. -Comment it and uncomment the builtin C-API line, to use the builtin version. - -Next, if the builtin C-API is chosen, the specific backend needs to be set as -well (default is Reflex). -This second choice is in :source:`pypy/module/cppyy/capi/builtin_capi.py`:: - - import reflex_capi as backend - #import cint_capi as backend - -After those choices have been made, built pypy-c as usual. - -When building pypy-c from source, keep the following in mind. -If the loadable_capi is chosen, no further prerequisites are needed. 
-However, for the build of the builtin_capi to succeed, the ``ROOTSYS`` -environment variable must point to the location of your ROOT (or standalone -Reflex in the case of the Reflex backend) installation, or the ``root-config`` -utility must be accessible through ``$PATH`` (e.g. by adding ``$ROOTSYS/bin`` -to ``PATH``). -In case of the former, include files are expected under ``$ROOTSYS/include`` -and libraries under ``$ROOTSYS/lib``. diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -12,7 +12,7 @@ * Write them in pure Python and use ctypes_. -* Write them in C++ and bind them through Reflex_. +* Write them in C++ and bind them through :doc:`cppyy ` using Cling. * Write them in as `RPython mixed modules`_. @@ -61,11 +61,11 @@ .. _libffi: http://sourceware.org/libffi/ -Reflex ------- +Cling and cppyy +--------------- The builtin :doc:`cppyy ` module uses reflection information, provided by -`Reflex`_ (which needs to be `installed separately`_), of C/C++ code to +`Cling`_ (which needs to be `installed separately`_), of C/C++ code to automatically generate bindings at runtime. In Python, classes and functions are always runtime structures, so when they are generated matters not for performance. @@ -76,11 +76,14 @@ The :doc:`cppyy ` module is written in RPython, thus PyPy's JIT is able to remove most cross-language call overhead. -:doc:`Full details ` are `available here `. +:doc:Full details are `available here `. -.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2 -.. _Reflex: https://root.cern.ch/how/how-use-reflex +.. _installed separately: https://pypi.python.org/pypi/PyPy-cppyy-backend +.. _Cling: https://root.cern.ch/cling +.. toctree:: + + cppyy RPython Mixed Modules --------------------- @@ -94,7 +97,3 @@ This is how the numpy module is being developed. -.. 
toctree:: - :hidden: - - cppyy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -59,6 +59,7 @@ .. toctree:: + release-pypy3.3-v5.5.0.rst release-pypy3.3-v5.2-alpha1.rst CPython 3.2 compatible versions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -45,3 +45,22 @@ Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key so it will be picked up by app-level objects of that type + +.. branch: cling-support + +Module cppyy now uses cling as its backend (Reflex has been removed). The +user-facing interface and main developer tools (genreflex, selection files, +class loader, etc.) remain the same. A libcppyy_backend.so library is still +needed but is now available through PyPI with pip: PyPy-cppyy-backend. + +The Cling-backend brings support for modern C++ (11, 14, etc.), dynamic +template instantiations, and improved integration with CFFI for better +performance. It also provides interactive C++ (and bindings to that). + +.. branch: better-PyDict_Next + +Improve the performance of ``PyDict_Next``. When trying ``PyDict_Next`` on a +typedef dict, the test exposed a problem converting a ``GetSetProperty`` to a +``PyGetSetDescrObject``. The other direction seems to be fully implemented. +This branch made a minimal effort to convert the basic fields to avoid +segfaults, but trying to use the ``PyGetSetDescrObject`` will probably fail. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -83,12 +83,18 @@ return 1 return exitcode + return entry_point, get_additional_entrypoints(space, w_initstdio) + + +def get_additional_entrypoints(space, w_initstdio): # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype + if space.config.objspace.disable_entrypoints: + return {} + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): @@ -188,11 +194,11 @@ return -1 return 0 - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + return {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -428,6 +428,8 @@ make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None + self._builtin_functions_by_identifier = {'': None} + # can be overridden to a subclass self.initialize() diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -247,16 +247,15 @@ def descr_function_repr(self): return self.getrepr(self.space, 'function %s' % (self.name,)) - # delicate - _all = {'': None} def _cleanup_(self): + # delicate from pypy.interpreter.gateway import BuiltinCode if isinstance(self.code, BuiltinCode): # we have been seen by other means so rtyping should not choke # on us identifier = self.code.identifier - previous = Function._all.get(identifier, self) + previous = self.space._builtin_functions_by_identifier.get(identifier, self) assert previous is self, ( "duplicate function ids with identifier=%r: %r 
and %r" % ( identifier, previous, self)) @@ -264,10 +263,10 @@ return False def add_to_table(self): - Function._all[self.code.identifier] = self + self.space._builtin_functions_by_identifier[self.code.identifier] = self - def find(identifier): - return Function._all[identifier] + def find(space, identifier): + return space._builtin_functions_by_identifier[identifier] find = staticmethod(find) def descr_function__reduce__(self, space): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -671,10 +671,10 @@ return space.newtuple([builtin_code, space.newtuple([space.wrap(self.identifier)])]) - def find(indentifier): + @staticmethod + def find(space, identifier): from pypy.interpreter.function import Function - return Function._all[indentifier].code - find = staticmethod(find) + return Function.find(space, identifier).code def signature(self): return self.sig diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -4,6 +4,7 @@ import sys from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized, check_nonneg +from rpython.rlib.debug import ll_assert_not_none from rpython.rlib.jit import hint from rpython.rlib.objectmodel import instantiate, specialize, we_are_translated from rpython.rlib.rarithmetic import intmask, r_uint @@ -298,7 +299,13 @@ # stack manipulation helpers def pushvalue(self, w_object): depth = self.valuestackdepth - self.locals_cells_stack_w[depth] = w_object + self.locals_cells_stack_w[depth] = ll_assert_not_none(w_object) + self.valuestackdepth = depth + 1 + + def pushvalue_none(self): + depth = self.valuestackdepth + # the entry is already None, and remains None + assert self.locals_cells_stack_w[depth] is None self.valuestackdepth = depth + 1 def _check_stack_index(self, index): @@ -311,6 +318,9 @@ return index >= stackstart def popvalue(self): + return 
ll_assert_not_none(self.popvalue_maybe_none()) + + def popvalue_maybe_none(self): depth = self.valuestackdepth - 1 assert self._check_stack_index(depth) assert depth >= 0 @@ -385,6 +395,9 @@ def peekvalue(self, index_from_top=0): # NOTE: top of the stack is peekvalue(0). # Contrast this with CPython where it's PEEK(-1). + return ll_assert_not_none(self.peekvalue_maybe_none(index_from_top)) + + def peekvalue_maybe_none(self, index_from_top=0): index_from_top = hint(index_from_top, promote=True) index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) @@ -396,7 +409,7 @@ index = self.valuestackdepth + ~index_from_top assert self._check_stack_index(index) assert index >= 0 - self.locals_cells_stack_w[index] = w_object + self.locals_cells_stack_w[index] = ll_assert_not_none(w_object) @jit.unroll_safe def dropvaluesuntil(self, finaldepth): diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -580,3 +580,25 @@ pass sys.settrace(None) assert seen == ['call', 'exception', 'return'] + + def test_generator_trace_stopiteration(self): + import sys + def f(): + yield 5 + gen = f() + assert next(gen) == 5 + seen = [] + def trace_func(frame, event, *args): + print('TRACE:', frame, event, args) + seen.append(event) + return trace_func + def g(): + for x in gen: + never_entered + sys.settrace(trace_func) + g() + sys.settrace(None) + print 'seen:', seen + # on Python 3 we get an extra 'exception' when 'for' catches + # StopIteration + assert seen == ['call', 'line', 'call', 'return', 'return'] diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,6 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rlib import 
rdynload, clibffi from rpython.rtyper.lltypesystem import rffi VERSION = "1.9.1" @@ -68,9 +68,14 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL - def startup(self, space): - from pypy.module._cffi_backend import embedding - embedding.glob.space = space + def __init__(self, space, *args): + MixedModule.__init__(self, space, *args) + # + if not space.config.objspace.disable_entrypoints: + # import 'embedding', which has the side-effect of registering + # the 'pypy_init_embedded_cffi_module' entry point + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): @@ -85,11 +90,3 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value - - -# write this entrypoint() here, to make sure it is registered early enough - at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], - c_name='pypy_init_embedded_cffi_module') -def pypy_init_embedded_cffi_module(version, init_struct): - from pypy.module._cffi_backend import embedding - return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,4 +1,5 @@ import os +from rpython.rlib import entrypoint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -46,6 +47,8 @@ glob = Global() + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') def pypy_init_embedded_cffi_module(version, init_struct): # called from __init__.py name = "?" 
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -77,7 +77,7 @@ def builtin_code(space, identifier): from pypy.interpreter import gateway try: - return gateway.BuiltinCode.find(identifier) + return gateway.BuiltinCode.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin code: %s", identifier) @@ -86,7 +86,7 @@ def builtin_function(space, identifier): from pypy.interpreter import function try: - return function.Function.find(identifier) + return function.Function.find(space, identifier) except KeyError: raise oefmt(space.w_RuntimeError, "cannot unpickle builtin function: %s", identifier) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -169,8 +169,8 @@ } def setup_method(self, method): - # https://www.verisign.net/ - ADDR = "www.verisign.net", 443 + # https://gmail.com/ + ADDR = "gmail.com", 443 self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR): import socket diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -14,7 +14,6 @@ '_set_class_generator' : 'interp_cppyy.set_class_generator', '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', - '_is_static' : 'interp_cppyy.is_static', '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstanceBase' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', diff --git a/pypy/module/cppyy/bench/Makefile b/pypy/module/cppyy/bench/Makefile --- a/pypy/module/cppyy/bench/Makefile +++ b/pypy/module/cppyy/bench/Makefile @@ -26,4 +26,4 @@ bench02Dict_reflex.so: bench02.h bench02.cxx bench02.xml $(genreflex) bench02.h $(genreflexflags) --selection=bench02.xml 
-I$(ROOTSYS)/include - g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -lReflex -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) + g++ -o $@ bench02.cxx bench02_rflx.cpp -I$(ROOTSYS)/include -shared -std=c++11 -lHistPainter `root-config --libs` $(cppflags) $(cppflags2) diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -1,12 +1,11 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit -import reflex_capi as backend -#import cint_capi as backend +import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ - C_METHPTRGETTER, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR identify = backend.identify pythonize = backend.pythonize @@ -52,13 +51,6 @@ compilation_info=backend.eci) def c_get_scope_opaque(space, name): return _c_get_scope_opaque(name) -_c_get_template = rffi.llexternal( - "cppyy_get_template", - [rffi.CCHARP], C_TYPE, - releasegil=ts_reflect, - compilation_info=backend.eci) -def c_get_template(space, name): - return _c_get_template(name) _c_actual_class = rffi.llexternal( "cppyy_actual_class", [C_TYPE, C_OBJECT], C_TYPE, @@ -154,6 +146,13 @@ compilation_info=backend.eci) def c_call_d(space, cppmethod, cppobject, nargs, args): return _c_call_d(cppmethod, cppobject, nargs, args) +_c_call_ld = rffi.llexternal( + "cppyy_call_ld", + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.LONGDOUBLE, + releasegil=ts_call, + compilation_info=backend.eci) +def c_call_ld(space, cppmethod, cppobject, nargs, args): + return _c_call_ld(cppmethod, cppobject, nargs, args) _c_call_r = rffi.llexternal( "cppyy_call_r", @@ -164,11 +163,17 @@ return _c_call_r(cppmethod, cppobject, nargs, args) _c_call_s = rffi.llexternal( 
"cppyy_call_s", - [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP], rffi.CCHARP, + [C_METHOD, C_OBJECT, rffi.INT, rffi.VOIDP, rffi.SIZE_TP], rffi.CCHARP, releasegil=ts_call, compilation_info=backend.eci) def c_call_s(space, cppmethod, cppobject, nargs, args): - return _c_call_s(cppmethod, cppobject, nargs, args) + length = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_call_s(cppmethod, cppobject, nargs, args, length) + cstr_len = intmask(length[0]) + finally: + lltype.free(length, flavor='raw') + return cstr, cstr_len _c_constructor = rffi.llexternal( "cppyy_constructor", @@ -185,15 +190,14 @@ def c_call_o(space, method, cppobj, nargs, args, cppclass): return _c_call_o(method, cppobj, nargs, args, cppclass.handle) -_c_get_methptr_getter = rffi.llexternal( - "cppyy_get_methptr_getter", - [C_SCOPE, C_INDEX], C_METHPTRGETTER_PTR, +_c_get_function_address = rffi.llexternal( + "cppyy_get_function_address", + [C_SCOPE, C_INDEX], C_FUNC_PTR, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) -def c_get_methptr_getter(space, cppscope, index): - return _c_get_methptr_getter(cppscope.handle, index) +def c_get_function_address(space, cppscope, index): + return _c_get_function_address(cppscope.handle, index) # handling of function argument buffer --------------------------------------- _c_allocate_function_args = rffi.llexternal( @@ -215,8 +219,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_sizeof(space): return _c_function_arg_sizeof() _c_function_arg_typeoffset = rffi.llexternal( @@ -224,8 +228,8 @@ [], rffi.SIZE_T, releasegil=ts_memory, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) + at jit.elidable def c_function_arg_typeoffset(space): return _c_function_arg_typeoffset() @@ -237,6 +241,20 @@ compilation_info=backend.eci) def 
c_is_namespace(space, scope): return _c_is_namespace(scope) +_c_is_template = rffi.llexternal( + "cppyy_is_template", + [rffi.CCHARP], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_template(space, name): + return _c_is_template(name) +_c_is_abstract = rffi.llexternal( + "cppyy_is_abstract", + [C_SCOPE], rffi.INT, + releasegil=ts_reflect, + compilation_info=backend.eci) +def c_is_abstract(space, cpptype): + return _c_is_abstract(cpptype) _c_is_enum = rffi.llexternal( "cppyy_is_enum", [rffi.CCHARP], rffi.INT, @@ -286,9 +304,8 @@ [C_TYPE, C_TYPE], rffi.INT, releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('2') + at jit.elidable def c_is_subtype(space, derived, base): if derived == base: return 1 @@ -296,12 +313,11 @@ _c_base_offset = rffi.llexternal( "cppyy_base_offset", - [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.SIZE_T, + [C_TYPE, C_TYPE, C_OBJECT, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci, - elidable_function=True, random_effects_on_gcobjs=False) - at jit.elidable_promote('1,2,4') + at jit.elidable def c_base_offset(space, derived, base, address, direction): if derived == base: return 0 @@ -340,7 +356,7 @@ i += 1 py_indices.append(index) index = indices[i] - c_free(rffi.cast(rffi.VOIDP, indices)) # c_free defined below + c_free(space, rffi.cast(rffi.VOIDP, indices)) # c_free defined below return py_indices _c_method_name = rffi.llexternal( @@ -474,7 +490,7 @@ return charp2str_free(space, _c_datamember_type(cppscope.handle, datamember_index)) _c_datamember_offset = rffi.llexternal( "cppyy_datamember_offset", - [C_SCOPE, rffi.INT], rffi.SIZE_T, + [C_SCOPE, rffi.INT], rffi.LONG, # actually ptrdiff_t releasegil=ts_reflect, compilation_info=backend.eci) def c_datamember_offset(space, cppscope, datamember_index): @@ -519,27 +535,29 @@ compilation_info=backend.eci) def c_strtoull(space, svalue): return 
_c_strtoull(svalue) -c_free = rffi.llexternal( +_c_free = rffi.llexternal( "cppyy_free", [rffi.VOIDP], lltype.Void, releasegil=ts_memory, compilation_info=backend.eci) +def c_free(space, voidp): + return _c_free(voidp) def charp2str_free(space, charp): string = rffi.charp2str(charp) voidp = rffi.cast(rffi.VOIDP, charp) - c_free(voidp) + _c_free(voidp) return string _c_charp2stdstring = rffi.llexternal( "cppyy_charp2stdstring", - [rffi.CCHARP], C_OBJECT, + [rffi.CCHARP, rffi.SIZE_T], C_OBJECT, releasegil=ts_helper, compilation_info=backend.eci) -def c_charp2stdstring(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2stdstring(charp) - return result +def c_charp2stdstring(space, pystr, sz): + with rffi.scoped_view_charp(pystr) as cstr: + cppstr = _c_charp2stdstring(cstr, sz) + return cppstr _c_stdstring2stdstring = rffi.llexternal( "cppyy_stdstring2stdstring", [C_OBJECT], C_OBJECT, @@ -547,3 +565,26 @@ compilation_info=backend.eci) def c_stdstring2stdstring(space, cppobject): return _c_stdstring2stdstring(cppobject) + +_c_stdvector_valuetype = rffi.llexternal( + "cppyy_stdvector_valuetype", + [rffi.CCHARP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuetype(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuetype(cstr) + rffi.free_charp(cstr) + if result: + return charp2str_free(space, result) + return "" +_c_stdvector_valuesize = rffi.llexternal( + "cppyy_stdvector_valuesize", + [rffi.CCHARP], rffi.SIZE_T, + releasegil=ts_helper, + compilation_info=backend.eci) +def c_stdvector_valuesize(space, pystr): + cstr = rffi.str2charp(pystr) + result = _c_stdvector_valuesize(cstr) + rffi.free_charp(cstr) + return result diff --git a/pypy/module/cppyy/capi/capi_types.py b/pypy/module/cppyy/capi/capi_types.py --- a/pypy/module/cppyy/capi/capi_types.py +++ b/pypy/module/cppyy/capi/capi_types.py @@ -18,5 +18,4 @@ C_INDEX_ARRAY = rffi.LONGP WLAVC_INDEX = rffi.LONG 
-C_METHPTRGETTER = lltype.FuncType([C_OBJECT], rffi.VOIDP) -C_METHPTRGETTER_PTR = lltype.Ptr(C_METHPTRGETTER) +C_FUNC_PTR = rffi.VOIDP diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py deleted file mode 100644 --- a/pypy/module/cppyy/capi/cint_capi.py +++ /dev/null @@ -1,437 +0,0 @@ -import py, os, sys - -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root - -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib import libffi, rdynload -from rpython.tool.udir import udir - -from pypy.module.cppyy.capi.capi_types import C_OBJECT - - -__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] - -pkgpath = py.path.local(__file__).dirpath().join(os.pardir) -srcpath = pkgpath.join("src") -incpath = pkgpath.join("include") - -if os.environ.get("ROOTSYS"): - import commands - (stat, incdir) = commands.getstatusoutput("root-config --incdir") - if stat != 0: - rootincpath = [os.path.join(os.environ["ROOTSYS"], "include"), py.path.local(udir)] - rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] - else: - rootincpath = [incdir, py.path.local(udir)] - rootlibpath = commands.getoutput("root-config --libdir").split() -else: - rootincpath = [py.path.local(udir)] - rootlibpath = [] - -def identify(): - return 'CINT' - -ts_reflect = True -ts_call = True -ts_memory = False -ts_helper = False - -std_string_name = 'string' - -# force loading in global mode of core libraries, rather than linking with -# them as PyPy uses various version of dlopen in various places; note that -# this isn't going to fly on Windows (note that locking them in objects and -# calling dlclose in __del__ seems to come too late, so this'll do for now) -with 
rffi.scoped_str2charp('libCint.so') as ll_libname: - _cintdll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libCore.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) -with rffi.scoped_str2charp('libHist.so') as ll_libname: - _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) - -eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join("cintcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, - includes=["cintcwrapper.h"], - library_dirs=rootlibpath, - libraries=["Hist", "Core", "Cint"], - use_cpp_linker=True, -) - -_c_load_dictionary = rffi.llexternal( - "cppyy_load_dictionary", - [rffi.CCHARP], rdynload.DLLHANDLE, - releasegil=False, - compilation_info=eci) - -def c_load_dictionary(name): - result = _c_load_dictionary(name) - # ignore result: libffi.CDLL(name) either returns a handle to the already - # open file, or will fail as well and produce a correctly formatted error - return libffi.CDLL(name) - - -# CINT-specific pythonizations =============================================== -_c_charp2TString = rffi.llexternal( - "cppyy_charp2TString", - [rffi.CCHARP], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_charp2TString(space, svalue): - with rffi.scoped_view_charp(svalue) as charp: - result = _c_charp2TString(charp) - return result -_c_TString2TString = rffi.llexternal( - "cppyy_TString2TString", - [C_OBJECT], C_OBJECT, - releasegil=ts_helper, - compilation_info=eci) -def c_TString2TString(space, cppobject): - return _c_TString2TString(cppobject) - -def _get_string_data(space, w_obj, m1, m2 = None): - from pypy.module.cppyy import interp_cppyy - obj = space.interp_w(interp_cppyy.W_CPPInstance, w_obj) - w_1 = obj.space.call_method(w_obj, m1) - if m2 is None: - return w_1 - return obj.space.call_method(w_1, m2) - -### TF1 ---------------------------------------------------------------------- -class 
State(object): - def __init__(self, space): - self.tfn_pyfuncs = [] - self.tfn_callbacks = [] - -_create_tf1 = rffi.llexternal( - "cppyy_create_tf1", - [rffi.CCHARP, rffi.ULONG, rffi.DOUBLE, rffi.DOUBLE, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def tf1_tf1(space, w_self, args_w): - """Pythonized version of TF1 constructor: - takes functions and callable objects, and allows a callback into them.""" - - from pypy.module.cppyy import interp_cppyy - tf1_class = interp_cppyy.scope_byname(space, "TF1") - - # expected signature: - # 1. (char* name, pyfunc, double xmin, double xmax, int npar = 0) - argc = len(args_w) - - try: - if argc < 4 or 5 < argc: - raise TypeError("wrong number of arguments") - - # first argument must be a name - funcname = space.str_w(args_w[0]) - - # last (optional) argument is number of parameters - npar = 0 - if argc == 5: npar = space.int_w(args_w[4]) - - # second argument must be a callable python object - w_callable = args_w[1] - if not space.is_true(space.callable(w_callable)): - raise TypeError("2nd argument is not a valid python callable") - - # generate a pointer to function - from pypy.module._cffi_backend import newtype, ctypefunc, func - - c_double = newtype.new_primitive_type(space, 'double') - c_doublep = newtype.new_pointer_type(space, c_double) - - # wrap the callable as the signature needs modifying - w_ifunc = interp_cppyy.get_interface_func(space, w_callable, npar) - - w_cfunc = ctypefunc.W_CTypeFunc(space, [c_doublep, c_doublep], c_double, False) - w_callback = func.callback(space, w_cfunc, w_ifunc, None) - funcaddr = rffi.cast(rffi.ULONG, w_callback.get_closure()) - - # so far, so good; leaves on issue: CINT is expecting a wrapper, but - # we need the overload that takes a function pointer, which is not in - # the dictionary, hence this helper: - newinst = _create_tf1(space.str_w(args_w[0]), funcaddr, - space.float_w(args_w[2]), space.float_w(args_w[3]), npar) - - # 
w_self is a null-ptr bound as TF1 - from pypy.module.cppyy.interp_cppyy import W_CPPInstance, memory_regulator - cppself = space.interp_w(W_CPPInstance, w_self, can_be_None=False) - cppself._rawobject = newinst - memory_regulator.register(cppself) - - # tie all the life times to the TF1 instance - space.setattr(w_self, space.wrap('_callback'), w_callback) - - # by definition for __init__ - return None - - except (OperationError, TypeError, IndexError) as e: - newargs_w = args_w[1:] # drop class - - # return control back to the original, unpythonized overload - ol = tf1_class.get_overload("TF1") - return ol.call(None, newargs_w) - -### TTree -------------------------------------------------------------------- -_ttree_Branch = rffi.llexternal( - "cppyy_ttree_Branch", - [rffi.VOIDP, rffi.CCHARP, rffi.CCHARP, rffi.VOIDP, rffi.INT, rffi.INT], C_OBJECT, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_Branch(space, w_self, args_w): - """Pythonized version of TTree::Branch(): takes proxy objects and by-passes - the CINT-manual layer.""" - - from pypy.module.cppyy import interp_cppyy - tree_class = interp_cppyy.scope_byname(space, "TTree") - - # sigs to modify (and by-pass CINT): - # 1. (const char*, const char*, T**, Int_t=32000, Int_t=99) - # 2. (const char*, T**, Int_t=32000, Int_t=99) - argc = len(args_w) - - # basic error handling of wrong arguments is best left to the original call, - # so that error messages etc. 
remain consistent in appearance: the following - # block may raise TypeError or IndexError to break out anytime - - try: - if argc < 2 or 5 < argc: - raise TypeError("wrong number of arguments") - - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=True) - if (tree is None) or (tree.cppclass != tree_class): - raise TypeError("not a TTree") - - # first argument must always always be cont char* - branchname = space.str_w(args_w[0]) - - # if args_w[1] is a classname, then case 1, else case 2 - try: - classname = space.str_w(args_w[1]) - addr_idx = 2 - w_address = args_w[addr_idx] - except (OperationError, TypeError): - addr_idx = 1 - w_address = args_w[addr_idx] - - bufsize, splitlevel = 32000, 99 - if addr_idx+1 < argc: bufsize = space.c_int_w(args_w[addr_idx+1]) - if addr_idx+2 < argc: splitlevel = space.c_int_w(args_w[addr_idx+2]) - - # now retrieve the W_CPPInstance and build other stub arguments - space = tree.space # holds the class cache in State - cppinstance = space.interp_w(interp_cppyy.W_CPPInstance, w_address) - address = rffi.cast(rffi.VOIDP, cppinstance.get_rawobject()) - klassname = cppinstance.cppclass.full_name() - vtree = rffi.cast(rffi.VOIDP, tree.get_rawobject()) - - # call the helper stub to by-pass CINT - vbranch = _ttree_Branch(vtree, branchname, klassname, address, bufsize, splitlevel) - branch_class = interp_cppyy.scope_byname(space, "TBranch") - w_branch = interp_cppyy.wrap_cppobject(space, vbranch, branch_class) - return w_branch - except (OperationError, TypeError, IndexError): - pass - - # return control back to the original, unpythonized overload - ol = tree_class.get_overload("Branch") - return ol.call(w_self, args_w) - -def activate_branch(space, w_branch): - w_branches = space.call_method(w_branch, "GetListOfBranches") - for i in range(space.r_longlong_w(space.call_method(w_branches, "GetEntriesFast"))): - w_b = space.call_method(w_branches, "At", space.wrap(i)) - activate_branch(space, w_b) - 
space.call_method(w_branch, "SetStatus", space.wrap(1)) - space.call_method(w_branch, "ResetReadEntry") - -c_ttree_GetEntry = rffi.llexternal( - "cppyy_ttree_GetEntry", - [rffi.VOIDP, rffi.LONGLONG], rffi.LONGLONG, - releasegil=False, - compilation_info=eci) - - at unwrap_spec(args_w='args_w') -def ttree_getattr(space, w_self, args_w): - """Specialized __getattr__ for TTree's that allows switching on/off the - reading of individual branchs.""" - - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_self) - - space = tree.space # holds the class cache in State - - # prevent recursion - attr = space.str_w(args_w[0]) - if attr and attr[0] == '_': - raise OperationError(space.w_AttributeError, args_w[0]) - - # try the saved cdata (for builtin types) - try: - w_cdata = space.getattr(w_self, space.wrap('_'+attr)) - from pypy.module._cffi_backend import cdataobj - cdata = space.interp_w(cdataobj.W_CData, w_cdata, can_be_None=False) - return cdata.convert_to_object() - except OperationError: - pass - - # setup branch as a data member and enable it for reading - w_branch = space.call_method(w_self, "GetBranch", args_w[0]) - if not space.is_true(w_branch): - raise OperationError(space.w_AttributeError, args_w[0]) - activate_branch(space, w_branch) - - # figure out from where we're reading - entry = space.r_longlong_w(space.call_method(w_self, "GetReadEntry")) - if entry == -1: - entry = 0 - - # setup cache structure - w_klassname = space.call_method(w_branch, "GetClassName") - if space.is_true(w_klassname): - # some instance - klass = interp_cppyy.scope_byname(space, space.str_w(w_klassname)) - w_obj = klass.construct() - # 0x10000 = kDeleteObject; reset because we own the object - space.call_method(w_branch, "ResetBit", space.wrap(0x10000)) - space.call_method(w_branch, "SetObject", w_obj) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - space.setattr(w_self, args_w[0], w_obj) - return w_obj - else: - # builtin data 
- w_leaf = space.call_method(w_self, "GetLeaf", args_w[0]) - space.call_method(w_branch, "GetEntry", space.wrap(entry)) - - # location - w_address = space.call_method(w_leaf, "GetValuePointer") - buf = space.getarg_w('s*', w_address) - from pypy.module._rawffi import buffer - assert isinstance(buf, buffer.RawFFIBuffer) - address = rffi.cast(rffi.CCHARP, buf.datainstance.ll_buffer) - - # placeholder - w_typename = space.call_method(w_leaf, "GetTypeName" ) - from pypy.module.cppyy import capi - typename = capi.c_resolve_name(space, space.str_w(w_typename)) - if typename == 'bool': typename = '_Bool' - w_address = space.call_method(w_leaf, "GetValuePointer") - from pypy.module._cffi_backend import cdataobj, newtype - cdata = cdataobj.W_CData(space, address, newtype.new_primitive_type(space, typename)) - - # cache result - space.setattr(w_self, space.wrap('_'+attr), space.wrap(cdata)) - return space.getattr(w_self, args_w[0]) - -class W_TTreeIter(W_Root): - def __init__(self, space, w_tree): - from pypy.module.cppyy import interp_cppyy - tree = space.interp_w(interp_cppyy.W_CPPInstance, w_tree) - self.vtree = rffi.cast(rffi.VOIDP, tree.get_cppthis(tree.cppclass)) - self.w_tree = w_tree - - self.current = 0 - self.maxentry = space.r_longlong_w(space.call_method(w_tree, "GetEntriesFast")) - - space = self.space = tree.space # holds the class cache in State - space.call_method(w_tree, "SetBranchStatus", space.wrap("*"), space.wrap(0)) - - def iter_w(self): - return self.space.wrap(self) - - def next_w(self): - if self.current == self.maxentry: - raise OperationError(self.space.w_StopIteration, self.space.w_None) - # TODO: check bytes read? - c_ttree_GetEntry(self.vtree, self.current) - self.current += 1 - return self.w_tree - -W_TTreeIter.typedef = TypeDef( - 'TTreeIter', - __iter__ = interp2app(W_TTreeIter.iter_w), - next = interp2app(W_TTreeIter.next_w), -) - -def ttree_iter(space, w_self): - """Allow iteration over TTree's. 
Also initializes branch data members and - sets addresses, if needed.""" - w_treeiter = W_TTreeIter(space, w_self) - return w_treeiter - -# setup pythonizations for later use at run-time -_pythonizations = {} -def register_pythonizations(space): - "NOT_RPYTHON" - - allfuncs = [ - - ### TF1 - tf1_tf1, - - ### TTree - ttree_Branch, ttree_iter, ttree_getattr, - ] - - for f in allfuncs: - _pythonizations[f.__name__] = space.wrap(interp2app(f)) - -def _method_alias(space, w_pycppclass, m1, m2): - space.setattr(w_pycppclass, space.wrap(m1), - space.getattr(w_pycppclass, space.wrap(m2))) - -# callback coming in when app-level bound classes have been created -def pythonize(space, name, w_pycppclass): - - if name == "TCollection": - _method_alias(space, w_pycppclass, "append", "Add") - _method_alias(space, w_pycppclass, "__len__", "GetSize") - - elif name == "TF1": - space.setattr(w_pycppclass, space.wrap("__init__"), _pythonizations["tf1_tf1"]) - - elif name == "TFile": - _method_alias(space, w_pycppclass, "__getattr__", "Get") - - elif name == "TObjString": - _method_alias(space, w_pycppclass, "__str__", "GetName") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "GetString") - - elif name == "TString": - _method_alias(space, w_pycppclass, "__str__", "Data") - _method_alias(space, w_pycppclass, "__len__", "Length") - _method_alias(space, w_pycppclass, "__cmp__", "CompareTo") - _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "Data") - - elif name == "TTree": - _method_alias(space, w_pycppclass, "_unpythonized_Branch", "Branch") - - space.setattr(w_pycppclass, space.wrap("Branch"), _pythonizations["ttree_Branch"]) - space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["ttree_iter"]) - space.setattr(w_pycppclass, space.wrap("__getattr__"), _pythonizations["ttree_getattr"]) - - elif name[0:8] == "TVectorT": # TVectorT<> template - _method_alias(space, w_pycppclass, "__len__", "GetNoElements") - -# destruction callback (needs better solution, 
but this is for CINT -# only and should not appear outside of ROOT-specific uses) -from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL - - at cpython_api([rffi.VOIDP], lltype.Void, error=CANNOT_FAIL) -def _Py_cppyy_recursive_remove(space, cppobject): - from pypy.module.cppyy.interp_cppyy import memory_regulator - from pypy.module.cppyy.capi import C_OBJECT, C_NULL_OBJECT - - obj = memory_regulator.retrieve(rffi.cast(C_OBJECT, cppobject)) - if obj is not None: - memory_regulator.unregister(obj) - obj._rawobject = C_NULL_OBJECT diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -1,8 +1,17 @@ import py, os +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject + +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app + from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib import libffi, rdynload +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit, libffi, rdynload + +from pypy.module._rawffi.array import W_ArrayInstance +from pypy.module.cppyy.capi.capi_types import C_OBJECT __all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] @@ -16,7 +25,8 @@ if os.environ.get("ROOTSYS"): if config_stat != 0: # presumably Reflex-only rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include"), + os.path.join(os.environ["ROOTSYS"], "include"),] rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: rootincpath = [incdir] @@ -39,13 +49,21 @@ std_string_name = 'std::basic_string' +# force loading (and exposure) of 
libCore symbols +with rffi.scoped_str2charp('libCore.so') as ll_libname: + _coredll = rdynload.dlopen(ll_libname, rdynload.RTLD_GLOBAL | rdynload.RTLD_NOW) + +# require local translator path to pickup common defs +from rpython.translator import cdir +translator_c_dir = py.path.local(cdir) + eci = ExternalCompilationInfo( separate_module_files=[srcpath.join("clingcwrapper.cxx")], - include_dirs=[incpath] + rootincpath, + include_dirs=[incpath, translator_c_dir] + rootincpath, includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) @@ -59,11 +77,120 @@ pch = _c_load_dictionary(name) return pch +_c_stdstring2charp = rffi.llexternal( + "cppyy_stdstring2charp", + [C_OBJECT, rffi.SIZE_TP], rffi.CCHARP, + releasegil=ts_helper, + compilation_info=eci) +def c_stdstring2charp(space, cppstr): + sz = lltype.malloc(rffi.SIZE_TP.TO, 1, flavor='raw') + try: + cstr = _c_stdstring2charp(cppstr, sz) + cstr_len = intmask(sz[0]) + finally: + lltype.free(sz, flavor='raw') + return rffi.charpsize2str(cstr, cstr_len) -# Cling-specific pythonizations +# TODO: factor these out ... 
+# pythonizations + +# +# std::string behavior +def stdstring_c_str(space, w_self): + """Return a python string taking into account \0""" + + from pypy.module.cppyy import interp_cppyy + cppstr = space.interp_w(interp_cppyy.W_CPPInstance, w_self, can_be_None=False) + return space.wrap(c_stdstring2charp(space, cppstr._rawobject)) + +# +# std::vector behavior +class W_STLVectorIter(W_AbstractSeqIterObject): + _immutable_fields_ = ['overload', 'len']#'data', 'converter', 'len', 'stride', 'vector'] + + def __init__(self, space, w_vector): + W_AbstractSeqIterObject.__init__(self, w_vector) + # TODO: this should live in rpythonize.py or something so that the + # imports can move to the top w/o getting circles + from pypy.module.cppyy import interp_cppyy + assert isinstance(w_vector, interp_cppyy.W_CPPInstance) + vector = space.interp_w(interp_cppyy.W_CPPInstance, w_vector) + self.overload = vector.cppclass.get_overload("__getitem__") + + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, vector.cppclass.name) + v_size = capi.c_stdvector_valuesize(space, vector.cppclass.name) + + if not v_type or not v_size: + raise NotImplementedError # fallback on getitem + + w_arr = vector.cppclass.get_overload("data").call(w_vector, []) + arr = space.interp_w(W_ArrayInstance, w_arr, can_be_None=True) + if not arr: + raise OperationError(space.w_StopIteration, space.w_None) + + self.data = rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + + from pypy.module.cppyy import converter + self.converter = converter.get_converter(space, v_type, '') + self.len = space.uint_w(vector.cppclass.get_overload("size").call(w_vector, [])) + self.stride = v_size + + def descr_next(self, space): + if self.w_seq is None: + raise OperationError(space.w_StopIteration, space.w_None) + if self.len <= self.index: + self.w_seq = None + raise OperationError(space.w_StopIteration, space.w_None) + try: + from pypy.module.cppyy import capi # TODO: refector + offset = 
capi.direct_ptradd(rffi.cast(C_OBJECT, self.data), self.index*self.stride) + w_item = self.converter.from_memory(space, space.w_None, space.w_None, offset) + except OperationError as e: + self.w_seq = None + if not e.match(space, space.w_IndexError): + raise + raise OperationError(space.w_StopIteration, space.w_None) + self.index += 1 + return w_item + +def stdvector_iter(space, w_self): + return W_STLVectorIter(space, w_self) + +# setup pythonizations for later use at run-time +_pythonizations = {} def register_pythonizations(space): "NOT_RPYTHON" - pass + + allfuncs = [ + + ### std::string + stdstring_c_str, + + ### std::vector + stdvector_iter, + + ] + + for f in allfuncs: + _pythonizations[f.__name__] = space.wrap(interp2app(f)) + +def _method_alias(space, w_pycppclass, m1, m2): + space.setattr(w_pycppclass, space.wrap(m1), + space.getattr(w_pycppclass, space.wrap(m2))) def pythonize(space, name, w_pycppclass): - pass + if name == "string": + space.setattr(w_pycppclass, space.wrap("c_str"), _pythonizations["stdstring_c_str"]) + _method_alias(space, w_pycppclass, "_cppyy_as_builtin", "c_str") + _method_alias(space, w_pycppclass, "__str__", "c_str") + + if "vector" in name[:11]: # len('std::vector') == 11 + from pypy.module.cppyy import capi + v_type = capi.c_stdvector_valuetype(space, name) + if v_type: + space.setattr(w_pycppclass, space.wrap("value_type"), space.wrap(v_type)) + v_size = capi.c_stdvector_valuesize(space, name) + if v_size: + space.setattr(w_pycppclass, space.wrap("value_size"), space.wrap(v_size)) + space.setattr(w_pycppclass, space.wrap("__iter__"), _pythonizations["stdvector_iter"]) diff --git a/pypy/module/cppyy/capi/loadable_capi.py b/pypy/module/cppyy/capi/loadable_capi.py --- a/pypy/module/cppyy/capi/loadable_capi.py +++ b/pypy/module/cppyy/capi/loadable_capi.py @@ -1,14 +1,18 @@ from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit, jit_libffi, libffi, rdynload, 
objectmodel from rpython.rlib.rarithmetic import r_singlefloat from rpython.tool import leakfinder +from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import ctypefunc, ctypeprim, cdataobj, misc +from pypy.module._cffi_backend import newtype +from pypy.module.cppyy import ffitypes from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ - C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_METHPTRGETTER_PTR + C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX, C_FUNC_PTR reflection_library = 'libcppyy_backend.so' @@ -21,11 +25,32 @@ class _Arg: # poor man's union _immutable_ = True - def __init__(self, h = 0, l = -1, s = '', vp = rffi.cast(rffi.VOIDP, 0)): + def __init__(self, tc, h = 0, l = -1, s = '', p = rffi.cast(rffi.VOIDP, 0)): + self.tc = tc self._handle = h self._long = l self._string = s - self._voidp = vp + self._voidp = p + +class _ArgH(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'h', h = val) + +class _ArgL(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'l', l = val) + +class _ArgS(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 's', s = val) + +class _ArgP(_Arg): + _immutable_ = True + def __init__(self, val): + _Arg.__init__(self, 'p', p = val) # For the loadable CAPI, the calls start and end in RPython. 
Therefore, the standard # _call of W_CTypeFunc, which expects wrapped objects, does not quite work: some @@ -55,14 +80,18 @@ argtype = self.fargs[i] # the following is clumsy, but the data types used as arguments are # very limited, so it'll do for now - if isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned): + if obj.tc == 'l': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveSigned) misc.write_raw_signed_data(data, rffi.cast(rffi.LONG, obj._long), argtype.size) - elif isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned): + elif obj.tc == 'h': + assert isinstance(argtype, ctypeprim.W_CTypePrimitiveUnsigned) misc.write_raw_unsigned_data(data, rffi.cast(rffi.ULONG, obj._handle), argtype.size) - elif obj._voidp != rffi.cast(rffi.VOIDP, 0): + elif obj.tc == 'p': + assert obj._voidp != rffi.cast(rffi.VOIDP, 0) data = rffi.cast(rffi.VOIDPP, data) data[0] = obj._voidp else: # only other use is sring + assert obj.tc == 's' n = len(obj._string) assert raw_string == rffi.cast(rffi.CCHARP, 0) # XXX could use rffi.get_nonmovingbuffer_final_null() @@ -89,35 +118,36 @@ self.library = None self.capi_calls = {} - import pypy.module._cffi_backend.newtype as nt + nt = newtype # module from _cffi_backend + state = space.fromcache(ffitypes.State) # factored out common types # TODO: the following need to match up with the globally defined C_XYZ low-level # types (see capi/__init__.py), but by using strings here, that isn't guaranteed - c_opaque_ptr = nt.new_primitive_type(space, 'unsigned long') + c_opaque_ptr = state.c_ulong - c_scope = c_opaque_ptr - c_type = c_scope - c_object = c_opaque_ptr - c_method = c_opaque_ptr - c_index = nt.new_primitive_type(space, 'long') + c_scope = c_opaque_ptr + c_type = c_scope + c_object = c_opaque_ptr + c_method = c_opaque_ptr + c_index = state.c_long + c_index_array = state.c_voidp - c_void = nt.new_void_type(space) - c_char = nt.new_primitive_type(space, 'char') - c_uchar = nt.new_primitive_type(space, 'unsigned char') - c_short = 
nt.new_primitive_type(space, 'short') - c_int = nt.new_primitive_type(space, 'int') - c_long = nt.new_primitive_type(space, 'long') - c_llong = nt.new_primitive_type(space, 'long long') - c_ullong = nt.new_primitive_type(space, 'unsigned long long') - c_float = nt.new_primitive_type(space, 'float') - c_double = nt.new_primitive_type(space, 'double') + c_void = state.c_void + c_char = state.c_char + c_uchar = state.c_uchar + c_short = state.c_short + c_int = state.c_int + c_long = state.c_long + c_llong = state.c_llong + c_ullong = state.c_ullong + c_float = state.c_float + c_double = state.c_double + c_ldouble = state.c_ldouble - c_ccharp = nt.new_pointer_type(space, c_char) - c_index_array = nt.new_pointer_type(space, c_void) + c_ccharp = state.c_ccharp + c_voidp = state.c_voidp - c_voidp = nt.new_pointer_type(space, c_void) c_size_t = nt.new_primitive_type(space, 'size_t') - c_ptrdiff_t = nt.new_primitive_type(space, 'ptrdiff_t') self.capi_call_ifaces = { @@ -127,7 +157,6 @@ 'resolve_name' : ([c_ccharp], c_ccharp), 'get_scope' : ([c_ccharp], c_scope), - 'get_template' : ([c_ccharp], c_type), 'actual_class' : ([c_type, c_object], c_type), # memory management @@ -146,14 +175,16 @@ 'call_ll' : ([c_method, c_object, c_int, c_voidp], c_llong), 'call_f' : ([c_method, c_object, c_int, c_voidp], c_float), 'call_d' : ([c_method, c_object, c_int, c_voidp], c_double), + 'call_ld' : ([c_method, c_object, c_int, c_voidp], c_ldouble), 'call_r' : ([c_method, c_object, c_int, c_voidp], c_voidp), - 'call_s' : ([c_method, c_object, c_int, c_voidp], c_ccharp), + # call_s actually takes an size_t* as last parameter, but this will do + 'call_s' : ([c_method, c_object, c_int, c_voidp, c_voidp], c_ccharp), 'constructor' : ([c_method, c_object, c_int, c_voidp], c_object), 'call_o' : ([c_method, c_object, c_int, c_voidp, c_type], c_object), - 'get_methptr_getter' : ([c_scope, c_index], c_voidp), # TODO: verify + 'get_function_address' : ([c_scope, c_index], c_voidp), # TODO: verify # 
handling of function argument buffer 'allocate_function_args' : ([c_int], c_voidp), @@ -163,6 +194,8 @@ # scope reflection information From pypy.commits at gmail.com Tue Dec 20 10:55:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 07:55:11 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: extended test and made some simplifications Message-ID: <5859545f.ce841c0a.66e63.6709@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89202:392161cec298 Date: 2016-12-20 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/392161cec298/ Log: extended test and made some simplifications diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -543,17 +543,8 @@ NOTE: Only use for immutable objects! """ - pass - -class MoveOutOfNurseryEntry(ExtRegistryEntry): - _about_ = move_out_of_nursery - - def compute_result_annotation(self, s_obj): - return s_obj - - def specialize_call(self, hop): - hop.exception_cannot_occur() - return hop.genop('gc_move_out_of_nursery', hop.args_v, resulttype=hop.r_result) + from rpython.rtyper.lltypesystem.lloperation import llop + return llop.gc_move_out_of_nursery(lltype.Void, obj) # ____________________________________________________________ diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1313,14 +1313,15 @@ calling_conv='c', ) -class RawBytes(object): - # literal copy of _cffi_backend/func.py - def __init__(self, string): - self.ptr = str2charp(string, track_allocation=False) - def __del__(self): - free_charp(self.ptr, track_allocation=False) if not we_are_translated(): + class RawBytes(object): + # literal copy of _cffi_backend/func.py + def __init__(self, string): + self.ptr = str2charp(string, track_allocation=False) + def __del__(self): + free_charp(self.ptr, track_allocation=False) + TEST_RAW_ADDR_KEEP_ALIVE 
= {} @jit.dont_look_inside @@ -1336,23 +1337,23 @@ referencing it goes out of scope. """ assert isinstance(string, str) - from rpython.rtyper.annlowlevel import llstr, hlstr + from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import llmemory from rpython.rlib import rgc if we_are_translated(): + newstring = string if rgc.can_move(string): - # create a shadow object that is exposed - string = rgc.move_out_of_nursery(string) + newstring = rgc.move_out_of_nursery(string) - # string cannot move! just return the address then! - lldata = llstr(string) + # string cannot move now! return the address + lldata = llstr(newstring) data_start = (llmemory.cast_ptr_to_adr(lldata) + offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) data_start = cast(CCHARP, data_start) - data_start[len(string)] = '\x00' # write the final extra null + data_start[len(newstring)] = '\x00' # write the final extra null return data_start else: global TEST_RAW_ADDR_KEEP_ALIVE diff --git a/rpython/rtyper/lltypesystem/test/test_ztranslated.py b/rpython/rtyper/lltypesystem/test/test_ztranslated.py --- a/rpython/rtyper/lltypesystem/test/test_ztranslated.py +++ b/rpython/rtyper/lltypesystem/test/test_ztranslated.py @@ -1,13 +1,9 @@ -import sys import gc from rpython.translator.c.test.test_genc import compile from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.annlowlevel import llstr, hlstr from rpython.rtyper.lltypesystem.lloperation import llop - -def setup_module(mod): - pass +from rpython.rlib import rgc def debug_assert(boolresult, msg): if not boolresult: @@ -15,13 +11,14 @@ assert boolresult def use_str(): - mystr = b'abc'[:] + mystr = b'abc' + #debug_assert(rgc.can_move(mystr), "short string cannot move... 
why?") ptr = rffi.get_raw_address_of_string(mystr) ptr2 = rffi.get_raw_address_of_string(mystr) debug_assert(ptr == ptr2, "ptr != ptr2") debug_assert(ptr[0] == b'a', "notnurseryadr[0] == b'a' is is %s" % ptr[0]) ptr[0] = b'x' # oh no no, in real programs nobody is allowed to modify that - debug_assert(mystr[0] in b'ax', "mystr[0] in b'ax'") + debug_assert(mystr[0] == b'a', "mystr[0] != b'a'") debug_assert(ptr[0] == b'x', "notnurseryadr[0] == b'x'") gc.collect() nptr = rffi.get_raw_address_of_string(mystr) @@ -30,10 +27,26 @@ debug_assert(nptr[0] == b'x', "failure b") mystr = None +def long_str(lstr): + ptr = rffi.get_raw_address_of_string(lstr) + for i,c in enumerate(lstr): + debug_assert(ptr[i] == c, "failure c") + gc.collect() + ptr2 = rffi.get_raw_address_of_string(lstr) + debug_assert(ptr == ptr2, "ptr != ptr2!!!") + return ptr + def main(argv=[]): use_str() - llop.debug_print(lltype.Void, "passed first call to use_str") gc.collect() + mystr = b"12341234aa"*4096*10 + #debug_assert(not rgc.can_move(mystr), "long string can move... 
why?") + p1 = long_str(mystr) + gc.collect() + copystr = mystr[:] + copystr += 'a' + p2 = long_str(copystr) + debug_assert(p1 != p2, "p1 == p2") return 0 # ____________________________________________________________ From pypy.commits at gmail.com Tue Dec 20 11:04:54 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 08:04:54 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: hg merge 1fb2fca52214, regenerate ast.py Message-ID: <585956a6.8ab81c0a.8808d.701d@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89203:5050884b7a39 Date: 2016-12-20 15:16 +0100 http://bitbucket.org/pypy/pypy/changeset/5050884b7a39/ Log: hg merge 1fb2fca52214, regenerate ast.py diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -66,14 +66,14 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = space.getattr(w_type, space.wrap("_fields")) + w_fields = space.getattr(w_type, space.newtext("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, space.getattr(self, w_name)) except OperationError: pass - w_attrs = space.findattr(w_type, space.wrap("_attributes")) + w_attrs = space.findattr(w_type, space.newtext("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): try: @@ -92,12 +92,12 @@ def W_AST_new(space, w_type, __args__): node = space.allocate_instance(W_AST, w_type) - return space.wrap(node) + return node def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() fields_w = space.fixedview(space.getattr(space.type(w_self), - space.wrap("_fields"))) + space.newtext("_fields"))) num_fields = len(fields_w) if fields_w else 0 if args_w and len(args_w) != num_fields: if num_fields == 0: @@ -113,7 +113,7 @@ for i, w_field in enumerate(fields_w): space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): - 
space.setattr(w_self, space.wrap(field), w_value) + space.setattr(w_self, space.newtext(field), w_value) W_AST.typedef = typedef.TypeDef("_ast.AST", @@ -142,16 +142,16 @@ def make_new_type(self, space, name, base, fields, attributes): w_base = getattr(self, 'w_%s' % base) w_dict = space.newdict() - space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + space.setitem_str(w_dict, '__module__', space.newtext('_ast')) if fields is not None: space.setitem_str(w_dict, "_fields", - space.newtuple([space.wrap(f) for f in fields])) + space.newtuple([space.newtext(f) for f in fields])) if attributes is not None: space.setitem_str(w_dict, "_attributes", - space.newtuple([space.wrap(a) for a in attributes])) + space.newtuple([space.newtext(a) for a in attributes])) w_type = space.call_function( space.w_type, - space.wrap(name), space.newtuple([w_base]), w_dict) + space.newtext(name), space.newtuple([w_base]), w_dict) setattr(self, 'w_%s' % name, w_type) def get(space): @@ -194,7 +194,7 @@ else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) return w_node @staticmethod @@ -227,7 +227,7 @@ else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) return w_node @staticmethod @@ -255,7 +255,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Expression) w_body = self.body.to_object(space) # expr - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) return w_node @staticmethod @@ -289,7 +289,7 @@ else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) return 
w_node @staticmethod @@ -389,28 +389,28 @@ def to_object(self, space): w_node = space.call_function(get(space).w_FunctionDef) - w_name = space.wrap(self.name.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('name'), w_name) + w_name = space.newtext(self.name) # identifier + space.setattr(w_node, space.newtext('name'), w_name) w_args = self.args.to_object(space) # arguments - space.setattr(w_node, space.wrap('args'), w_args) + space.setattr(w_node, space.newtext('args'), w_args) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.decorator_list is None: decorator_list_w = [] else: decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr w_decorator_list = space.newlist(decorator_list_w) - space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + space.setattr(w_node, space.newtext('decorator_list'), w_decorator_list) w_returns = self.returns.to_object(space) if self.returns is not None else space.w_None # expr - space.setattr(w_node, space.wrap('returns'), w_returns) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('returns'), w_returns) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -465,28 +465,28 @@ def to_object(self, space): w_node = space.call_function(get(space).w_AsyncFunctionDef) - w_name = space.wrap(self.name.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('name'), w_name) + w_name = 
space.newtext(self.name) # identifier + space.setattr(w_node, space.newtext('name'), w_name) w_args = self.args.to_object(space) # arguments - space.setattr(w_node, space.wrap('args'), w_args) + space.setattr(w_node, space.newtext('args'), w_args) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.decorator_list is None: decorator_list_w = [] else: decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr w_decorator_list = space.newlist(decorator_list_w) - space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + space.setattr(w_node, space.newtext('decorator_list'), w_decorator_list) w_returns = self.returns.to_object(space) if self.returns is not None else space.w_None # expr - space.setattr(w_node, space.wrap('returns'), w_returns) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('returns'), w_returns) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -542,36 +542,36 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ClassDef) - w_name = space.wrap(self.name.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('name'), w_name) + w_name = space.newtext(self.name) # identifier + space.setattr(w_node, space.newtext('name'), w_name) if self.bases is None: bases_w = [] else: bases_w = [node.to_object(space) for node in self.bases] # expr w_bases = space.newlist(bases_w) - space.setattr(w_node, 
space.wrap('bases'), w_bases) + space.setattr(w_node, space.newtext('bases'), w_bases) if self.keywords is None: keywords_w = [] else: keywords_w = [node.to_object(space) for node in self.keywords] # keyword w_keywords = space.newlist(keywords_w) - space.setattr(w_node, space.wrap('keywords'), w_keywords) + space.setattr(w_node, space.newtext('keywords'), w_keywords) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.decorator_list is None: decorator_list_w = [] else: decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr w_decorator_list = space.newlist(decorator_list_w) - space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('decorator_list'), w_decorator_list) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -618,11 +618,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Return) w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, 
space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -659,11 +659,11 @@ else: targets_w = [node.to_object(space) for node in self.targets] # expr w_targets = space.newlist(targets_w) - space.setattr(w_node, space.wrap('targets'), w_targets) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('targets'), w_targets) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -703,13 +703,13 @@ else: targets_w = [node.to_object(space) for node in self.targets] # expr w_targets = space.newlist(targets_w) - space.setattr(w_node, space.wrap('targets'), w_targets) + space.setattr(w_node, space.newtext('targets'), w_targets) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -749,15 +749,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_AugAssign) w_target = self.target.to_object(space) # expr - space.setattr(w_node, space.wrap('target'), w_target) + 
space.setattr(w_node, space.newtext('target'), w_target) w_op = operator_to_class[self.op - 1]().to_object(space) # operator - space.setattr(w_node, space.wrap('op'), w_op) + space.setattr(w_node, space.newtext('op'), w_op) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -807,25 +807,25 @@ def to_object(self, space): w_node = space.call_function(get(space).w_For) w_target = self.target.to_object(space) # expr - space.setattr(w_node, space.wrap('target'), w_target) + space.setattr(w_node, space.newtext('target'), w_target) w_iter = self.iter.to_object(space) # expr - space.setattr(w_node, space.wrap('iter'), w_iter) + space.setattr(w_node, space.newtext('iter'), w_iter) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.orelse is None: orelse_w = [] else: orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) - space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('orelse'), w_orelse) + w_lineno = 
space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -877,25 +877,25 @@ def to_object(self, space): w_node = space.call_function(get(space).w_AsyncFor) w_target = self.target.to_object(space) # expr - space.setattr(w_node, space.wrap('target'), w_target) + space.setattr(w_node, space.newtext('target'), w_target) w_iter = self.iter.to_object(space) # expr - space.setattr(w_node, space.wrap('iter'), w_iter) + space.setattr(w_node, space.newtext('iter'), w_iter) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.orelse is None: orelse_w = [] else: orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) - space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('orelse'), w_orelse) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -945,23 +945,23 @@ def to_object(self, space): w_node = space.call_function(get(space).w_While) w_test = self.test.to_object(space) # expr - space.setattr(w_node, space.wrap('test'), w_test) + space.setattr(w_node, space.newtext('test'), w_test) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = 
space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.orelse is None: orelse_w = [] else: orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) - space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('orelse'), w_orelse) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1007,23 +1007,23 @@ def to_object(self, space): w_node = space.call_function(get(space).w_If) w_test = self.test.to_object(space) # expr - space.setattr(w_node, space.wrap('test'), w_test) + space.setattr(w_node, space.newtext('test'), w_test) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.orelse is None: orelse_w = [] else: orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) - space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('orelse'), w_orelse) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, 
space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1071,17 +1071,17 @@ else: items_w = [node.to_object(space) for node in self.items] # withitem w_items = space.newlist(items_w) - space.setattr(w_node, space.wrap('items'), w_items) + space.setattr(w_node, space.newtext('items'), w_items) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('body'), w_body) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1125,17 +1125,17 @@ else: items_w = [node.to_object(space) for node in self.items] # withitem w_items = space.newlist(items_w) - space.setattr(w_node, space.wrap('items'), w_items) + space.setattr(w_node, space.newtext('items'), w_items) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('body'), w_body) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1175,13 +1175,13 @@ def 
to_object(self, space): w_node = space.call_function(get(space).w_Raise) w_exc = self.exc.to_object(space) if self.exc is not None else space.w_None # expr - space.setattr(w_node, space.wrap('exc'), w_exc) + space.setattr(w_node, space.newtext('exc'), w_exc) w_cause = self.cause.to_object(space) if self.cause is not None else space.w_None # expr - space.setattr(w_node, space.wrap('cause'), w_cause) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('cause'), w_cause) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1229,29 +1229,29 @@ else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) if self.handlers is None: handlers_w = [] else: handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler w_handlers = space.newlist(handlers_w) - space.setattr(w_node, space.wrap('handlers'), w_handlers) + space.setattr(w_node, space.newtext('handlers'), w_handlers) if self.orelse is None: orelse_w = [] else: orelse_w = [node.to_object(space) for node in self.orelse] # stmt w_orelse = space.newlist(orelse_w) - space.setattr(w_node, space.wrap('orelse'), w_orelse) + space.setattr(w_node, space.newtext('orelse'), w_orelse) if self.finalbody is None: finalbody_w = [] else: finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt w_finalbody = space.newlist(finalbody_w) - space.setattr(w_node, space.wrap('finalbody'), w_finalbody) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, 
space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('finalbody'), w_finalbody) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1296,13 +1296,13 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Assert) w_test = self.test.to_object(space) # expr - space.setattr(w_node, space.wrap('test'), w_test) + space.setattr(w_node, space.newtext('test'), w_test) w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr - space.setattr(w_node, space.wrap('msg'), w_msg) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('msg'), w_msg) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1343,11 +1343,11 @@ else: names_w = [node.to_object(space) for node in self.names] # alias w_names = space.newlist(names_w) - space.setattr(w_node, space.wrap('names'), w_names) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('names'), w_names) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + 
space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1382,20 +1382,20 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ImportFrom) - w_module = space.wrap(self.module.decode('utf-8')) if self.module is not None else space.w_None # identifier - space.setattr(w_node, space.wrap('module'), w_module) + w_module = space.newtext(self.module) if self.module is not None else space.w_None # identifier + space.setattr(w_node, space.newtext('module'), w_module) if self.names is None: names_w = [] else: names_w = [node.to_object(space) for node in self.names] # alias w_names = space.newlist(names_w) - space.setattr(w_node, space.wrap('names'), w_names) - w_level = space.wrap(self.level) # int - space.setattr(w_node, space.wrap('level'), w_level) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('names'), w_names) + w_level = space.newint(self.level) # int + space.setattr(w_node, space.newtext('level'), w_level) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1433,13 +1433,13 @@ if self.names is None: names_w = [] else: - names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier + names_w = [space.newtext(node) for node in self.names] # identifier w_names = space.newlist(names_w) - space.setattr(w_node, space.wrap('names'), w_names) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, 
space.newtext('names'), w_names) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1473,13 +1473,13 @@ if self.names is None: names_w = [] else: - names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier + names_w = [space.newtext(node) for node in self.names] # identifier w_names = space.newlist(names_w) - space.setattr(w_node, space.wrap('names'), w_names) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('names'), w_names) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1512,11 +1512,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Expr) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1547,10 +1547,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Pass) - w_lineno = space.wrap(self.lineno) # int - 
space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1577,10 +1577,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Break) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1607,10 +1607,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Continue) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1712,17 +1712,17 @@ def to_object(self, space): w_node = space.call_function(get(space).w_BoolOp) w_op = boolop_to_class[self.op - 1]().to_object(space) # boolop - space.setattr(w_node, space.wrap('op'), w_op) + space.setattr(w_node, space.newtext('op'), w_op) if self.values is None: values_w = [] else: values_w = [node.to_object(space) for node in self.values] # expr w_values = space.newlist(values_w) - space.setattr(w_node, 
space.wrap('values'), w_values) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('values'), w_values) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1762,15 +1762,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_BinOp) w_left = self.left.to_object(space) # expr - space.setattr(w_node, space.wrap('left'), w_left) + space.setattr(w_node, space.newtext('left'), w_left) w_op = operator_to_class[self.op - 1]().to_object(space) # operator - space.setattr(w_node, space.wrap('op'), w_op) + space.setattr(w_node, space.newtext('op'), w_op) w_right = self.right.to_object(space) # expr - space.setattr(w_node, space.wrap('right'), w_right) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('right'), w_right) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1813,13 +1813,13 @@ def to_object(self, space): w_node = space.call_function(get(space).w_UnaryOp) w_op = unaryop_to_class[self.op - 1]().to_object(space) # unaryop - space.setattr(w_node, space.wrap('op'), w_op) + space.setattr(w_node, space.newtext('op'), w_op) w_operand = self.operand.to_object(space) # expr - space.setattr(w_node, space.wrap('operand'), w_operand) - w_lineno = 
space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('operand'), w_operand) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1859,13 +1859,13 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Lambda) w_args = self.args.to_object(space) # arguments - space.setattr(w_node, space.wrap('args'), w_args) + space.setattr(w_node, space.newtext('args'), w_args) w_body = self.body.to_object(space) # expr - space.setattr(w_node, space.wrap('body'), w_body) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('body'), w_body) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1907,15 +1907,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_IfExp) w_test = self.test.to_object(space) # expr - space.setattr(w_node, space.wrap('test'), w_test) + space.setattr(w_node, space.newtext('test'), w_test) w_body = self.body.to_object(space) # expr - space.setattr(w_node, space.wrap('body'), w_body) + space.setattr(w_node, space.newtext('body'), w_body) w_orelse = self.orelse.to_object(space) # expr - space.setattr(w_node, space.wrap('orelse'), w_orelse) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - 
w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('orelse'), w_orelse) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -1965,17 +1965,17 @@ else: keys_w = [node.to_object(space) if node is not None else space.w_None for node in self.keys] # expr w_keys = space.newlist(keys_w) - space.setattr(w_node, space.wrap('keys'), w_keys) + space.setattr(w_node, space.newtext('keys'), w_keys) if self.values is None: values_w = [] else: values_w = [node.to_object(space) for node in self.values] # expr w_values = space.newlist(values_w) - space.setattr(w_node, space.wrap('values'), w_values) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('values'), w_values) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2016,11 +2016,11 @@ else: elts_w = [node.to_object(space) for node in self.elts] # expr w_elts = space.newlist(elts_w) - space.setattr(w_node, space.wrap('elts'), w_elts) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('elts'), w_elts) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = 
space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2056,17 +2056,17 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ListComp) w_elt = self.elt.to_object(space) # expr - space.setattr(w_node, space.wrap('elt'), w_elt) + space.setattr(w_node, space.newtext('elt'), w_elt) if self.generators is None: generators_w = [] else: generators_w = [node.to_object(space) for node in self.generators] # comprehension w_generators = space.newlist(generators_w) - space.setattr(w_node, space.wrap('generators'), w_generators) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('generators'), w_generators) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2106,17 +2106,17 @@ def to_object(self, space): w_node = space.call_function(get(space).w_SetComp) w_elt = self.elt.to_object(space) # expr - space.setattr(w_node, space.wrap('elt'), w_elt) + space.setattr(w_node, space.newtext('elt'), w_elt) if self.generators is None: generators_w = [] else: generators_w = [node.to_object(space) for node in self.generators] # comprehension w_generators = space.newlist(generators_w) - space.setattr(w_node, space.wrap('generators'), w_generators) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('generators'), w_generators) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, 
space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2158,19 +2158,19 @@ def to_object(self, space): w_node = space.call_function(get(space).w_DictComp) w_key = self.key.to_object(space) # expr - space.setattr(w_node, space.wrap('key'), w_key) + space.setattr(w_node, space.newtext('key'), w_key) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) + space.setattr(w_node, space.newtext('value'), w_value) if self.generators is None: generators_w = [] else: generators_w = [node.to_object(space) for node in self.generators] # comprehension w_generators = space.newlist(generators_w) - space.setattr(w_node, space.wrap('generators'), w_generators) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('generators'), w_generators) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2214,17 +2214,17 @@ def to_object(self, space): w_node = space.call_function(get(space).w_GeneratorExp) w_elt = self.elt.to_object(space) # expr - space.setattr(w_node, space.wrap('elt'), w_elt) + space.setattr(w_node, space.newtext('elt'), w_elt) if self.generators is None: generators_w = [] else: generators_w = [node.to_object(space) for node in self.generators] # comprehension w_generators = space.newlist(generators_w) - space.setattr(w_node, space.wrap('generators'), w_generators) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) 
# int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('generators'), w_generators) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2261,11 +2261,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Await) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2300,11 +2300,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Yield) w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2336,11 +2336,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_YieldFrom) w_value = 
self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('value'), w_value) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2378,23 +2378,23 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Compare) w_left = self.left.to_object(space) # expr - space.setattr(w_node, space.wrap('left'), w_left) + space.setattr(w_node, space.newtext('left'), w_left) if self.ops is None: ops_w = [] else: ops_w = [cmpop_to_class[node - 1]().to_object(space) for node in self.ops] # cmpop w_ops = space.newlist(ops_w) - space.setattr(w_node, space.wrap('ops'), w_ops) + space.setattr(w_node, space.newtext('ops'), w_ops) if self.comparators is None: comparators_w = [] else: comparators_w = [node.to_object(space) for node in self.comparators] # expr w_comparators = space.newlist(comparators_w) - space.setattr(w_node, space.wrap('comparators'), w_comparators) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('comparators'), w_comparators) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2440,23 +2440,23 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Call) 
w_func = self.func.to_object(space) # expr - space.setattr(w_node, space.wrap('func'), w_func) + space.setattr(w_node, space.newtext('func'), w_func) if self.args is None: args_w = [] else: args_w = [node.to_object(space) for node in self.args] # expr w_args = space.newlist(args_w) - space.setattr(w_node, space.wrap('args'), w_args) + space.setattr(w_node, space.newtext('args'), w_args) if self.keywords is None: keywords_w = [] else: keywords_w = [node.to_object(space) for node in self.keywords] # keyword w_keywords = space.newlist(keywords_w) - space.setattr(w_node, space.wrap('keywords'), w_keywords) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('keywords'), w_keywords) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2495,11 +2495,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Num) w_n = self.n # object - space.setattr(w_node, space.wrap('n'), w_n) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('n'), w_n) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2532,11 +2532,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Str) w_s = self.s # string - space.setattr(w_node, space.wrap('s'), w_s) - w_lineno 
= space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('s'), w_s) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2569,11 +2569,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Bytes) w_s = self.s # bytes - space.setattr(w_node, space.wrap('s'), w_s) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('s'), w_s) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2606,11 +2606,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_NameConstant) w_single = self.single # singleton - space.setattr(w_node, space.wrap('single'), w_single) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('single'), w_single) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2641,10 +2641,10 @@ def to_object(self, space): w_node = 
space.call_function(get(space).w_Ellipsis) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2676,15 +2676,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Attribute) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_attr = space.wrap(self.attr.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('attr'), w_attr) + space.setattr(w_node, space.newtext('value'), w_value) + w_attr = space.newtext(self.attr) # identifier + space.setattr(w_node, space.newtext('attr'), w_attr) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2729,15 +2729,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Subscript) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) + space.setattr(w_node, space.newtext('value'), w_value) w_slice = self.slice.to_object(space) # slice - space.setattr(w_node, space.wrap('slice'), w_slice) + 
space.setattr(w_node, space.newtext('slice'), w_slice) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2780,13 +2780,13 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Starred) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) + space.setattr(w_node, space.newtext('value'), w_value) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2823,14 +2823,14 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Name) - w_id = space.wrap(self.id.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('id'), w_id) + w_id = space.newtext(self.id) # identifier + space.setattr(w_node, space.newtext('id'), w_id) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - 
space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2874,13 +2874,13 @@ else: elts_w = [node.to_object(space) for node in self.elts] # expr w_elts = space.newlist(elts_w) - space.setattr(w_node, space.wrap('elts'), w_elts) + space.setattr(w_node, space.newtext('elts'), w_elts) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2923,13 +2923,13 @@ else: elts_w = [node.to_object(space) for node in self.elts] # expr w_elts = space.newlist(elts_w) - space.setattr(w_node, space.wrap('elts'), w_elts) + space.setattr(w_node, space.newtext('elts'), w_elts) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context - space.setattr(w_node, space.wrap('ctx'), w_ctx) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, 
space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('ctx'), w_ctx) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -2965,11 +2965,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Const) w_obj = self.obj # object - space.setattr(w_node, space.wrap('obj'), w_obj) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + space.setattr(w_node, space.newtext('obj'), w_obj) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -3089,11 +3089,11 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Slice) w_lower = self.lower.to_object(space) if self.lower is not None else space.w_None # expr - space.setattr(w_node, space.wrap('lower'), w_lower) + space.setattr(w_node, space.newtext('lower'), w_lower) w_upper = self.upper.to_object(space) if self.upper is not None else space.w_None # expr - space.setattr(w_node, space.wrap('upper'), w_upper) + space.setattr(w_node, space.newtext('upper'), w_upper) w_step = self.step.to_object(space) if self.step is not None else space.w_None # expr - space.setattr(w_node, space.wrap('step'), w_step) + space.setattr(w_node, space.newtext('step'), w_step) return w_node @staticmethod @@ -3129,7 +3129,7 @@ else: dims_w = [node.to_object(space) for node in self.dims] # slice w_dims = space.newlist(dims_w) - space.setattr(w_node, space.wrap('dims'), w_dims) + space.setattr(w_node, space.newtext('dims'), w_dims) return 
w_node @staticmethod @@ -3157,7 +3157,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Index) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) + space.setattr(w_node, space.newtext('value'), w_value) return w_node @staticmethod @@ -3496,15 +3496,15 @@ def to_object(self, space): w_node = space.call_function(get(space).w_comprehension) w_target = self.target.to_object(space) # expr - space.setattr(w_node, space.wrap('target'), w_target) + space.setattr(w_node, space.newtext('target'), w_target) w_iter = self.iter.to_object(space) # expr - space.setattr(w_node, space.wrap('iter'), w_iter) + space.setattr(w_node, space.newtext('iter'), w_iter) if self.ifs is None: ifs_w = [] else: ifs_w = [node.to_object(space) for node in self.ifs] # expr w_ifs = space.newlist(ifs_w) - space.setattr(w_node, space.wrap('ifs'), w_ifs) + space.setattr(w_node, space.newtext('ifs'), w_ifs) return w_node @staticmethod @@ -3561,19 +3561,19 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ExceptHandler) w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr - space.setattr(w_node, space.wrap('type'), w_type) - w_name = space.wrap(self.name.decode('utf-8')) if self.name is not None else space.w_None # identifier - space.setattr(w_node, space.wrap('name'), w_name) + space.setattr(w_node, space.newtext('type'), w_type) + w_name = space.newtext(self.name) if self.name is not None else space.w_None # identifier + space.setattr(w_node, space.newtext('name'), w_name) if self.body is None: body_w = [] else: body_w = [node.to_object(space) for node in self.body] # stmt w_body = space.newlist(body_w) - space.setattr(w_node, space.wrap('body'), w_body) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + 
space.setattr(w_node, space.newtext('body'), w_body) + w_lineno = space.newint(self.lineno) # int + space.setattr(w_node, space.newtext('lineno'), w_lineno) + w_col_offset = space.newint(self.col_offset) # int + space.setattr(w_node, space.newtext('col_offset'), w_col_offset) return w_node @staticmethod @@ -3629,29 +3629,29 @@ else: args_w = [node.to_object(space) for node in self.args] # arg w_args = space.newlist(args_w) - space.setattr(w_node, space.wrap('args'), w_args) + space.setattr(w_node, space.newtext('args'), w_args) w_vararg = self.vararg.to_object(space) if self.vararg is not None else space.w_None # arg - space.setattr(w_node, space.wrap('vararg'), w_vararg) + space.setattr(w_node, space.newtext('vararg'), w_vararg) if self.kwonlyargs is None: kwonlyargs_w = [] else: kwonlyargs_w = [node.to_object(space) for node in self.kwonlyargs] # arg w_kwonlyargs = space.newlist(kwonlyargs_w) - space.setattr(w_node, space.wrap('kwonlyargs'), w_kwonlyargs) + space.setattr(w_node, space.newtext('kwonlyargs'), w_kwonlyargs) if self.kw_defaults is None: kw_defaults_w = [] else: kw_defaults_w = [node.to_object(space) if node is not None else space.w_None for node in self.kw_defaults] # expr w_kw_defaults = space.newlist(kw_defaults_w) - space.setattr(w_node, space.wrap('kw_defaults'), w_kw_defaults) + space.setattr(w_node, space.newtext('kw_defaults'), w_kw_defaults) w_kwarg = self.kwarg.to_object(space) if self.kwarg is not None else space.w_None # arg - space.setattr(w_node, space.wrap('kwarg'), w_kwarg) + space.setattr(w_node, space.newtext('kwarg'), w_kwarg) if self.defaults is None: defaults_w = [] else: defaults_w = [node.to_object(space) for node in self.defaults] # expr w_defaults = space.newlist(defaults_w) - space.setattr(w_node, space.wrap('defaults'), w_defaults) + space.setattr(w_node, space.newtext('defaults'), w_defaults) return w_node @staticmethod @@ -3692,10 +3692,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_arg) - 
w_arg = space.wrap(self.arg.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('arg'), w_arg) + w_arg = space.newtext(self.arg) # identifier + space.setattr(w_node, space.newtext('arg'), w_arg) w_annotation = self.annotation.to_object(space) if self.annotation is not None else space.w_None # expr - space.setattr(w_node, space.wrap('annotation'), w_annotation) + space.setattr(w_node, space.newtext('annotation'), w_annotation) return w_node @staticmethod @@ -3725,10 +3725,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_keyword) - w_arg = space.wrap(self.arg.decode('utf-8')) if self.arg is not None else space.w_None # identifier - space.setattr(w_node, space.wrap('arg'), w_arg) + w_arg = space.newtext(self.arg) if self.arg is not None else space.w_None # identifier + space.setattr(w_node, space.newtext('arg'), w_arg) w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) + space.setattr(w_node, space.newtext('value'), w_value) return w_node @staticmethod @@ -3757,10 +3757,10 @@ def to_object(self, space): w_node = space.call_function(get(space).w_alias) - w_name = space.wrap(self.name.decode('utf-8')) # identifier - space.setattr(w_node, space.wrap('name'), w_name) - w_asname = space.wrap(self.asname.decode('utf-8')) if self.asname is not None else space.w_None # identifier - space.setattr(w_node, space.wrap('asname'), w_asname) + w_name = space.newtext(self.name) # identifier + space.setattr(w_node, space.newtext('name'), w_name) + w_asname = space.newtext(self.asname) if self.asname is not None else space.w_None # identifier + space.setattr(w_node, space.newtext('asname'), w_asname) return w_node @staticmethod @@ -3793,9 +3793,9 @@ def to_object(self, space): w_node = space.call_function(get(space).w_withitem) w_context_expr = self.context_expr.to_object(space) # expr - space.setattr(w_node, space.wrap('context_expr'), w_context_expr) + space.setattr(w_node, 
space.newtext('context_expr'), w_context_expr) w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr - space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars) + space.setattr(w_node, space.newtext('optional_vars'), w_optional_vars) return w_node @staticmethod diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -125,15 +125,17 @@ def get_value_converter(self, field, value): if field.type in self.data.simple_types: return "%s_to_class[%s - 1]().to_object(space)" % (field.type, value) + elif field.type in ("object", "singleton", "string", "bytes"): + return value + elif field.type == "bool": + return "space.newbool(%s)" % (value,) + elif field.type == "int": + return "space.newint(%s)" % (value,) elif field.type == "identifier": - wrapper = "space.wrap(%s.decode('utf-8'))" % (value,) + wrapper = "space.newtext(%s)" % (value,) if field.opt: wrapper += " if %s is not None else space.w_None" % (value,) return wrapper - elif field.type in ("object", "singleton", "string", "bytes"): - return value - elif field.type in ("int", "bool"): - return "space.wrap(%s)" % (value,) else: wrapper = "%s.to_object(space)" % (value,) allow_none = field.opt @@ -218,7 +220,7 @@ wrapping_code = self.get_field_converter(field) for line in wrapping_code: self.emit(line, 2) - self.emit("space.setattr(w_node, space.wrap(%r), w_%s)" % ( + self.emit("space.setattr(w_node, space.newtext(%r), w_%s)" % ( str(field.name), field.name), 2) self.emit("return w_node", 2) self.emit("") @@ -486,14 +488,14 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = space.getattr(w_type, space.wrap("_fields")) + w_fields = space.getattr(w_type, space.newtext("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, 
space.getattr(self, w_name)) except OperationError: pass - w_attrs = space.findattr(w_type, space.wrap("_attributes")) + w_attrs = space.findattr(w_type, space.newtext("_attributes")) if w_attrs: for w_name in space.fixedview(w_attrs): try: @@ -512,12 +514,12 @@ def W_AST_new(space, w_type, __args__): node = space.allocate_instance(W_AST, w_type) - return space.wrap(node) + return node def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() fields_w = space.fixedview(space.getattr(space.type(w_self), - space.wrap("_fields"))) + space.newtext("_fields"))) num_fields = len(fields_w) if fields_w else 0 if args_w and len(args_w) != num_fields: if num_fields == 0: @@ -533,7 +535,7 @@ for i, w_field in enumerate(fields_w): space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) + space.setattr(w_self, space.newtext(field), w_value) W_AST.typedef = typedef.TypeDef("_ast.AST", @@ -562,16 +564,16 @@ def make_new_type(self, space, name, base, fields, attributes): w_base = getattr(self, 'w_%s' % base) w_dict = space.newdict() - space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + space.setitem_str(w_dict, '__module__', space.newtext('_ast')) if fields is not None: space.setitem_str(w_dict, "_fields", - space.newtuple([space.wrap(f) for f in fields])) + space.newtuple([space.newtext(f) for f in fields])) if attributes is not None: space.setitem_str(w_dict, "_attributes", - space.newtuple([space.wrap(a) for a in attributes])) + space.newtuple([space.newtext(a) for a in attributes])) w_type = space.call_function( space.w_type, - space.wrap(name), space.newtuple([w_base]), w_dict) + space.newtext(name), space.newtuple([w_base]), w_dict) setattr(self, 'w_%s' % name, w_type) def get(space): From pypy.commits at gmail.com Tue Dec 20 11:04:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 08:04:56 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: 
remove a (never-called) space.wrap Message-ID: <585956a8.c64bc20a.84589.e7a6@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89204:61cc80850258 Date: 2016-12-20 15:18 +0100 http://bitbucket.org/pypy/pypy/changeset/61cc80850258/ Log: remove a (never-called) space.wrap diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -47,10 +47,10 @@ "Hack around the fact we can't store tuples on a TypeDef." def __init__(self, fields): - self.fields = fields + assert fields == [] def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) + return space.newtuple([]) class W_AST(W_Root): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -469,10 +469,10 @@ "Hack around the fact we can't store tuples on a TypeDef." def __init__(self, fields): - self.fields = fields + assert fields == [] def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) + return space.newtuple([]) class W_AST(W_Root): From pypy.commits at gmail.com Tue Dec 20 11:04:57 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 08:04:57 -0800 (PST) Subject: [pypy-commit] pypy py3.5-newtext: fix dict tests Message-ID: <585956a9.54161c0a.15d7f.72ee@mx.google.com> Author: Armin Rigo Branch: py3.5-newtext Changeset: r89205:f7ab12c69fc9 Date: 2016-12-20 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/f7ab12c69fc9/ Log: fix dict tests diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -139,6 +139,7 @@ # i.e. surrogates are accepted and not treated specially at all. 
# If there happen to be two 3-bytes encoding a pair of surrogates, # you still get two surrogate unicode characters in the result. + assert isinstance(string, str) result, consumed = runicode.str_decode_utf_8( string, len(string), "strict", final=True, errorhandler=decode_error_handler(space), diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -42,9 +42,9 @@ def buffer_w(self, space, flags): return StringBuffer("foobar") - def str_w(self, space): + def text_w(self, space): return NonConstant("foobar") - identifier_w = bytes_w = str_w + identifier_w = bytes_w = text_w def unicode_w(self, space): return NonConstant(u"foobar") @@ -118,7 +118,7 @@ # ____________________________________________________________ -BUILTIN_TYPES = ['int', 'str', 'float', 'tuple', 'list', 'dict', 'bytes', +BUILTIN_TYPES = ['int', 'float', 'tuple', 'list', 'dict', 'bytes', 'unicode', 'complex', 'slice', 'bool', 'text', 'object', 'set', 'frozenset', 'bytearray', 'memoryview'] diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1016,7 +1016,7 @@ def is_correct_type(self, w_obj): space = self.space - return space.is_w(space.type(w_obj), space.w_str) + return space.is_w(space.type(w_obj), space.w_bytes) def get_empty_storage(self): res = {} @@ -1167,7 +1167,7 @@ space = self.space # XXX there are many more types return (space.is_w(w_lookup_type, space.w_NoneType) or - space.is_w(w_lookup_type, space.w_str) or + space.is_w(w_lookup_type, space.w_bytes) or space.is_w(w_lookup_type, space.w_unicode) ) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1131,6 +1131,8 @@ return l def newlist_bytes(self, l): 
return l + def newlist_text(self, l): + return l def newlist_unicode(self, l): return l DictObjectCls = W_DictObject @@ -1140,23 +1142,20 @@ if isinstance(w_obj, FakeUnicode): return unicode return type(w_obj) - w_str = str w_unicode = unicode + w_bytes = str - def str_w(self, string): - if isinstance(string, unicode): - return string.encode('utf-8') - assert isinstance(string, str) - return string - bytes_w = str_w + def text_w(self, u): + assert isinstance(u, unicode) + return u.encode('utf-8') def bytes_w(self, string): assert isinstance(string, str) return string - def unicode_w(self, string): - assert isinstance(string, unicode) - return string + def unicode_w(self, u): + assert isinstance(u, unicode) + return u def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) @@ -1166,7 +1165,14 @@ if isinstance(obj, str): return obj.decode('ascii') return obj - newtext = newbytes = wrap + + def newunicode(self, u): + assert isinstance(u, unicode) + return u + + def newtext(self, string): + assert isinstance(string, str) + return string.decode('utf-8') def newbytes(self, obj): return obj @@ -1212,7 +1218,7 @@ StringObjectCls = FakeString UnicodeObjectCls = FakeUnicode w_dict = W_DictObject - w_text = str + w_text = unicode iter = iter fixedview = list listview = list @@ -1355,7 +1361,7 @@ def test_devolve(self): impl = self.impl for x in xrange(100): - impl.setitem(self.fakespace.str_w(str(x)), x) + impl.setitem(self.fakespace.text_w(unicode(x)), x) impl.setitem(x, x) assert type(impl.get_strategy()) is ObjectDictStrategy @@ -1419,10 +1425,10 @@ assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) def test_setitem_str(self): - self.impl.setitem_str(self.fakespace.str_w(self.string), 1000) + self.impl.setitem_str(self.fakespace.text_w(self.string), 1000) assert self.impl.length() == 1 assert self.impl.getitem(self.string) == 1000 - assert self.impl.getitem_str(self.string) == 1000 + assert 
self.impl.getitem_str(str(self.string)) == 1000 self.check_not_devolved() class TestBytesDictImplementation(BaseTestRDictImplementation): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -326,8 +326,8 @@ def descr_repr(self, space): chars = self._value size = len(chars) - s = _repr_function(chars, size, "strict") - return space.newtext(s) + u = _repr_function(chars, size, "strict") + return space.newunicode(u) def descr_str(self, space): if space.is_w(space.type(self), space.w_unicode): From pypy.commits at gmail.com Tue Dec 20 11:04:59 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 08:04:59 -0800 (PST) Subject: [pypy-commit] pypy default: Maybe temporary: change py.test, which displays unicode strings 'xx' Message-ID: <585956ab.849c1c0a.7f283.65c4@mx.google.com> Author: Armin Rigo Branch: Changeset: r89206:4ac343e8285f Date: 2016-12-20 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/4ac343e8285f/ Log: Maybe temporary: change py.test, which displays unicode strings 'xx' instead of u'xx'. It's very annoying when the difference is essential, like in the py3.5-newtext branch. 
diff --git a/py/_io/saferepr.py b/py/_io/saferepr.py --- a/py/_io/saferepr.py +++ b/py/_io/saferepr.py @@ -16,11 +16,11 @@ # Strictly speaking wrong on narrow builds def repr(u): if "'" not in u: - return py.builtin._totext("'%s'") % u + return py.builtin._totext("u'%s'") % u elif '"' not in u: - return py.builtin._totext('"%s"') % u + return py.builtin._totext('u"%s"') % u else: - return py.builtin._totext("'%s'") % u.replace("'", r"\'") + return py.builtin._totext("u'%s'") % u.replace("'", r"\'") s = repr(x[:self.maxstring]) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) From pypy.commits at gmail.com Tue Dec 20 11:12:52 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 20 Dec 2016 08:12:52 -0800 (PST) Subject: [pypy-commit] pypy default: Reduce the diff with the original Message-ID: <58595884.61c9c20a.d7016.4b80@mx.google.com> Author: Armin Rigo Branch: Changeset: r89207:004274f1f5a3 Date: 2016-12-20 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/004274f1f5a3/ Log: Reduce the diff with the original diff --git a/py/_io/saferepr.py b/py/_io/saferepr.py --- a/py/_io/saferepr.py +++ b/py/_io/saferepr.py @@ -16,11 +16,16 @@ # Strictly speaking wrong on narrow builds def repr(u): if "'" not in u: - return py.builtin._totext("u'%s'") % u + return py.builtin._totext("'%s'") % u elif '"' not in u: - return py.builtin._totext('u"%s"') % u + return py.builtin._totext('"%s"') % u else: - return py.builtin._totext("u'%s'") % u.replace("'", r"\'") + return py.builtin._totext("'%s'") % u.replace("'", r"\'") + + repr = builtin_repr + # ^^^ it's very annoying to display 'xx' instead of u'xx' when + # the difference can be essential, particularly in PyPy + s = repr(x[:self.maxstring]) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) From pypy.commits at gmail.com Tue Dec 20 12:56:47 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 09:56:47 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: restore last line 
that was accidentally deleted Message-ID: <585970df.a516190a.4c1f9.d91d@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89208:428d4fc6f787 Date: 2016-12-20 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/428d4fc6f787/ Log: restore last line that was accidentally deleted diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -3158,3 +3158,4 @@ if surviving_dict: surviving_dict.insertclean(obj, pyobject) else: + self._rrc_free(pyobject) diff --git a/rpython/rtyper/lltypesystem/test/test_ztranslated.py b/rpython/rtyper/lltypesystem/test/test_ztranslated.py --- a/rpython/rtyper/lltypesystem/test/test_ztranslated.py +++ b/rpython/rtyper/lltypesystem/test/test_ztranslated.py @@ -57,3 +57,7 @@ def test_compiled(): fn = compile(main, [], gcpolicy="minimark") fn() + +def test_compiled_incminimark(): + fn = compile(main, [], gcpolicy="incminimark") + fn() From pypy.commits at gmail.com Tue Dec 20 12:56:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 09:56:50 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: pass the test with incminimark enabled Message-ID: <585970e2.42452e0a.91ed9.9470@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89209:101c94950d90 Date: 2016-12-20 18:55 +0100 http://bitbucket.org/pypy/pypy/changeset/101c94950d90/ Log: pass the test with incminimark enabled diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1599,6 +1599,7 @@ v_ret = hop.genop("direct_call", [self.move_out_of_nursery_ptr, self.c_const_gc, v_adr], resulttype=llmemory.Address) + import pdb; pdb.set_trace() hop.genop("cast_adr_to_ptr", [v_ret], resultvar = hop.spaceop.result) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- 
a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -543,8 +543,17 @@ NOTE: Only use for immutable objects! """ - from rpython.rtyper.lltypesystem.lloperation import llop - return llop.gc_move_out_of_nursery(lltype.Void, obj) + pass + +class MoveOutOfNurseryEntry(ExtRegistryEntry): + _about_ = move_out_of_nursery + + def compute_result_annotation(self, s_obj): + return s_obj + + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.genop('gc_move_out_of_nursery', hop.args_v, resulttype=hop.r_result) # ____________________________________________________________ diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1343,17 +1343,16 @@ from rpython.rlib import rgc if we_are_translated(): - newstring = string if rgc.can_move(string): - newstring = rgc.move_out_of_nursery(string) + string = rgc.move_out_of_nursery(string) # string cannot move now! return the address - lldata = llstr(newstring) + lldata = llstr(string) data_start = (llmemory.cast_ptr_to_adr(lldata) + offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) data_start = cast(CCHARP, data_start) - data_start[len(newstring)] = '\x00' # write the final extra null + data_start[len(string)] = '\x00' # write the final extra null return data_start else: global TEST_RAW_ADDR_KEEP_ALIVE diff --git a/rpython/rtyper/lltypesystem/test/test_ztranslated.py b/rpython/rtyper/lltypesystem/test/test_ztranslated.py --- a/rpython/rtyper/lltypesystem/test/test_ztranslated.py +++ b/rpython/rtyper/lltypesystem/test/test_ztranslated.py @@ -54,10 +54,6 @@ def target(driver, args): return main -def test_compiled(): - fn = compile(main, [], gcpolicy="minimark") - fn() - def test_compiled_incminimark(): fn = compile(main, [], gcpolicy="incminimark") fn() From pypy.commits at gmail.com Wed Dec 21 02:49:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 23:49:00 
-0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: remove pdb.set_trace() for debugging, add move_out_of_nursery to LLInterpreter Message-ID: <585a33ec.c515c20a.a3d17.fa6e@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89210:844d198c0d79 Date: 2016-12-21 08:48 +0100 http://bitbucket.org/pypy/pypy/changeset/844d198c0d79/ Log: remove pdb.set_trace() for debugging, add move_out_of_nursery to LLInterpreter diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1599,7 +1599,6 @@ v_ret = hop.genop("direct_call", [self.move_out_of_nursery_ptr, self.c_const_gc, v_adr], resulttype=llmemory.Address) - import pdb; pdb.set_trace() hop.genop("cast_adr_to_ptr", [v_ret], resultvar = hop.spaceop.result) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -1135,6 +1135,9 @@ exc_data.exc_value = lltype.typeOf(evalue)._defl() return bool(etype) + def op_gc_move_out_of_nursery(self, obj): + raise NotImplementedError("gc_move_out_of_nursery") + class Tracer(object): Counter = 0 From pypy.commits at gmail.com Wed Dec 21 02:49:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 20 Dec 2016 23:49:39 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: merge default Message-ID: <585a3413.0b561c0a.9ea67.9ccf@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89211:6202b84a767f Date: 2016-12-21 08:49 +0100 http://bitbucket.org/pypy/pypy/changeset/6202b84a767f/ Log: merge default diff too long, truncating to 2000 out of 22218 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. 
- If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. 
""" - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. 
diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 
+1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. - at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, 
Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. 
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! 
type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. 
bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Wed Dec 21 05:58:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 21 Dec 2016 02:58:53 -0800 (PST) Subject: [pypy-commit] pypy interp-opt: merge default Message-ID: <585a606d.12ad1c0a.593aa.e682@mx.google.com> Author: Richard Plangger Branch: interp-opt Changeset: r89212:fce9bec43428 Date: 2016-12-20 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/fce9bec43428/ Log: merge default diff too long, truncating to 2000 out of 53292 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -33,3 +33,4 @@ 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 050d84dd78997f021acf0e133934275d63547cc0 release-pypy2.7-v5.4.1 0e2d9a73f5a1818d0245d75daccdbe21b2d5c3ef release-pypy2.7-v5.4.1 +aff251e543859ce4508159dd9f1a82a2f553de00 release-pypy2.7-v5.6.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,15 +44,15 @@ Matti Picus Alex Gaynor Philip Jenvey + Ronan Lamy Brian Kearns - Ronan Lamy + Richard Plangger Michael Hudson Manuel Jacob David Schneider Holger Krekel Christian Tismer Hakan Ardo - Richard Plangger Benjamin Peterson Anders Chrigstrom Eric van Riet Paap @@ -68,8 +68,8 @@ Niklaus Haldimann Camillo Bruni Laura Creighton + Romain Guillebert Toon Verwaest - Romain Guillebert Leonardo Santagada Seo Sanghyeon Ronny Pfannschmidt @@ -89,7 +89,9 @@ Ludovic Aubry Jacob Hallen Jason Creighton + Mark Young Alex Martelli + Spenser Bauman Michal Bendowski stian Jan de Mooij @@ -100,20 +102,21 @@ Stefan Schwarzer Valentino Volonghi Tomek Meka + 
Stefano Rivera Patrick Maupin + Devin Jeanpierre Bob Ippolito Bruno Gola David Malcolm Jean-Paul Calderone - Mark Young Timo Paulssen + Edd Barrett Squeaky - Devin Jeanpierre Marius Gedminas Alexandre Fayolle Simon Burton - Stefano Rivera Martin Matusiak + Nicolas Truessel Konstantin Lopuhin Wenzhu Man John Witulski @@ -123,14 +126,12 @@ Dario Bertini Mark Pearse Simon Cross - Edd Barrett + Jeremy Thurgood Andreas Stührk Tobias Pape Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Spenser Bauman - Jeremy Thurgood Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy @@ -141,7 +142,6 @@ tav Taavi Burns Georg Brandl - Nicolas Truessel Bert Freudenberg Stian Andreassen Wanja Saatkamp @@ -156,19 +156,20 @@ Preston Timmons David Ripton Jeff Terrace + Tim Felgentreff Dusty Phillips Lukas Renggli Guenter Jantzen William Leslie Ned Batchelder - Tim Felgentreff Anton Gulenko Amit Regmi Ben Young - Sergey Matyunin + Jasper Schulz Nicolas Chauvat Andrew Durdin Andrew Chambers + Sergey Matyunin Michael Schneider Nicholas Riley Jason Chu @@ -184,16 +185,16 @@ Jared Grubb Karl Bartel Wouter van Heyst - Sebastian Pawluś Brian Dorsey Victor Stinner Andrews Medina + Sebastian Pawluś Stuart Williams - Jasper Schulz - Christian Hudon + Daniel Patrick + Aaron Iles Toby Watson Antoine Pitrou - Aaron Iles + Christian Hudon Michael Cheng Justas Sadzevicius Gasper Zejn @@ -201,8 +202,8 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi + Faye Zhao Elmo Mäntynen - Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson @@ -211,11 +212,12 @@ Alex Perry Vaibhav Sood Alan McIntyre + Reuben Cummings Alexander Sedov p_zieschang at yahoo.de Attila Gobi - Jasper.Schulz Christopher Pope + Aaron Gallagher Florin Papa Christian Tismer Marc Abramowitz @@ -232,7 +234,6 @@ Gabriel Lukas Vacek Kunal Grover - Aaron Gallagher Andrew Dalke Sylvain Thenault Jakub Stasiak @@ -255,6 +256,7 @@ Philipp Rustemeuer Henrik Vendelbo Richard Lancaster + Yasir Suhail Dan Buch Miguel de Val Borro Artur 
Lisiecki @@ -267,6 +269,7 @@ Catalin Gabriel Manciu Tomo Cocoa Kim Jin Su + rafalgalczynski at gmail.com Toni Mattis Amber Brown Lucas Stadler @@ -294,9 +297,7 @@ Michael Hudson-Doyle Anders Sigfridsson Nikolay Zinov - Yasir Suhail Jason Michalski - rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven Akira Li @@ -310,9 +311,10 @@ James Lan Volodymyr Vladymyrov shoma hosaka - Daniel Neuhäuser Ben Mather Niclas Olofsson + Matthew Miller + Rodrigo Araújo halgari Boglarka Vezer Chris Pressey @@ -322,8 +324,9 @@ Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Daniil Yarancev Jim Baker - Rodrigo Araújo + Dan Crosta Nikolaos-Digenis Karagiannis James Robert Armin Ronacher @@ -337,32 +340,31 @@ Tomer Chachamu Christopher Groskopf Asmo Soinio - Stefan Marr jiaaro Mads Kiilerich - opassembler.py Antony Lee Jason Madden + Daniel Neuh�user + reubano at gmail.com Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer Even Wiik Thomassen jbs squeaky - Zearin soareschen Jonas Pfannschmidt Kurt Griffiths Mike Bayer - Matthew Miller + Stefan Marr Flavio Percoco Kristoffer Kleine - yasirs Michael Chermside Anna Ravencroft pizi + remarkablerocket Andrey Churin - Dan Crosta + Zearin Eli Stevens Tobias Diaz Julien Phalip diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.5.2' +__version__ = '2.9.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py --- a/_pytest/_argcomplete.py +++ b/_pytest/_argcomplete.py @@ -88,9 +88,6 @@ return completion if os.environ.get('_ARGCOMPLETE'): - # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format - if sys.version_info[:2] < (2, 6): - sys.exit(1) try: import argcomplete.completers except ImportError: diff --git a/_pytest/_code/__init__.py b/_pytest/_code/__init__.py new file mode 100644 --- /dev/null +++ b/_pytest/_code/__init__.py @@ -0,0 +1,12 @@ +""" python inspection/code generation API """ +from .code 
import Code # noqa +from .code import ExceptionInfo # noqa +from .code import Frame # noqa +from .code import Traceback # noqa +from .code import getrawcode # noqa +from .code import patch_builtins # noqa +from .code import unpatch_builtins # noqa +from .source import Source # noqa +from .source import compile_ as compile # noqa +from .source import getfslineno # noqa + diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py new file mode 100644 --- /dev/null +++ b/_pytest/_code/_py2traceback.py @@ -0,0 +1,81 @@ +# copied from python-2.7.3's traceback.py +# CHANGES: +# - some_str is replaced, trying to create unicode strings +# +import types + +def format_exception_only(etype, value): + """Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + """ + + # An instance should not have a meaningful value parameter, but + # sometimes does, particularly for string exceptions, such as + # >>> raise string1, string2 # deprecated + # + # Clear these out first because issubtype(string1, SyntaxError) + # would throw another exception and mask the original problem. + if (isinstance(etype, BaseException) or + isinstance(etype, types.InstanceType) or + etype is None or type(etype) is str): + return [_format_final_exc_line(etype, value)] + + stype = etype.__name__ + + if not issubclass(etype, SyntaxError): + return [_format_final_exc_line(stype, value)] + + # It was a syntax error; show exactly where the problem was found. 
+ lines = [] + try: + msg, (filename, lineno, offset, badline) = value.args + except Exception: + pass + else: + filename = filename or "" + lines.append(' File "%s", line %d\n' % (filename, lineno)) + if badline is not None: + if isinstance(badline, bytes): # python 2 only + badline = badline.decode('utf-8', 'replace') + lines.append(u' %s\n' % badline.strip()) + if offset is not None: + caretspace = badline.rstrip('\n')[:offset].lstrip() + # non-space whitespace (likes tabs) must be kept for alignment + caretspace = ((c.isspace() and c or ' ') for c in caretspace) + # only three spaces to account for offset1 == pos 0 + lines.append(' %s^\n' % ''.join(caretspace)) + value = msg + + lines.append(_format_final_exc_line(stype, value)) + return lines + +def _format_final_exc_line(etype, value): + """Return a list of a single line -- normal case for format_exception_only""" + valuestr = _some_str(value) + if value is None or not valuestr: + line = "%s\n" % etype + else: + line = "%s: %s\n" % (etype, valuestr) + return line + +def _some_str(value): + try: + return unicode(value) + except Exception: + try: + return str(value) + except Exception: + pass + return '' % type(value).__name__ diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py new file mode 100644 --- /dev/null +++ b/_pytest/_code/code.py @@ -0,0 +1,805 @@ +import sys +from inspect import CO_VARARGS, CO_VARKEYWORDS + +import py + +builtin_repr = repr + +reprlib = py.builtin._tryimport('repr', 'reprlib') + +if sys.version_info[0] >= 3: + from traceback import format_exception_only +else: + from ._py2traceback import format_exception_only + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + if not hasattr(rawcode, "co_filename"): + rawcode = getrawcode(rawcode) + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" 
%(rawcode,)) + self.raw = rawcode + + def __eq__(self, other): + return self.raw == other.raw + + def __ne__(self, other): + return not self == other + + @property + def path(self): + """ return a path object pointing to source code (note that it + might not point to an actually existing file). """ + p = py.path.local(self.raw.co_filename) + # maybe don't try this checking + if not p.check(): + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + p = self.raw.co_filename + return p + + @property + def fullsource(self): + """ return a _pytest._code.Source object for the full source file of the code + """ + from _pytest._code import source + full, _ = source.findsource(self.raw) + return full + + def source(self): + """ return a _pytest._code.Source object for the code object's source only + """ + # return source only for that part of code + import _pytest._code + return _pytest._code.Source(self.raw) + + def getargs(self, var=False): + """ return a tuple with the argument names for the code object + + if 'var' is set True also return the names of the variable and + keyword arguments when present + """ + # handfull shortcut for getting args + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + self.code = Code(frame.f_code) + + @property + def statement(self): + """ statement this frame is at """ + import _pytest._code + if self.code.fullsource is None: + return _pytest._code.Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 
'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return py.io.saferepr(object) + + def is_true(self, object): + return object + + def getargs(self, var=False): + """ return a list of tuples (name, value) for all arguments + + if 'var' is set True also include the variable and keyword + arguments when present + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + _repr_style = None + exprinfo = None + + def __init__(self, rawentry): + self._rawentry = rawentry + self.lineno = rawentry.tb_lineno - 1 + + def set_repr_style(self, mode): + assert mode in ("short", "long") + self._repr_style = mode + + @property + def frame(self): + import _pytest._code + return _pytest._code.Frame(self._rawentry.tb_frame) + + @property + def relline(self): + return self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "" %(self.frame.code.path, self.lineno+1) + + @property + def statement(self): + """ _pytest._code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + + @property + def path(self): + """ path to the source code """ + return self.frame.code.path + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlaying frame") + + def reinterpret(self): 
+ """Reinterpret the failing statement and returns a detailed information + about what operations are performed.""" + from _pytest.assertion.reinterpret import reinterpret + if self.exprinfo is None: + source = py.builtin._totext(self.statement).strip() + x = reinterpret(source, self.frame, should_fail=True) + if not py.builtin._istext(x): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) + + def getsource(self, astcache=None): + """ return failing source code. """ + # we use the passed in astcache to not reparse asttrees + # within exception info printing + from _pytest._code.source import getstatementrange_ast + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast(self.lineno, source, + astnode=astnode) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.f_locals['__tracebackhide__'] + except KeyError: + try: + return self.frame.f_globals['__tracebackhide__'] + except KeyError: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" 
+ return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlaying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb): + """ initialize from given python traceback object. """ + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by provding any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackEntry + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackEntries which are hidden + (see ishidden() above) + """ + return 
Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + for i in range(-1, -len(self)-1, -1): + entry = self[i] + if not entry.ishidden(): + return entry + return self[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackEntry where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. 
+ """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + import _pytest._code + if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = str(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + #: the exception class + self.type = tup[0] + #: the exception instance + self.value = tup[1] + #: the exception raw traceback + self.tb = tup[2] + #: the exception type name + self.typename = self.type.__name__ + #: the exception traceback (_pytest._code.Traceback instance) + self.traceback = _pytest._code.Traceback(self.tb) + + def __repr__(self): + return "" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + _pytest._code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + return ReprFileLocation(path, lineno+1, exconly) + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. 
+ showlocals: show locals per traceback entry + style: long|short|no|native traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + + in case of style==native, tbfilter and showlocals is ignored. + """ + if style == 'native': + return ReprExceptionInfo(ReprTracebackNative( + py.std.traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )), self._getreprcrash()) + + fmt = FormattedExcinfo(showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + + def __unicode__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return unicode(loc) + + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + self.astcache = {} + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return py.io.saferepr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + 
+ def get_source(self, source, line_index=-1, excinfo=None, short=False): + """ return formatted and marked up source lines. """ + import _pytest._code + lines = [] + if source is None or line_index >= len(source.lines): + source = _pytest._code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index+1:]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable. 
+ str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + import _pytest._code + source = self._getentrysource(entry) + if source is None: + source = _pytest._code.Source("???") + line_index = 0 + else: + # entry.getfirstlinesource() can be -1, should be 0 on jython + line_index = entry.lineno - max(entry.getfirstlinesource(), 0) + + lines = [] + style = entry._repr_style + if style is None: + style = self.style + if style in ("short", "long"): + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" %(entry.name) + else: + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = None + if not short: + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path): + if not self.abspath: + try: + np = py.path.local().bestrelpath(path) + except OSError: + return path + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if is_recursion_error(excinfo): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, 
einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + s = self.__unicode__() + if sys.version_info[0] < 3 and isinstance(s, unicode): + s = s.encode('utf-8') + return s + + def __unicode__(self): + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = py.io.TextIO() + tw = py.io.TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + # the entries might have different styles + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i+1] + if entry.style == "long" or \ + entry.style == "short" and next_entry.style == "long": + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines): + self.style = "native" + self.reprentries 
= [ReprEntryNative(tblines)] + self.extraline = None + +class ReprEntryNative(TerminalRepr): + style = "native" + + def __init__(self, tblines): + self.lines = tblines + + def toterminal(self, tw): + tw.write("".join(self.lines)) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + self.style = style + + def toterminal(self, tw): + if self.style == "short": + self.reprfileloc.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + #tw.line("") + return + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + 
if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + +oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from _pytest.assertion import reinterpret + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = reinterpret.AssertionError + if compile: + import _pytest._code + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = _pytest._code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj, trycall=True): + """ return code object for given function. 
""" + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + if trycall and not hasattr(obj, 'co_firstlineno'): + if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): + x = getrawcode(obj.__call__, trycall=False) + if hasattr(x, 'co_firstlineno'): + return x + return obj + +if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5 + def is_recursion_error(excinfo): + return excinfo.errisinstance(RecursionError) # noqa +else: + def is_recursion_error(excinfo): + if not excinfo.errisinstance(RuntimeError): + return False + try: + return "maximum recursion depth exceeded" in str(excinfo.value) + except UnicodeError: + return False diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py new file mode 100644 --- /dev/null +++ b/_pytest/_code/source.py @@ -0,0 +1,423 @@ +from __future__ import generators + +from bisect import bisect_right +import sys +import inspect, tokenize +import py +from types import ModuleType +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. 
+ """ + _compilecounter = 0 + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, (tuple, list)): + partlines = [x.rstrip("\n") for x in part] + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. 
+ """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno, assertion=False): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno, assertion) + return self[start:end] + + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self, offset=None): + """ return a new source object deindented by offset. + If offset is None then guess an indentation offset from + the first non-blank line. Subsequent lines which have a + lower indentation offset will be copied verbatim as + they are assumed to be part of multilines. + """ + # XXX maybe use the tokenizer to properly handle multiline + # strings etc.pp? + newsource = Source() + newsource.lines[:] = deindent(self.lines, offset) + return newsource + + def isparseable(self, deindent=True): + """ return True if source is parseable, heuristically + deindenting it by default. 
+ """ + try: + import parser + except ImportError: + syntax_checker = lambda x: compile(x, 'asd', 'exec') + else: + syntax_checker = parser.suite + + if deindent: + source = str(self.deindent()) + else: + source = str(self) + try: + #compile(source+'\n', "x", "exec") + syntax_checker(source+'\n') + except KeyboardInterrupt: + raise + except Exception: + return False + else: + return True + + def __str__(self): + return "\n".join(self.lines) + + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, + dont_inherit=0, _genframe=None): + """ return compiled code object. if filename is None + invent an artificial filename which displays + the source/line position of the caller frame. + """ + if not filename or py.path.local(filename).check(file=0): + if _genframe is None: + _genframe = sys._getframe(1) # the caller + fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + base = "<%d-codegen " % self._compilecounter + self.__class__._compilecounter += 1 + if not filename: + filename = base + '%s:%d>' % (fn, lineno) + else: + filename = base + '%r %s:%d>' % (filename, fn, lineno) + source = "\n".join(self.lines) + '\n' + try: + co = cpy_compile(source, filename, mode, flag) + except SyntaxError: + ex = sys.exc_info()[1] + # re-represent syntax errors from parsing python strings + msglines = self.lines[:ex.lineno] + if ex.offset: + msglines.append(" "*ex.offset + '^') + msglines.append("(code was compiled probably from here: %s)" % filename) + newex = SyntaxError('\n'.join(msglines)) + newex.offset = ex.offset + newex.lineno = ex.lineno + newex.text = ex.text + raise newex + else: + if flag & _AST_FLAG: + return co + lines = [(x + "\n") for x in self.lines] + if sys.version_info[0] >= 3: + # XXX py3's inspect.getsourcefile() checks for a module + # and a pep302 __loader__ ... 
we don't have a module + # at code compile-time so we need to fake it here + m = ModuleType("_pycodecompile_pseudo_module") + py.std.inspect.modulesbyfile[filename] = None + py.std.sys.modules[None] = m + m.__loader__ = 1 + py.std.linecache.cache[filename] = (1, None, lines, filename) + return co + +# +# public API shortcut functions +# + +def compile_(source, filename=None, mode='exec', flags= + generators.compiler_flag, dont_inherit=0): + """ compile the given source to a raw code object, + and maintain an internal cache which allows later + retrieval of the source code for the code object + and any recursively created code objects. + """ + if _ast is not None and isinstance(source, _ast.AST): + # XXX should Source support having AST? + return cpy_compile(source, filename, mode, flags, dont_inherit) + _genframe = sys._getframe(1) # the caller + s = Source(source) + co = s.compile(filename, mode, flags, _genframe=_genframe) + return co + + +def getfslineno(obj): + """ Return source location (path, lineno) for the given object. 
+ If the source cannot be determined return ("", -1) + """ + import _pytest._code + try: + code = _pytest._code.Code(obj) + except TypeError: + try: + fn = (py.std.inspect.getsourcefile(obj) or + py.std.inspect.getfile(obj)) + except TypeError: + return "", -1 + + fspath = fn and py.path.local(fn) or None + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except IOError: + pass + else: + fspath = code.path + lineno = code.firstlineno + assert isinstance(lineno, int) + return fspath, lineno + +# +# helper functions +# + +def findsource(obj): + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except py.builtin._sysex: + raise + except: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + +def getsource(obj, **kwargs): + import _pytest._code + obj = _pytest._code.getrawcode(obj) + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + it = readline_generator(lines) + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. 
multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. + newlines.extend(lines[len(newlines):]) + return newlines + + +def get_statement_startend2(lineno, node): + import ast + # flatten all statements and except handlers into one lineno-list + # AST's line numbers start indexing at 1 + l = [] + for x in ast.walk(node): + if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): + l.append(x.lineno - 1) + for name in "finalbody", "orelse": + val = getattr(x, name, None) + if val: + # treat the finally/orelse part as its own statement + l.append(val[0].lineno - 1 - 1) + l.sort() + insert_index = bisect_right(l, lineno) + start = l[insert_index - 1] + if insert_index >= len(l): + end = None + else: + end = l[insert_index] + return start, end + + +def getstatementrange_ast(lineno, source, assertion=False, astnode=None): + if astnode is None: + content = str(source) + if sys.version_info < (2,7): + content += "\n" + try: + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + except ValueError: + start, end = getstatementrange_old(lineno, source, assertion) + return None, start, end + start, end = get_statement_startend2(lineno, astnode) + # we need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself + block_finder = inspect.BlockFinder() + # if we start with an indented line, put blockfinder to "started" mode + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except 
(inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # the end might still point to a comment or empty line, correct it + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end + + +def getstatementrange_old(lineno, source, assertion=False): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + raise an IndexError if no such statementrange can be found. + """ + # XXX this logic is only used on python2.4 and below + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + if assertion: + line = source.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue + trylines = source.lines[start:lineno+1] + # quick hack to prepare parsing an indented line with + # compile_command() (which errors on "return" outside defs) + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + continue + + # 2. find the end of the statement + for end in range(lineno+1, len(source)+1): + trysource = source[start:end] + if trysource.isparseable(): + return start, end + if end == start + 100: # XXX otherwise, it takes forever + break # XXX + raise SyntaxError("no valid source range around line %d " % (lineno,)) + + diff --git a/_pytest/_pluggy.py b/_pytest/_pluggy.py new file mode 100644 --- /dev/null +++ b/_pytest/_pluggy.py @@ -0,0 +1,11 @@ +""" +imports symbols from vendored "pluggy" if available, otherwise +falls back to importing "pluggy" from the default namespace. 
+""" + +try: + from _pytest.vendored_packages.pluggy import * # noqa + from _pytest.vendored_packages.pluggy import __version__ # noqa +except ImportError: + from pluggy import * # noqa + from pluggy import __version__ # noqa diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -2,24 +2,37 @@ support for presenting detailed information in failing assertions. """ import py +import os import sys from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util + def pytest_addoption(parser): group = parser.getgroup("debugconfig") - group.addoption('--assert', action="store", dest="assertmode", + group.addoption('--assert', + action="store", + dest="assertmode", choices=("rewrite", "reinterp", "plain",), - default="rewrite", metavar="MODE", - help="""control assertion debugging tools. -'plain' performs no assertion debugging. -'reinterp' reinterprets assert statements after they failed to provide assertion expression information. -'rewrite' (the default) rewrites assert statements in test modules on import -to provide assert expression information. """) - group.addoption('--no-assert', action="store_true", default=False, - dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', '--no-magic', action="store_true", - default=False, help="DEPRECATED equivalent to --assert=plain") + default="rewrite", + metavar="MODE", + help="""control assertion debugging tools. 'plain' + performs no assertion debugging. 'reinterp' + reinterprets assert statements after they failed + to provide assertion expression information. + 'rewrite' (the default) rewrites assert + statements in test modules on import to + provide assert expression information. 
""") + group.addoption('--no-assert', + action="store_true", + default=False, + dest="noassert", + help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', + action="store_true", + default=False, + help="DEPRECATED equivalent to --assert=plain") + class AssertionState: """State for the assertion plugin.""" @@ -28,6 +41,7 @@ self.mode = mode self.trace = config.trace.root.get("assertion") + def pytest_configure(config): mode = config.getvalue("assertmode") if config.getvalue("noassert") or config.getvalue("nomagic"): @@ -41,7 +55,7 @@ # Both Jython and CPython 2.6.0 have AST bugs that make the # assertion rewriting hook malfunction. if (sys.platform.startswith('java') or - sys.version_info[:3] == (2, 6, 0)): + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) @@ -57,11 +71,12 @@ config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook config._assertstate.trace("configured with mode set to %r" % (mode,)) + def undo(): + hook = config._assertstate.hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + config.add_cleanup(undo) -def pytest_unconfigure(config): - hook = config._assertstate.hook - if hook is not None: - sys.meta_path.remove(hook) def pytest_collection(session): # this hook is only called when test modules are collected @@ -71,36 +86,66 @@ if hook is not None: hook.set_session(session) + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ['CI', 'BUILD_NUMBER'] + return any(var in os.environ for var in env_vars) + + def pytest_runtest_setup(item): + """Setup the pytest_assertrepr_compare hook + + The newinterpret and rewrite modules will use util._reprcompare if + it exists to use custom reporting via the + pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. 
+ """ def callbinrepr(op, left, right): + """Call the pytest_assertrepr_compare hook and prepare the result + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are dropped unless -vv was used or + running on a CI. + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. + """ hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) - for new_expl in hook_result: if new_expl: - # Don't include pageloads of data unless we are very - # verbose (-vv) - if (sum(len(p) for p in new_expl[1:]) > 80*8 - and item.config.option.verbose < 2): - new_expl[1:] = [py.builtin._totext( - 'Detailed information truncated, use "-vv" to show')] - res = py.builtin._totext('\n~').join(new_expl) + if (sum(len(p) for p in new_expl[1:]) > 80*8 and + item.config.option.verbose < 2 and + not _running_on_ci()): + show_max = 10 + truncated_lines = len(new_expl) - show_max + new_expl[show_max:] = [py.builtin._totext( + 'Detailed information truncated (%d more lines)' + ', use "-vv" to show' % truncated_lines)] + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = py.builtin._totext("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": - # The result will be fed back a python % formatting - # operation, which will fail if there are extraneous - # '%'s in the string. Escape them here. 
res = res.replace("%", "%%") return res util._reprcompare = callbinrepr + def pytest_runtest_teardown(item): util._reprcompare = None + def pytest_sessionfinish(session): hook = session.config._assertstate.hook if hook is not None: hook.session = None + def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret @@ -108,6 +153,7 @@ if mode == "rewrite": from _pytest.assertion import rewrite # noqa + def warn_about_missing_assertion(mode): try: assert False @@ -121,8 +167,10 @@ specifically = "failing tests may report as passing" sys.stderr.write("WARNING: " + specifically + - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n") + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n") + +# Expose this plugin's implementation for the pytest_assertrepr_compare hook pytest_assertrepr_compare = util.assertrepr_compare diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py deleted file mode 100644 --- a/_pytest/assertion/newinterpret.py +++ /dev/null @@ -1,333 +0,0 @@ -""" -Find intermediate evalutation results in assert statements through builtin AST. -This should replace oldinterpret.py eventually. 
-""" - -import sys -import ast - -import py -from _pytest.assertion import util -from _pytest.assertion.reinterpret import BuiltinAssertionError - - -if sys.platform.startswith("java"): - # See http://bugs.jython.org/issue1497 - _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", - "ListComp", "GeneratorExp", "Yield", "Compare", "Call", - "Repr", "Num", "Str", "Attribute", "Subscript", "Name", - "List", "Tuple") - _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", - "AugAssign", "Print", "For", "While", "If", "With", "Raise", - "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", - "Exec", "Global", "Expr", "Pass", "Break", "Continue") - _expr_nodes = set(getattr(ast, name) for name in _exprs) - _stmt_nodes = set(getattr(ast, name) for name in _stmts) - def _is_ast_expr(node): - return node.__class__ in _expr_nodes - def _is_ast_stmt(node): - return node.__class__ in _stmt_nodes -else: - def _is_ast_expr(node): - return isinstance(node, ast.expr) - def _is_ast_stmt(node): - return isinstance(node, ast.stmt) - - -class Failure(Exception): - """Error found while interpreting AST.""" - - def __init__(self, explanation=""): - self.cause = sys.exc_info() - self.explanation = explanation - - -def interpret(source, frame, should_fail=False): - mod = ast.parse(source) - visitor = DebugInterpreter(frame) - try: - visitor.visit(mod) - except Failure: - failure = sys.exc_info()[1] - return getfailure(failure) - if should_fail: - return ("(assertion failed, but when it was re-run for " - "printing intermediate values, it did not fail. 
Suggestions: " - "compute assert expression before the assert or use --assert=plain)") - -def run(offending_line, frame=None): - if frame is None: - frame = py.code.Frame(sys._getframe(1)) - return interpret(offending_line, frame) - -def getfailure(e): - explanation = util.format_explanation(e.explanation) - value = e.cause[1] - if str(value): - lines = explanation.split('\n') - lines[0] += " << %s" % (value,) - explanation = '\n'.join(lines) - text = "%s: %s" % (e.cause[0].__name__, explanation) - if text.startswith('AssertionError: assert '): - text = text[16:] - return text - -operator_map = { - ast.BitOr : "|", - ast.BitXor : "^", - ast.BitAnd : "&", - ast.LShift : "<<", - ast.RShift : ">>", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" -} - -unary_map = { - ast.Not : "not %s", - ast.Invert : "~%s", - ast.USub : "-%s", - ast.UAdd : "+%s" -} - - -class DebugInterpreter(ast.NodeVisitor): - """Interpret AST nodes to gleam useful debugging information. """ - - def __init__(self, frame): - self.frame = frame - - def generic_visit(self, node): - # Fallback when we don't have a special implementation. 
- if _is_ast_expr(node): - mod = ast.Expression(node) - co = self._compile(mod) - try: - result = self.frame.eval(co) - except Exception: - raise Failure() - explanation = self.frame.repr(result) - return explanation, result - elif _is_ast_stmt(node): - mod = ast.Module([node]) - co = self._compile(mod, "exec") - try: - self.frame.exec_(co) - except Exception: - raise Failure() - return None, None - else: - raise AssertionError("can't handle %s" %(node,)) - - def _compile(self, source, mode="eval"): - return compile(source, "", mode) - - def visit_Expr(self, expr): - return self.visit(expr.value) - - def visit_Module(self, mod): - for stmt in mod.body: - self.visit(stmt) - - def visit_Name(self, name): - explanation, result = self.generic_visit(name) - # See if the name is local. - source = "%r in locals() is not globals()" % (name.id,) - co = self._compile(source) - try: - local = self.frame.eval(co) - except Exception: - # have to assume it isn't - local = None - if local is None or not self.frame.is_true(local): - return name.id, result - return explanation, result - - def visit_Compare(self, comp): - left = comp.left - left_explanation, left_result = self.visit(left) From pypy.commits at gmail.com Wed Dec 21 05:58:56 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 21 Dec 2016 02:58:56 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: change test, use memoryview instead of buffer (need to ensure that the buffer is writable) Message-ID: <585a6070.0e0a1c0a.5228b.ebd1@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89213:aa59208c8d13 Date: 2016-12-21 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/aa59208c8d13/ Log: change test, use memoryview instead of buffer (need to ensure that the buffer is writable) diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py 
@@ -32,7 +32,7 @@ del a; gc.collect(); gc.collect(); gc.collect() self.assertEqual(x[:], expected) - self.assertRaises((TypeError, ValueError), + self.assertRaises(TypeError, (c_char * 16).from_buffer, "a" * 16) def test_fom_buffer_with_offset(self): diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -85,7 +85,9 @@ def from_buffer(self, obj, offset=0): size = self._sizeofinstances() - buf = buffer(obj, offset, size) + buf = memoryview(obj)[offset:] + if buf.readonly: + raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) if len(buf) < size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" From pypy.commits at gmail.com Thu Dec 22 05:30:57 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 02:30:57 -0800 (PST) Subject: [pypy-commit] cffi default: Document explicit calls to lib.malloc() Message-ID: <585bab61.4306c20a.d0b9e.0fea@mx.google.com> Author: Armin Rigo Branch: Changeset: r2835:4c4845c4385e Date: 2016-12-22 11:30 +0100 http://bitbucket.org/cffi/cffi/changeset/4c4845c4385e/ Log: Document explicit calls to lib.malloc() diff --git a/doc/source/ref.rst b/doc/source/ref.rst --- a/doc/source/ref.rst +++ b/doc/source/ref.rst @@ -342,7 +342,8 @@ **ffi.gc(cdata, destructor)**: return a new cdata object that points to the same data. Later, when this new cdata object is garbage-collected, ``destructor(old_cdata_object)`` will be called. Example of usage: -``ptr = ffi.gc(lib.malloc(42), lib.free)``. Note that like objects +``ptr = ffi.gc(lib.custom_malloc(42), lib.custom_free)``. +Note that like objects returned by ``ffi.new()``, the returned pointer objects have *ownership*, which means the destructor is called as soon as *this* exact returned object is garbage-collected. 
@@ -485,7 +486,24 @@ # then replace `p = ffi.new("char[]", bigsize)` with: p = new_nonzero("char[]", bigsize) - + +Note anyway that it might be a better idea to use explicit calls to +``lib.malloc()`` and ``lib.free()``, because the memory returned by +``new()`` or ``new_allocator()()`` is only freed when the garbage +collector runs (i.e. not always instantly after the reference to the +object goes away, particularly but not only on PyPy). Example:: + + ffibuilder.cdef(""" + void *malloc(size_t size); + void free(void *ptr); + """) + + # then in your code: + p = lib.malloc(bigsize) + try: + ... + finally: + lib.free(p) ffi.init_once() From pypy.commits at gmail.com Thu Dec 22 06:06:16 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 03:06:16 -0800 (PST) Subject: [pypy-commit] cffi default: Fix/test/expand the error messages we get by using structs/unions as Message-ID: <585bb3a8.2669c20a.2af85.3cda@mx.google.com> Author: Armin Rigo Branch: Changeset: r2836:6e21f10eead4 Date: 2016-12-22 12:05 +0100 http://bitbucket.org/cffi/cffi/changeset/6e21f10eead4/ Log: Fix/test/expand the error messages we get by using structs/unions as arguments/return values of functions diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4629,6 +4629,22 @@ } } +#define SUPPORTED_IN_API_MODE \ + " are only supported as %s if the function is " \ + "'API mode' and non-variadic (i.e. declared inside ffibuilder" \ + ".cdef()+ffibuilder.set_source() and not taking a final '...' " \ + "argument)" + +static ffi_type *fb_unsupported(CTypeDescrObject *ct, const char *place, + const char *detail) +{ + PyErr_Format(PyExc_NotImplementedError, + "ctype '%s' not supported as %s. %s. 
" + "Such structs" SUPPORTED_IN_API_MODE, + ct->ct_name, place, detail, place); + return NULL; +} + static ffi_type *fb_fill_type(struct funcbuilder_s *fb, CTypeDescrObject *ct, int is_result_type) { @@ -4675,11 +4691,10 @@ if (ct->ct_flags & CT_CUSTOM_FIELD_POS) { /* these NotImplementedErrors may be caught and ignored until a real call is made to a function of this type */ - PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' not supported as %s (it is a struct declared " - "with \"...;\", but the C calling convention may depend " - "on the missing fields)", ct->ct_name, place); - return NULL; + return fb_unsupported(ct, place, + "It can be a struct declared with \"...;\": then the C " + "calling convention may depend on the missing fields. " + "Or, it can be a struct with nested anonymous structs/unions"); } n = PyDict_Size(ct->ct_stuff); @@ -4693,11 +4708,8 @@ CTypeDescrObject *ct1; assert(cf != NULL); if (cf->cf_bitshift >= 0) { - PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' not supported as %s" - " (it is a struct with bit fields)", - ct->ct_name, place); - return NULL; + return fb_unsupported(ct, place, + "It is a struct with bit fields"); } flat = 1; ct1 = cf->cf_type; @@ -4706,11 +4718,8 @@ ct1 = ct1->ct_itemdescr; } if (flat <= 0) { - PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' not supported as %s" - " (it is a struct with a zero-length array)", - ct->ct_name, place); - return NULL; + return fb_unsupported(ct, place, + "It is a struct with a zero-length array"); } nflat += flat; cf = cf->cf_next; @@ -4751,8 +4760,9 @@ } else { PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' (size %zd) not supported as %s", - ct->ct_name, ct->ct_size, place); + "ctype '%s' (size %zd) not supported as %s. 
" + "Unions" SUPPORTED_IN_API_MODE, + ct->ct_name, ct->ct_size, place, place); return NULL; } } diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -2012,3 +2012,79 @@ py.test.raises(ffi.error, ffi.sizeof, "vmat_t") p = ffi.new("vmat_t", 4) assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") + +def test_call_with_custom_field_pos(): + ffi = FFI() + ffi.cdef(""" + struct foo { int x; ...; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_custom_field_pos", """ + struct foo { int y, x; }; + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + print str(e.value) + +def test_call_with_bitfield(): + ffi = FFI() + ffi.cdef(""" + struct foo { int x:5; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_bitfield", """ + struct foo { int x:5; }; + struct foo f(void) { + struct foo s = { 11 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 11 + e = py.test.raises(NotImplementedError, lib.g, 0) + print str(e.value) + +def test_call_with_zero_length_field(): + ffi = FFI() + ffi.cdef(""" + struct foo { int a; int x[0]; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_zero_length_field", """ + struct foo { int a; int x[0]; }; + struct foo f(void) { + struct foo s = { 42 }; + return s; + } + struct foo g(int a, ...) 
{ } + """) + assert lib.f().a == 42 + e = py.test.raises(NotImplementedError, lib.g, 0) + print str(e.value) + +def test_call_with_union(): + ffi = FFI() + ffi.cdef(""" + union foo { int a; char b; }; + union foo f(void); + union foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_union", """ + union foo { int a; char b; }; + union foo f(void) { + union foo s = { 42 }; + return s; + } + union foo g(int a, ...) { } + """) + assert lib.f().a == 42 + e = py.test.raises(NotImplementedError, lib.g, 0) + print str(e.value) From pypy.commits at gmail.com Thu Dec 22 06:29:25 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 03:29:25 -0800 (PST) Subject: [pypy-commit] cffi default: More carefulness in the error messages Message-ID: <585bb915.6737c20a.6877a.5518@mx.google.com> Author: Armin Rigo Branch: Changeset: r2837:2214fb22286b Date: 2016-12-22 12:29 +0100 http://bitbucket.org/cffi/cffi/changeset/2214fb22286b/ Log: More carefulness in the error messages diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4758,11 +4758,17 @@ } return ffistruct; } + else if (ct->ct_flags & CT_UNION) { + PyErr_Format(PyExc_NotImplementedError, + "ctype '%s' not supported as %s. " + "Unions" SUPPORTED_IN_API_MODE, + ct->ct_name, place, place); + return NULL; + } else { PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' (size %zd) not supported as %s. 
" - "Unions" SUPPORTED_IN_API_MODE, - ct->ct_name, ct->ct_size, place, place); + "ctype '%s' (size %zd) not supported as %s", + ct->ct_name, ct->ct_size, place); return NULL; } } From pypy.commits at gmail.com Thu Dec 22 06:41:00 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 03:41:00 -0800 (PST) Subject: [pypy-commit] cffi default: A failing test Message-ID: <585bbbcc.46bb1c0a.a84b0.f38e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2838:b6dd9a07cdf4 Date: 2016-12-22 12:40 +0100 http://bitbucket.org/cffi/cffi/changeset/b6dd9a07cdf4/ Log: A failing test diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -2088,3 +2088,25 @@ assert lib.f().a == 42 e = py.test.raises(NotImplementedError, lib.g, 0) print str(e.value) + +def test_call_with_packed_struct(): + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi = FFI() + ffi.cdef(""" + struct foo { char y; int x; }; + struct foo f(void); + struct foo g(int, ...); + """, packed=True) + lib = verify(ffi, "test_call_with_packed_struct", """ + struct foo { char y; int x; } __attribute__((packed)); + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().y == chr(40) + assert lib.f().x == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + print str(e.value) From pypy.commits at gmail.com Thu Dec 22 11:53:54 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 08:53:54 -0800 (PST) Subject: [pypy-commit] cffi default: Detect packed structs. Improve error messages and test them. Message-ID: <585c0522.86cbc20a.960e5.1d9d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2839:b88f27223be8 Date: 2016-12-22 15:04 +0100 http://bitbucket.org/cffi/cffi/changeset/b88f27223be8/ Log: Detect packed structs. Improve error messages and test them. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -141,6 +141,7 @@ #define CT_WITH_VAR_ARRAY 1048576 #define CT_IS_UNSIZED_CHAR_A 2097152 #define CT_LAZY_FIELD_LIST 4194304 +#define CT_WITH_PACKED_CHANGE 8388608 #define CT_PRIMITIVE_ANY (CT_PRIMITIVE_SIGNED | \ CT_PRIMITIVE_UNSIGNED | \ CT_PRIMITIVE_CHAR | \ @@ -4280,7 +4281,7 @@ CTypeDescrObject *ct; PyObject *fields, *interned_fields, *ignored; int is_union, alignment; - Py_ssize_t boffset, i, nb_fields, boffsetmax, alignedsize; + Py_ssize_t boffset, i, nb_fields, boffsetmax, alignedsize, boffsetorg; Py_ssize_t totalsize = -1; int totalalignment = -1; CFieldObject **previous; @@ -4308,7 +4309,7 @@ "first arg must be a non-initialized struct or union ctype"); return NULL; } - ct->ct_flags &= ~CT_CUSTOM_FIELD_POS; + ct->ct_flags &= ~(CT_CUSTOM_FIELD_POS | CT_WITH_PACKED_CHANGE); alignment = 1; boffset = 0; /* this number is in *bits*, not bytes! */ @@ -4325,7 +4326,7 @@ for (i=0; ict_flags |= CT_WITH_PACKED_CHANGE; + } if (foffset >= 0) { /* a forced field position: ignore the offset just computed, @@ -4398,6 +4404,7 @@ if (PyText_GetSize(fname) == 0 && ftype->ct_flags & (CT_STRUCT|CT_UNION)) { /* a nested anonymous struct or union */ + /* note: it seems we only get here with ffi.verify() */ CFieldObject *cfsrc = (CFieldObject *)ftype->ct_extra; for (; cfsrc != NULL; cfsrc = cfsrc->cf_next) { /* broken complexity in the call to get_field_name(), @@ -4684,7 +4691,8 @@ Another reason for CT_CUSTOM_FIELD_POS would be anonymous nested structures: we lost the information about having it here, so better safe (and forbid it) than sorry (and maybe - crash). + crash). Note: it seems we only get in this case with + ffi.verify(). 
*/ if (force_lazy_struct(ct) < 0) return NULL; @@ -4692,9 +4700,16 @@ /* these NotImplementedErrors may be caught and ignored until a real call is made to a function of this type */ return fb_unsupported(ct, place, - "It can be a struct declared with \"...;\": then the C " - "calling convention may depend on the missing fields. " - "Or, it can be a struct with nested anonymous structs/unions"); + "It is a struct declared with \"...;\", but the C " + "calling convention may depend on the missing fields; " + "or, it contains anonymous struct/unions"); + } + /* Another reason: __attribute__((packed)) is not supported by libffi. + */ + if (ct->ct_flags & CT_WITH_PACKED_CHANGE) { + return fb_unsupported(ct, place, + "It is a 'packed' structure, with a different layout than " + "expected by libffi"); } n = PyDict_Size(ct->ct_stuff); @@ -4709,7 +4724,8 @@ assert(cf != NULL); if (cf->cf_bitshift >= 0) { return fb_unsupported(ct, place, - "It is a struct with bit fields"); + "It is a struct with bit fields, which libffi does not " + "support"); } flat = 1; ct1 = cf->cf_type; @@ -4719,7 +4735,8 @@ } if (flat <= 0) { return fb_unsupported(ct, place, - "It is a struct with a zero-length array"); + "It is a struct with a zero-length array, which libffi " + "does not support"); } nflat += flat; cf = cf->cf_next; @@ -4760,7 +4777,7 @@ } else if (ct->ct_flags & CT_UNION) { PyErr_Format(PyExc_NotImplementedError, - "ctype '%s' not supported as %s. " + "ctype '%s' not supported as %s by libffi. 
" "Unions" SUPPORTED_IN_API_MODE, ct->ct_name, place, place); return NULL; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1095,9 +1095,13 @@ BFunc = new_function_type((BStruct,), BDouble) # internally not callable dummy_func = cast(BFunc, 42) e = py.test.raises(NotImplementedError, dummy_func, "?") - msg = ("ctype \'struct foo\' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend on ' - 'the missing fields)') + msg = ("ctype 'struct foo' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend ' + "on the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder." + "cdef()+ffibuilder.set_source() and not taking a final '...' " + "argument)") assert str(e.value) == msg def test_new_charp(): diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -1075,9 +1075,13 @@ int (*foo)(struct foo_s s) = &foo1; """) e = py.test.raises(NotImplementedError, lib.foo, "?") - msg = ("ctype 'struct foo_s' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend ' - 'on the missing fields)') + msg = ("ctype 'struct foo_s' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend on ' + "the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument " + "if the function is 'API mode' and non-variadic (i.e. declared " + "inside ffibuilder.cdef()+ffibuilder.set_source() and not taking " + "a final '...' 
argument)") assert str(e.value) == msg def test_func_returns_struct(): @@ -2146,14 +2150,23 @@ # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as argument") + "ctype 'Data' not supported as argument by libffi. Unions are only " + "supported as argument if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, bazptr) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as return value") + "ctype 'Data' not supported as return value by libffi. Unions are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( - "ctype 'MyStr' not supported as return value " - "(it is a struct with bit fields)") + "ctype 'MyStr' not supported as return value. It is a struct with " + "bit fields, which libffi does not support. Such structs are only " + "supported as return value if the function is 'API mode' and non-" + "variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' argument)") def test_verify_extra_arguments(): ffi = FFI() diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -2030,7 +2030,43 @@ """) assert lib.f().x == 200 e = py.test.raises(NotImplementedError, lib.g, 0) - print str(e.value) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. 
Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + +def test_call_with_nested_anonymous_struct(): + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi = FFI() + ffi.cdef(""" + struct foo { int a; union { int b, c; }; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_nested_anonymous_struct", """ + struct foo { int a; union { int b, c; }; }; + struct foo f(void) { + struct foo s = { 40 }; + s.b = 200; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().b == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') def test_call_with_bitfield(): ffi = FFI() @@ -2049,7 +2085,12 @@ """) assert lib.f().x == 11 e = py.test.raises(NotImplementedError, lib.g, 0) - print str(e.value) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a struct " + "with bit fields, which libffi does not support. Such structs are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' 
argument)") def test_call_with_zero_length_field(): ffi = FFI() @@ -2068,7 +2109,12 @@ """) assert lib.f().a == 42 e = py.test.raises(NotImplementedError, lib.g, 0) - print str(e.value) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a " + "struct with a zero-length array, which libffi does not support." + " Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") def test_call_with_union(): ffi = FFI() @@ -2087,7 +2133,11 @@ """) assert lib.f().a == 42 e = py.test.raises(NotImplementedError, lib.g, 0) - print str(e.value) + assert str(e.value) == ( + "ctype 'union foo' not supported as return value by libffi. " + "Unions are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") def test_call_with_packed_struct(): if sys.platform == 'win32': @@ -2104,9 +2154,17 @@ struct foo s = { 40, 200 }; return s; } - struct foo g(int a, ...) { } + struct foo g(int a, ...) { + struct foo s = { 41, 201 }; + return s; + } """) assert lib.f().y == chr(40) assert lib.f().x == 200 e = py.test.raises(NotImplementedError, lib.g, 0) - print str(e.value) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a " + "'packed' structure, with a different layout than expected by libffi." + " Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' 
argument)") diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -1042,9 +1042,13 @@ int (*foo)(struct foo_s s) = &foo1; """) e = py.test.raises(NotImplementedError, lib.foo, "?") - msg = ("ctype 'struct foo_s' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend ' - 'on the missing fields)') + msg = ("ctype 'struct foo_s' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend on ' + "the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument " + "if the function is 'API mode' and non-variadic (i.e. declared " + "inside ffibuilder.cdef()+ffibuilder.set_source() and not taking " + "a final '...' argument)") assert str(e.value) == msg def test_func_returns_struct(): @@ -2114,14 +2118,23 @@ # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as argument") + "ctype 'Data' not supported as argument by libffi. Unions are only " + "supported as argument if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, bazptr) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as return value") + "ctype 'Data' not supported as return value by libffi. Unions are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( - "ctype 'MyStr' not supported as return value " - "(it is a struct with bit fields)") + "ctype 'MyStr' not supported as return value. 
It is a struct with " + "bit fields, which libffi does not support. Such structs are only " + "supported as return value if the function is 'API mode' and non-" + "variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' argument)") def test_verify_extra_arguments(): ffi = FFI() From pypy.commits at gmail.com Thu Dec 22 11:53:56 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 08:53:56 -0800 (PST) Subject: [pypy-commit] cffi default: update version number to 1.9.2 Message-ID: <585c0524.e626c20a.f471c.dbe8@mx.google.com> Author: Armin Rigo Branch: Changeset: r2840:16f9a09216d5 Date: 2016-12-22 17:28 +0100 http://bitbucket.org/cffi/cffi/changeset/16f9a09216d5/ Log: update version number to 1.9.2 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2,7 +2,7 @@ #include #include "structmember.h" -#define CFFI_VERSION "1.9.1" +#define CFFI_VERSION "1.9.2" #ifdef MS_WIN32 #include diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.9.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.9.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.9.1" -__version_info__ = (1, 9, 1) +__version__ = "1.9.2" +__version_info__ = (1, 9, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.9.1" + "\ncompiled with cffi version: 1.9.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.9' # The full version, including alpha/beta/rc tags. -release = '1.9.1' +release = '1.9.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,13 +51,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.9.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.9.2.tar.gz - - MD5: b8fa7ccb87790531db3316ab17aa8244 + - MD5: ... - - SHA: 16265a4b305d433fb9089b19278502e904b0cb43 + - SHA: ... - - SHA256: 563e0bd53fda03c151573217b3a49b3abad8813de9dd0632e10090f6190fdaf8 + - SHA256: ... 
* Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -167,7 +167,7 @@ `Mailing list `_ """, - version='1.9.1', + version='1.9.2', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h', '_embedding.h']} From pypy.commits at gmail.com Thu Dec 22 11:54:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 08:54:38 -0800 (PST) Subject: [pypy-commit] pypy default: update to cffi 1.9.2 Message-ID: <585c054e.c9b3c20a.bbe06.ebea@mx.google.com> Author: Armin Rigo Branch: Changeset: r89214:29df4aac463b Date: 2016-12-22 17:51 +0100 http://bitbucket.org/pypy/pypy/changeset/29df4aac463b/ Log: update to cffi 1.9.2 diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.9.1" -__version_info__ = (1, 9, 1) +__version__ = "1.9.2" +__version_info__ = (1, 9, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.9.1" + "\ncompiled with cffi version: 1.9.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi -VERSION = "1.9.1" +VERSION = "1.9.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -17,7 +17,7 @@ from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer from pypy.module._cffi_backend.ctypevoid import W_CTypeVoid -from pypy.module._cffi_backend.ctypestruct import W_CTypeStruct +from pypy.module._cffi_backend.ctypestruct import W_CTypeStruct, W_CTypeUnion from pypy.module._cffi_backend.ctypeprim import (W_CTypePrimitiveSigned, W_CTypePrimitiveUnsigned, W_CTypePrimitiveCharOrUniChar, W_CTypePrimitiveFloat, W_CTypePrimitiveLongDouble) @@ -231,6 +231,11 @@ return cifbuilder.fb_struct_ffi_type(self, is_result_type) return _missing_ffi_type(self, cifbuilder, is_result_type) +def _union_ffi_type(self, cifbuilder, is_result_type): + if self.size >= 0: # only for a better error message + return cifbuilder.fb_union_ffi_type(self, is_result_type) + return _missing_ffi_type(self, cifbuilder, is_result_type) + def 
_primsigned_ffi_type(self, cifbuilder, is_result_type): size = self.size if size == 1: return clibffi.ffi_type_sint8 @@ -266,6 +271,7 @@ W_CType._get_ffi_type = _missing_ffi_type W_CTypeStruct._get_ffi_type = _struct_ffi_type +W_CTypeUnion._get_ffi_type = _union_ffi_type W_CTypePrimitiveSigned._get_ffi_type = _primsigned_ffi_type W_CTypePrimitiveCharOrUniChar._get_ffi_type = _primunsigned_ffi_type W_CTypePrimitiveUnsigned._get_ffi_type = _primunsigned_ffi_type @@ -276,6 +282,12 @@ # ---------- +_SUPPORTED_IN_API_MODE = ( + " are only supported as %s if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder" + ".cdef()+ffibuilder.set_source() and not taking a final '...' " + "argument)") + class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) @@ -297,6 +309,20 @@ def fb_fill_type(self, ctype, is_result_type): return ctype._get_ffi_type(self, is_result_type) + def fb_unsupported(self, ctype, is_result_type, detail): + place = "return value" if is_result_type else "argument" + raise oefmt(self.space.w_NotImplementedError, + "ctype '%s' not supported as %s. %s. " + "Such structs" + _SUPPORTED_IN_API_MODE, + ctype.name, place, detail, place) + + def fb_union_ffi_type(self, ctype, is_result_type=False): + place = "return value" if is_result_type else "argument" + raise oefmt(self.space.w_NotImplementedError, + "ctype '%s' not supported as %s by libffi. " + "Unions" + _SUPPORTED_IN_API_MODE, + ctype.name, place, place) + def fb_struct_ffi_type(self, ctype, is_result_type=False): # We can't pass a struct that was completed by verify(). # Issue: assume verify() is given "struct { long b; ...; }". @@ -309,37 +335,40 @@ # Another reason for 'custom_field_pos' would be anonymous # nested structures: we lost the information about having it # here, so better safe (and forbid it) than sorry (and maybe - # crash). + # crash). Note: it seems we only get in this case with + # ffi.verify(). 
space = self.space ctype.force_lazy_struct() if ctype._custom_field_pos: # these NotImplementedErrors may be caught and ignored until # a real call is made to a function of this type - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s (it is a struct declared " - "with \"...;\", but the C calling convention may depend " - "on the missing fields)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct declared with \"...;\", but the C " + "calling convention may depend on the missing fields; " + "or, it contains anonymous struct/unions") + # Another reason: __attribute__((packed)) is not supported by libffi. + if ctype._with_packed_change: + raise self.fb_unsupported(ctype, is_result_type, + "It is a 'packed' structure, with a different layout than " + "expected by libffi") # walk the fields, expanding arrays into repetitions; first, # only count how many flattened fields there are nflat = 0 for i, cf in enumerate(ctype._fields_list): if cf.is_bitfield(): - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s" - " (it is a struct with bit fields)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct with bit fields, which libffi does not " + "support") flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s (it is a struct" - " with a zero-length array)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct with a zero-length array, which libffi " + "does not support") nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypestruct.py 
b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -34,6 +34,7 @@ _fields_dict = None _custom_field_pos = False _with_var_array = False + _with_packed_changed = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -303,6 +303,7 @@ fields_dict = {} w_ctype._custom_field_pos = False with_var_array = False + with_packed_change = False for i in range(len(fields_w)): w_field = fields_w[i] @@ -333,7 +334,8 @@ # # update the total alignment requirement, but skip it if the # field is an anonymous bitfield or if SF_PACKED - falign = 1 if sflags & SF_PACKED else ftype.alignof() + falignorg = ftype.alignof() + falign = 1 if sflags & SF_PACKED else falignorg do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -359,7 +361,10 @@ bs_flag = ctypestruct.W_CField.BS_REGULAR # align this field to its own 'falign' by inserting padding + boffsetorg = (boffset + falignorg*8-1) & ~(falignorg*8-1) boffset = (boffset + falign*8-1) & ~(falign*8-1) + if boffsetorg != boffset: + with_packed_change = True if foffset >= 0: # a forced field position: ignore the offset just computed, @@ -372,6 +377,7 @@ if (fname == '' and isinstance(ftype, ctypestruct.W_CTypeStructOrUnion)): # a nested anonymous struct or union + # note: it seems we only get here with ffi.verify() srcfield2names = {} ftype.force_lazy_struct() for name, srcfld in ftype._fields_dict.items(): @@ -530,6 +536,7 @@ w_ctype._fields_dict = fields_dict #w_ctype._custom_field_pos = ...set above already w_ctype._with_var_array = with_var_array + w_ctype._with_packed_change = with_packed_change # ____________________________________________________________ diff --git 
a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.9.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.9.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -1084,9 +1084,13 @@ BFunc = new_function_type((BStruct,), BDouble) # internally not callable dummy_func = cast(BFunc, 42) e = py.test.raises(NotImplementedError, dummy_func, "?") - msg = ("ctype \'struct foo\' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend on ' - 'the missing fields)') + msg = ("ctype 'struct foo' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend ' + "on the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder." + "cdef()+ffibuilder.set_source() and not taking a final '...' 
" + "argument)") assert str(e.value) == msg def test_new_charp(): diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -6,9 +6,9 @@ import pypy.module.cpyext.api # side-effect of pre-importing it - at unwrap_spec(cdef=str, module_name=str, source=str) + at unwrap_spec(cdef=str, module_name=str, source=str, packed=int) def prepare(space, cdef, module_name, source, w_includes=None, - w_extra_source=None, w_min_version=None): + w_extra_source=None, w_min_version=None, packed=False): try: import cffi from cffi import FFI # <== the system one, which @@ -47,7 +47,7 @@ ffi = FFI() for include_ffi_object in includes: ffi.include(include_ffi_object._test_recompiler_source_ffi) - ffi.cdef(cdef) + ffi.cdef(cdef, packed=packed) ffi.set_source(module_name, source) ffi.emit_c_code(c_file) @@ -1838,3 +1838,149 @@ raises(ffi.error, ffi.sizeof, "vmat_t") p = ffi.new("vmat_t", 4) assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") + + def test_call_with_custom_field_pos(self): + ffi, lib = self.prepare(""" + struct foo { int x; ...; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_custom_field_pos", """ + struct foo { int y, x; }; + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 200 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. 
declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + + def test_call_with_nested_anonymous_struct(self): + import sys + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi, lib = self.prepare(""" + struct foo { int a; union { int b, c; }; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_nested_anonymous_struct", """ + struct foo { int a; union { int b, c; }; }; + struct foo f(void) { + struct foo s = { 40 }; + s.b = 200; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().b == 200 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + + def test_call_with_bitfield(self): + ffi, lib = self.prepare(""" + struct foo { int x:5; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_bitfield", """ + struct foo { int x:5; }; + struct foo f(void) { + struct foo s = { 11 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 11 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a struct " + "with bit fields, which libffi does not support. Such structs are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' 
argument)") + + def test_call_with_zero_length_field(self): + ffi, lib = self.prepare(""" + struct foo { int a; int x[0]; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_zero_length_field", """ + struct foo { int a; int x[0]; }; + struct foo f(void) { + struct foo s = { 42 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().a == 42 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a " + "struct with a zero-length array, which libffi does not support. " + "Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") + + def test_call_with_union(self): + ffi, lib = self.prepare(""" + union foo { int a; char b; }; + union foo f(void); + union foo g(int, ...); + """, "test_call_with_union", """ + union foo { int a; char b; }; + union foo f(void) { + union foo s = { 42 }; + return s; + } + union foo g(int a, ...) { } + """) + assert lib.f().a == 42 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'union foo' not supported as return value by libffi. " + "Unions are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") + + def test_call_with_packed_struct(self): + import sys + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi, lib = self.prepare(""" + struct foo { char y; int x; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_packed_struct", """ + struct foo { char y; int x; } __attribute__((packed)); + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) 
{ + struct foo s = { 41, 201 }; + return s; + } + """, packed=True, min_version=(1, 8, 3)) + assert lib.f().y == chr(40) + assert lib.f().x == 200 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a 'packed'" + " structure, with a different layout than expected by libffi. " + "Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1076,9 +1076,13 @@ int (*foo)(struct foo_s s) = &foo1; """) e = py.test.raises(NotImplementedError, lib.foo, "?") - msg = ("ctype 'struct foo_s' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend ' - 'on the missing fields)') + msg = ("ctype 'struct foo_s' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend on ' + "the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument " + "if the function is 'API mode' and non-variadic (i.e. declared " + "inside ffibuilder.cdef()+ffibuilder.set_source() and not taking " + "a final '...' argument)") assert str(e.value) == msg def test_func_returns_struct(): @@ -2147,14 +2151,23 @@ # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as argument") + "ctype 'Data' not supported as argument by libffi. Unions are only " + "supported as argument if the function is 'API mode' and " + "non-variadic (i.e. 
declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, bazptr) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as return value") + "ctype 'Data' not supported as return value by libffi. Unions are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( - "ctype 'MyStr' not supported as return value " - "(it is a struct with bit fields)") + "ctype 'MyStr' not supported as return value. It is a struct with " + "bit fields, which libffi does not support. Such structs are only " + "supported as return value if the function is 'API mode' and non-" + "variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' argument)") def test_verify_extra_arguments(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -2013,3 +2013,159 @@ py.test.raises(ffi.error, ffi.sizeof, "vmat_t") p = ffi.new("vmat_t", 4) assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") + +def test_call_with_custom_field_pos(): + ffi = FFI() + ffi.cdef(""" + struct foo { int x; ...; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_custom_field_pos", """ + struct foo { int y, x; }; + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. 
It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + +def test_call_with_nested_anonymous_struct(): + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi = FFI() + ffi.cdef(""" + struct foo { int a; union { int b, c; }; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_nested_anonymous_struct", """ + struct foo { int a; union { int b, c; }; }; + struct foo f(void) { + struct foo s = { 40 }; + s.b = 200; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().b == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + +def test_call_with_bitfield(): + ffi = FFI() + ffi.cdef(""" + struct foo { int x:5; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_bitfield", """ + struct foo { int x:5; }; + struct foo f(void) { + struct foo s = { 11 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 11 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a struct " + "with bit fields, which libffi does not support. 
Such structs are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' argument)") + +def test_call_with_zero_length_field(): + ffi = FFI() + ffi.cdef(""" + struct foo { int a; int x[0]; }; + struct foo f(void); + struct foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_zero_length_field", """ + struct foo { int a; int x[0]; }; + struct foo f(void) { + struct foo s = { 42 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().a == 42 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a " + "struct with a zero-length array, which libffi does not support." + " Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") + +def test_call_with_union(): + ffi = FFI() + ffi.cdef(""" + union foo { int a; char b; }; + union foo f(void); + union foo g(int, ...); + """) + lib = verify(ffi, "test_call_with_union", """ + union foo { int a; char b; }; + union foo f(void) { + union foo s = { 42 }; + return s; + } + union foo g(int a, ...) { } + """) + assert lib.f().a == 42 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'union foo' not supported as return value by libffi. " + "Unions are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' 
argument)") + +def test_call_with_packed_struct(): + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi = FFI() + ffi.cdef(""" + struct foo { char y; int x; }; + struct foo f(void); + struct foo g(int, ...); + """, packed=True) + lib = verify(ffi, "test_call_with_packed_struct", """ + struct foo { char y; int x; } __attribute__((packed)); + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { + struct foo s = { 41, 201 }; + return s; + } + """) + assert lib.f().y == chr(40) + assert lib.f().x == 200 + e = py.test.raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + "ctype 'struct foo' not supported as return value. It is a " + "'packed' structure, with a different layout than expected by libffi." + " Such structs are only supported as return value if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder.cdef()" + "+ffibuilder.set_source() and not taking a final '...' argument)") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1043,9 +1043,13 @@ int (*foo)(struct foo_s s) = &foo1; """) e = py.test.raises(NotImplementedError, lib.foo, "?") - msg = ("ctype 'struct foo_s' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend ' - 'on the missing fields)') + msg = ("ctype 'struct foo_s' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend on ' + "the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument " + "if the function is 'API mode' and non-variadic (i.e. declared " + "inside ffibuilder.cdef()+ffibuilder.set_source() and not taking " + "a final '...' 
argument)") assert str(e.value) == msg def test_func_returns_struct(): @@ -2115,14 +2119,23 @@ # assert did not crash so far e = py.test.raises(NotImplementedError, fooptr, ffi.new("Data *")) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as argument") + "ctype 'Data' not supported as argument by libffi. Unions are only " + "supported as argument if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, bazptr) assert str(e.value) == ( - "ctype 'Data' (size 4) not supported as return value") + "ctype 'Data' not supported as return value by libffi. Unions are " + "only supported as return value if the function is 'API mode' and " + "non-variadic (i.e. declared inside ffibuilder.cdef()+" + "ffibuilder.set_source() and not taking a final '...' argument)") e = py.test.raises(NotImplementedError, barptr) assert str(e.value) == ( - "ctype 'MyStr' not supported as return value " - "(it is a struct with bit fields)") + "ctype 'MyStr' not supported as return value. It is a struct with " + "bit fields, which libffi does not support. Such structs are only " + "supported as return value if the function is 'API mode' and non-" + "variadic (i.e. declared inside ffibuilder.cdef()+ffibuilder." + "set_source() and not taking a final '...' 
argument)") def test_verify_extra_arguments(): ffi = FFI() From pypy.commits at gmail.com Thu Dec 22 16:39:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 22 Dec 2016 13:39:38 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <585c481a.ce181c0a.bb41f.d4e9@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r838:15e71df1f27e Date: 2016-12-22 22:39 +0100 http://bitbucket.org/pypy/pypy.org/changeset/15e71df1f27e/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $66474 of $105000 (63.3%) + $66493 of $105000 (63.3%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Fri Dec 23 12:31:40 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 23 Dec 2016 09:31:40 -0800 (PST) Subject: [pypy-commit] pypy py3.5: apply Phil Connell's 7b3b3f6 Message-ID: <585d5f7c.820bc30a.5970b.c3a1@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89215:743a0fcaa4eb Date: 2016-12-23 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/743a0fcaa4eb/ Log: apply Phil Connell's 7b3b3f6 diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -950,6 +950,7 @@ class 日本: pass assert 日本.__name__ == '日本' + assert 日本.__qualname__ == 'test_class_nonascii..日本' assert '日本' in repr(日本) """ From pypy.commits at gmail.com Fri Dec 23 17:45:14 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 23 Dec 2016 14:45:14 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Handle pointers to primitive types and fixed-size arrays Message-ID: <585da8fa.c64bc20a.84589.45f3@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89217:2d79420b0e58 Date: 2016-12-18 01:24 +0000 http://bitbucket.org/pypy/pypy/changeset/2d79420b0e58/ Log: Handle pointers to primitive types and fixed-size arrays diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -730,6 +730,8 @@ TO = self.convert_type(obj.totype) if TO is lltype.Void: return rffi.VOIDP + elif isinstance(obj.totype, model.PrimitiveType): + return rffi.CArrayPtr(TO) return lltype.Ptr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: @@ -739,6 +741,8 @@ return lltype.Ptr(lltype.FuncType(args, res)) elif isinstance(obj, model.VoidType): return lltype.Void + elif isinstance(obj, model.ArrayType): + return rffi.CFixedArray(self.convert_type(obj.item), obj.length) else: raise NotImplementedError From pypy.commits at gmail.com Fri Dec 23 
17:45:18 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 23 Dec 2016 14:45:18 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Improve test_include() Message-ID: <585da8fe.61c9c20a.d7016.a369@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89219:65ef8cf5625f Date: 2016-12-23 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/65ef8cf5625f/ Log: Improve test_include() diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -46,7 +46,7 @@ assert 'PyFloatObject' in hdr.definitions assert 'PyObject_HEAD' in hdr.macros -def test_include(): +def test_include(tmpdir): cdef1 = """ typedef ssize_t Py_ssize_t; @@ -58,7 +58,8 @@ char *name; } Type; """ - hdr1 = parse_source(cdef1) + base_name = tmpdir / 'base.h' + base_name.write(cdef1) cdef2 = """ typedef struct { PyObject_HEAD @@ -66,6 +67,16 @@ Type *type; } Object; """ - hdr2 = parse_source(cdef2, includes=[hdr1]) - assert 'Object' in hdr2.definitions + (tmpdir / 'object.h').write(cdef2) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], + includes=['sys/types.h', 'base.h', 'object.h']) + hdr1 = parse_source(cdef1, eci=eci) + hdr1.configure_types() + Type = hdr1.definitions['Type'].OF + assert isinstance(Type, lltype.Struct) + hdr2 = parse_source(cdef2, includes=[hdr1], eci=eci) + hdr2.configure_types() assert 'Type' not in hdr2.definitions + Object = hdr2.definitions['Object'].OF + assert Object.c_type.TO is Type diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -31,6 +31,7 @@ initproc = P(FT([PyO, PyO, PyO], rffi.INT_real)) newfunc = P(FT([PyTypeObjectPtr, PyO, PyO], PyO)) allocfunc = P(FT([PyTypeObjectPtr, Py_ssize_t], PyO)) + unaryfunc = P(FT([PyO], PyO)) binaryfunc = P(FT([PyO, PyO], PyO)) ternaryfunc = P(FT([PyO, 
PyO, PyO], PyO)) From pypy.commits at gmail.com Fri Dec 23 17:45:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 23 Dec 2016 14:45:16 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Refactor includes Message-ID: <585da8fc.8d071c0a.32208.d25d@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89218:66787841431b Date: 2016-12-23 12:51 +0100 http://bitbucket.org/pypy/pypy/changeset/66787841431b/ Log: Refactor includes diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -682,6 +682,12 @@ self._Config = type( 'Config', (object,), {'_compilation_info_': eci}) self._TYPES = {} + self.includes = [] + + def include(self, other): + self.ctx.include(other.ctx) + self.structs.update(other.structs) + self.includes.append(other) def add_typedef(self, name, obj): assert name not in self.definitions @@ -749,12 +755,11 @@ def parse_source(source, includes=None, eci=None): ctx = Parser() + src = ParsedSource(source, ctx, eci=eci) if includes is not None: for header in includes: - ctx.include(header.ctx) - + src.include(header) ctx.parse(source) - src = ParsedSource(source, ctx, eci=eci) for name, (obj, quals) in ctx._declarations.iteritems(): if obj in ctx._included_declarations: continue diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -53,13 +53,19 @@ #define PyObject_HEAD \ Py_ssize_t ob_refcnt; \ Py_ssize_t ob_pypy_link; \ + + typedef struct { + char *name; + } Type; """ hdr1 = parse_source(cdef1) cdef2 = """ typedef struct { PyObject_HEAD Py_ssize_t ob_foo; + Type *type; } Object; """ hdr2 = parse_source(cdef2, includes=[hdr1]) assert 'Object' in hdr2.definitions + assert 'Type' not in hdr2.definitions From pypy.commits at gmail.com Fri Dec 23 17:45:19 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 
23 Dec 2016 14:45:19 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Add 2 (failing) tests Message-ID: <585da8ff.4dd41c0a.c02f0.c1dd@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89220:ec3d3a2fd7d3 Date: 2016-12-23 19:36 +0100 http://bitbucket.org/pypy/pypy/changeset/ec3d3a2fd7d3/ Log: Add 2 (failing) tests diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -80,3 +80,57 @@ assert 'Type' not in hdr2.definitions Object = hdr2.definitions['Object'].OF assert Object.c_type.TO is Type + +def test_incomplete(tmpdir): + cdef = """ + typedef ssize_t Py_ssize_t; + + typedef struct { + Py_ssize_t ob_refcnt; + Py_ssize_t ob_pypy_link; + struct _typeobject *ob_type; + } Object; + + typedef struct { + void *buf; + Object *obj; + } Buffer; + + """ + (tmpdir / 'foo.h').write(cdef) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], + includes=['sys/types.h', 'foo.h']) + foo_h = parse_source(cdef, eci=eci) + foo_h.configure_types() + Object = foo_h.definitions['Object'].OF + assert isinstance(Object, lltype.ForwardReference) or hash(Object) + +def test_recursive(tmpdir): + cdef = """ + typedef ssize_t Py_ssize_t; + + typedef struct { + Py_ssize_t ob_refcnt; + Py_ssize_t ob_pypy_link; + struct _typeobject *ob_type; + } Object; + + typedef struct { + void *buf; + Object *obj; + } Buffer; + + typedef struct _typeobject { + Object *obj; + } Type; + """ + (tmpdir / 'foo.h').write(cdef) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], + includes=['sys/types.h', 'foo.h']) + foo_h = parse_source(cdef, eci=eci) + foo_h.configure_types() + Object = foo_h.definitions['Object'].OF + assert isinstance(Object, lltype.Struct) + hash(Object) From pypy.commits at gmail.com Fri Dec 23 17:45:12 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 23 Dec 2016 14:45:12 -0800 (PST) Subject: [pypy-commit] pypy 
rffi-parser-2: Make ParsedSource.configure_types() testable Message-ID: <585da8f8.2350c20a.e5711.3137@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89216:2981e828d28d Date: 2016-12-23 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/2981e828d28d/ Log: Make ParsedSource.configure_types() testable diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -55,18 +55,19 @@ udir, ] -class CConfig: - _compilation_info_ = ExternalCompilationInfo( +configure_eci = ExternalCompilationInfo( include_dirs=include_dirs, includes=['Python.h', 'stdarg.h', 'structmember.h'], - compile_extra=['-DPy_BUILD_CORE'], - ) + compile_extra=['-DPy_BUILD_CORE']) + +class CConfig: + _compilation_info_ = configure_eci class CConfig2: - _compilation_info_ = CConfig._compilation_info_ + _compilation_info_ = configure_eci class CConfig_constants: - _compilation_info_ = CConfig._compilation_info_ + _compilation_info_ = configure_eci VA_LIST_P = rffi.VOIDP # rffi.COpaquePtr('va_list') CONST_STRING = lltype.Ptr(lltype.Array(lltype.Char, @@ -1484,7 +1485,7 @@ include_lines.append('RPY_EXPORTED %s %s;\n' % (typ, name)) lines.append('};\n') - eci2 = CConfig._compilation_info_.merge(ExternalCompilationInfo( + eci2 = configure_eci.merge(ExternalCompilationInfo( separate_module_sources = [''.join(lines)], post_include_bits = [''.join(include_lines)], )) diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -670,15 +670,17 @@ class ParsedSource(object): - def __init__(self, source, parser, definitions=None, macros=None): - from pypy.module.cpyext.api import CConfig + def __init__(self, source, parser, definitions=None, macros=None, eci=None): + from pypy.module.cpyext.api import configure_eci self.source = source self.definitions = definitions if definitions is not None else {} self.macros = macros if macros is 
not None else {} self.structs = {} self.ctx = parser - self._Config = type('Config', (object,), {}) - self._Config._compilation_info_ = CConfig._compilation_info_ + if eci is None: + eci = configure_eci + self._Config = type( + 'Config', (object,), {'_compilation_info_': eci}) self._TYPES = {} def add_typedef(self, name, obj): @@ -703,7 +705,6 @@ return DelayedStruct(obj.name, fields) def realize_struct(self, struct, type_name): - from pypy.module.cpyext.api import CConfig, TYPES configname = type_name.replace(' ', '__') setattr(self._Config, configname, rffi_platform.Struct(type_name, struct.fields)) @@ -742,14 +743,14 @@ raise NotImplementedError -def parse_source(source, includes=None): +def parse_source(source, includes=None, eci=None): ctx = Parser() if includes is not None: for header in includes: ctx.include(header.ctx) ctx.parse(source) - src = ParsedSource(source, ctx) + src = ParsedSource(source, ctx, eci=eci) for name, (obj, quals) in ctx._declarations.iteritems(): if obj in ctx._included_declarations: continue diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -1,7 +1,8 @@ -from rpython.rtyper.lltypesystem import rffi -from pypy.module.cpyext.cparser import Parser, cname_to_lltype, parse_source +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.module.cpyext.cparser import parse_source -def test_stuff(): +def test_configure(tmpdir): decl = """ typedef ssize_t Py_ssize_t; @@ -9,14 +10,19 @@ Py_ssize_t ob_refcnt; Py_ssize_t ob_pypy_link; double ob_fval; - } PyFloatObject; + } TestFloatObject; """ - ctx = Parser() - ctx.parse(decl) - obj = ctx._declarations['typedef PyFloatObject'][0] - assert [cname_to_lltype(tp.name) for tp in obj.fldtypes] == [ - rffi.SSIZE_T, rffi.SSIZE_T, rffi.DOUBLE] - res = parse_source(decl) + hdr = tmpdir / 
'header.h' + hdr.write(decl) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], includes=['sys/types.h', 'header.h']) + res = parse_source(decl, eci=eci) + res.configure_types() + TestFloatObject = res.definitions['TestFloatObject'].OF + assert isinstance(TestFloatObject, lltype.Struct) + assert TestFloatObject.c_ob_refcnt == rffi.SSIZE_T + assert TestFloatObject.c_ob_pypy_link == rffi.SSIZE_T + assert TestFloatObject.c_ob_fval == rffi.DOUBLE def test_simple(): decl = "typedef ssize_t Py_ssize_t;" From pypy.commits at gmail.com Sat Dec 24 03:06:36 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sat, 24 Dec 2016 00:06:36 -0800 (PST) Subject: [pypy-commit] pypy vendor/stdlib-3.6: update stdlib to v3.6.0 (41df79263a11) Message-ID: <585e2c8c.e7b1c20a.678f4.b12d@mx.google.com> Author: Philip Jenvey Branch: vendor/stdlib-3.6 Changeset: r89221:ace7255d9a26 Date: 2016-12-23 22:39 -0800 http://bitbucket.org/pypy/pypy/changeset/ace7255d9a26/ Log: update stdlib to v3.6.0 (41df79263a11) diff too long, truncating to 2000 out of 129052 lines diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py --- a/lib-python/3/_collections_abc.py +++ b/lib-python/3/_collections_abc.py @@ -9,9 +9,10 @@ from abc import ABCMeta, abstractmethod import sys -__all__ = ["Awaitable", "Coroutine", "AsyncIterable", "AsyncIterator", - "Hashable", "Iterable", "Iterator", "Generator", - "Sized", "Container", "Callable", +__all__ = ["Awaitable", "Coroutine", + "AsyncIterable", "AsyncIterator", "AsyncGenerator", + "Hashable", "Iterable", "Iterator", "Generator", "Reversible", + "Sized", "Container", "Callable", "Collection", "Set", "MutableSet", "Mapping", "MutableMapping", "MappingView", "KeysView", "ItemsView", "ValuesView", @@ -29,8 +30,8 @@ # so that they will pass tests like: # it = iter(somebytearray) # assert isinstance(it, Iterable) -# Note: in other implementations, these types many not be distinct -# and they make have their own implementation specific 
types that +# Note: in other implementations, these types might not be distinct +# and they may have their own implementation specific types that # are not included on this list. bytes_iterator = type(iter(b'')) bytearray_iterator = type(iter(bytearray())) @@ -41,6 +42,7 @@ list_iterator = type(iter([])) list_reverseiterator = type(iter(reversed([]))) range_iterator = type(iter(range(0))) +longrange_iterator = type(iter(range(1 << 1000))) set_iterator = type(iter(set())) str_iterator = type(iter("")) tuple_iterator = type(iter(())) @@ -58,10 +60,27 @@ coroutine = type(_coro) _coro.close() # Prevent ResourceWarning del _coro +## asynchronous generator ## +async def _ag(): yield +_ag = _ag() +async_generator = type(_ag) +del _ag ### ONE-TRICK PONIES ### +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + class Hashable(metaclass=ABCMeta): __slots__ = () @@ -73,11 +92,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Hashable: - for B in C.__mro__: - if "__hash__" in B.__dict__: - if B.__dict__["__hash__"]: - return True - break + return _check_methods(C, "__hash__") return NotImplemented @@ -92,11 +107,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Awaitable: - for B in C.__mro__: - if "__await__" in B.__dict__: - if B.__dict__["__await__"]: - return True - break + return _check_methods(C, "__await__") return NotImplemented @@ -137,14 +148,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Coroutine: - mro = C.__mro__ - for method in ('__await__', 'send', 'throw', 'close'): - for base in mro: - if method in base.__dict__: - break - else: - return NotImplemented - return True + return _check_methods(C, '__await__', 'send', 'throw', 'close') return NotImplemented @@ -162,8 +166,7 @@ @classmethod def __subclasshook__(cls, C): if cls is AsyncIterable: - if 
any("__aiter__" in B.__dict__ for B in C.__mro__): - return True + return _check_methods(C, "__aiter__") return NotImplemented @@ -182,12 +185,61 @@ @classmethod def __subclasshook__(cls, C): if cls is AsyncIterator: - if (any("__anext__" in B.__dict__ for B in C.__mro__) and - any("__aiter__" in B.__dict__ for B in C.__mro__)): - return True + return _check_methods(C, "__anext__", "__aiter__") return NotImplemented +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + async def aclose(self): + """Raise GeneratorExit inside coroutine. 
+ """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented + + +AsyncGenerator.register(async_generator) + + class Iterable(metaclass=ABCMeta): __slots__ = () @@ -200,8 +252,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Iterable: - if any("__iter__" in B.__dict__ for B in C.__mro__): - return True + return _check_methods(C, "__iter__") return NotImplemented @@ -220,9 +271,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Iterator: - if (any("__next__" in B.__dict__ for B in C.__mro__) and - any("__iter__" in B.__dict__ for B in C.__mro__)): - return True + return _check_methods(C, '__iter__', '__next__') return NotImplemented Iterator.register(bytes_iterator) @@ -234,12 +283,29 @@ Iterator.register(list_iterator) Iterator.register(list_reverseiterator) Iterator.register(range_iterator) +Iterator.register(longrange_iterator) Iterator.register(set_iterator) Iterator.register(str_iterator) Iterator.register(tuple_iterator) Iterator.register(zip_iterator) +class Reversible(Iterable): + + __slots__ = () + + @abstractmethod + def __reversed__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Reversible: + return _check_methods(C, "__reversed__", "__iter__") + return NotImplemented + + class Generator(Iterator): __slots__ = () @@ -283,17 +349,10 @@ @classmethod def __subclasshook__(cls, C): if cls is Generator: - mro = C.__mro__ - for method in ('__iter__', '__next__', 'send', 'throw', 'close'): - for base in mro: - if method in base.__dict__: - break - else: - return NotImplemented - return True + return _check_methods(C, '__iter__', '__next__', + 'send', 'throw', 'close') return NotImplemented - 
Generator.register(generator) @@ -308,8 +367,7 @@ @classmethod def __subclasshook__(cls, C): if cls is Sized: - if any("__len__" in B.__dict__ for B in C.__mro__): - return True + return _check_methods(C, "__len__") return NotImplemented @@ -324,10 +382,18 @@ @classmethod def __subclasshook__(cls, C): if cls is Container: - if any("__contains__" in B.__dict__ for B in C.__mro__): - return True + return _check_methods(C, "__contains__") return NotImplemented +class Collection(Sized, Iterable, Container): + + __slots__ = () + + @classmethod + def __subclasshook__(cls, C): + if cls is Collection: + return _check_methods(C, "__len__", "__iter__", "__contains__") + return NotImplemented class Callable(metaclass=ABCMeta): @@ -340,15 +406,14 @@ @classmethod def __subclasshook__(cls, C): if cls is Callable: - if any("__call__" in B.__dict__ for B in C.__mro__): - return True + return _check_methods(C, "__call__") return NotImplemented ### SETS ### -class Set(Sized, Iterable, Container): +class Set(Collection): """A set is a finite, iterable container. @@ -573,7 +638,7 @@ ### MAPPINGS ### -class Mapping(Sized, Iterable, Container): +class Mapping(Collection): __slots__ = () @@ -621,6 +686,8 @@ return NotImplemented return dict(self.items()) == dict(other.items()) + __reversed__ = None + Mapping.register(mappingproxy) @@ -670,7 +737,7 @@ except KeyError: return False else: - return v == value + return v is value or v == value def __iter__(self): for key in self._mapping: @@ -685,7 +752,8 @@ def __contains__(self, value): for key in self._mapping: - if value == self._mapping[key]: + v = self._mapping[key] + if v is value or v == value: return True return False @@ -794,7 +862,7 @@ ### SEQUENCES ### -class Sequence(Sized, Iterable, Container): +class Sequence(Reversible, Collection): """All the operations on a read-only sequence. 
@@ -820,7 +888,7 @@ def __contains__(self, value): for v in self: - if v == value: + if v is value or v == value: return True return False diff --git a/lib-python/3/_compat_pickle.py b/lib-python/3/_compat_pickle.py --- a/lib-python/3/_compat_pickle.py +++ b/lib-python/3/_compat_pickle.py @@ -242,3 +242,10 @@ for excname in PYTHON3_OSERROR_EXCEPTIONS: REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') + +PYTHON3_IMPORTERROR_EXCEPTIONS = ( + 'ModuleNotFoundError', +) + +for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py --- a/lib-python/3/_osx_support.py +++ b/lib-python/3/_osx_support.py @@ -210,7 +210,7 @@ # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub(r'-arch\s+\w+\s', ' ', flags, re.ASCII) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _save_modified_value(_config_vars, cv, flags) @@ -232,7 +232,7 @@ if 'CC' in os.environ: return _config_vars - if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None: # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself status = os.system( @@ -251,7 +251,7 @@ for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] - flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags) _save_modified_value(_config_vars, cv, flags) return _config_vars @@ -267,7 +267,7 @@ for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and '-arch' in _config_vars[cv]: flags = _config_vars[cv] - flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) flags = flags + ' ' + arch 
_save_modified_value(_config_vars, cv, flags) @@ -465,7 +465,7 @@ machine = 'fat' - archs = re.findall('-arch\s+(\S+)', cflags) + archs = re.findall(r'-arch\s+(\S+)', cflags) archs = tuple(sorted(set(archs))) if len(archs) == 1: diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py --- a/lib-python/3/_pydecimal.py +++ b/lib-python/3/_pydecimal.py @@ -148,7 +148,7 @@ __name__ = 'decimal' # For pickling __version__ = '1.70' # Highest version of the spec this complies with # See http://speleotrove.com/decimal/ -__libmpdec_version__ = "2.4.1" # compatible libmpdec version +__libmpdec_version__ = "2.4.2" # compatible libmpdec version import math as _math import numbers as _numbers @@ -589,7 +589,7 @@ # From a string # REs insist on real strings, so we can too. if isinstance(value, str): - m = _parser(value.strip()) + m = _parser(value.strip().replace("_", "")) if m is None: if context is None: context = getcontext() @@ -1010,6 +1010,56 @@ """ return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp) + def as_integer_ratio(self): + """Express a finite Decimal instance in the form n / d. + + Returns a pair (n, d) of integers. When called on an infinity + or NaN, raises OverflowError or ValueError respectively. + + >>> Decimal('3.14').as_integer_ratio() + (157, 50) + >>> Decimal('-123e5').as_integer_ratio() + (-12300000, 1) + >>> Decimal('0.00').as_integer_ratio() + (0, 1) + + """ + if self._is_special: + if self.is_nan(): + raise ValueError("cannot convert NaN to integer ratio") + else: + raise OverflowError("cannot convert Infinity to integer ratio") + + if not self: + return 0, 1 + + # Find n, d in lowest terms such that abs(self) == n / d; + # we'll deal with the sign later. + n = int(self._int) + if self._exp >= 0: + # self is an integer. + n, d = n * 10**self._exp, 1 + else: + # Find d2, d5 such that abs(self) = n / (2**d2 * 5**d5). 
+ d5 = -self._exp + while d5 > 0 and n % 5 == 0: + n //= 5 + d5 -= 1 + + # (n & -n).bit_length() - 1 counts trailing zeros in binary + # representation of n (provided n is nonzero). + d2 = -self._exp + shift2 = min((n & -n).bit_length() - 1, d2) + if shift2: + n >>= shift2 + d2 -= shift2 + + d = 5**d5 << d2 + + if self._sign: + n = -n + return n, d + def __repr__(self): """Represents the number as an instance of Decimal.""" # Invariant: eval(repr(d)) == d @@ -1068,12 +1118,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -4076,7 +4125,7 @@ This will make it round up for that operation. """ rounding = self.rounding - self.rounding= type + self.rounding = type return rounding def create_decimal(self, num='0'): @@ -4085,10 +4134,10 @@ This method implements the to-number operation of the IBM Decimal specification.""" - if isinstance(num, str) and num != num.strip(): + if isinstance(num, str) and (num != num.strip() or '_' in num): return self._raise_error(ConversionSyntax, - "no trailing or leading whitespace is " - "permitted.") + "trailing or leading whitespace and " + "underscores are not permitted.") d = Decimal(num, context=self) if d._isnan() and len(d._int) > self.prec - self.clamp: @@ -4107,7 +4156,7 @@ >>> context.create_decimal_from_float(3.1415926535897932) Traceback (most recent call last): ... 
- decimal.Inexact + decimal.Inexact: None """ d = Decimal.from_float(f) # An exact conversion @@ -5502,9 +5551,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. + + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py --- a/lib-python/3/_pyio.py +++ b/lib-python/3/_pyio.py @@ -6,7 +6,6 @@ import abc import codecs import errno -import array import stat import sys # Import _thread instead of threading to reduce startup cost @@ -161,6 +160,8 @@ opened in a text mode, and for bytes a BytesIO can be used like a file opened in a binary mode. 
""" + if not isinstance(file, int): + file = os.fspath(file) if not isinstance(file, (str, bytes, int)): raise TypeError("invalid file: %r" % file) if not isinstance(mode, str): @@ -182,8 +183,8 @@ text = "t" in modes binary = "b" in modes if "U" in modes: - if creating or writing or appending: - raise ValueError("can't use U and writing mode at once") + if creating or writing or appending or updating: + raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'") import warnings warnings.warn("'U' mode is deprecated", DeprecationWarning, 2) @@ -1516,7 +1517,7 @@ if self._fd >= 0 and self._closefd and not self.closed: import warnings warnings.warn('unclosed file %r' % (self,), ResourceWarning, - stacklevel=2) + stacklevel=2, source=self) self.close() def __getstate__(self): diff --git a/lib-python/3/_strptime.py b/lib-python/3/_strptime.py --- a/lib-python/3/_strptime.py +++ b/lib-python/3/_strptime.py @@ -199,12 +199,15 @@ 'f': r"(?P[0-9]{1,6})", 'H': r"(?P2[0-3]|[0-1]\d|\d)", 'I': r"(?P1[0-2]|0[1-9]|[1-9])", + 'G': r"(?P\d\d\d\d)", 'j': r"(?P36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])", 'm': r"(?P1[0-2]|0[1-9]|[1-9])", 'M': r"(?P[0-5]\d|\d)", 'S': r"(?P6[0-1]|[0-5]\d|\d)", 'U': r"(?P5[0-3]|[0-4]\d|\d)", 'w': r"(?P[0-6])", + 'u': r"(?P[1-7])", + 'V': r"(?P5[0-3]|0[1-9]|[1-4]\d|\d)", # W is set below by using 'U' 'y': r"(?P\d\d)", #XXX: Does 'Y' need to worry about having less or more than @@ -299,6 +302,22 @@ return 1 + days_to_week + day_of_week +def _calc_julian_from_V(iso_year, iso_week, iso_weekday): + """Calculate the Julian day based on the ISO 8601 year, week, and weekday. + ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. + ISO week days range from 1 (Monday) to 7 (Sunday). 
+ """ + correction = datetime_date(iso_year, 1, 4).isoweekday() + 3 + ordinal = (iso_week * 7) + iso_weekday - correction + # ordinal may be negative or 0 now, which means the date is in the previous + # calendar year + if ordinal < 1: + ordinal += datetime_date(iso_year, 1, 1).toordinal() + iso_year -= 1 + ordinal -= datetime_date(iso_year, 1, 1).toordinal() + return iso_year, ordinal + + def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the @@ -345,15 +364,15 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = None + iso_year = year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 tzoffset = None # Default to -1 to signify that values not known; not critical to have, # though - week_of_year = -1 - week_of_year_start = -1 + iso_week = week_of_year = None + week_of_year_start = None # weekday and julian defaulted to None so as to signal need to calculate # values weekday = julian = None @@ -375,6 +394,8 @@ year += 1900 elif group_key == 'Y': year = int(found_dict['Y']) + elif group_key == 'G': + iso_year = int(found_dict['G']) elif group_key == 'm': month = int(found_dict['m']) elif group_key == 'B': @@ -420,6 +441,9 @@ weekday = 6 else: weekday -= 1 + elif group_key == 'u': + weekday = int(found_dict['u']) + weekday -= 1 elif group_key == 'j': julian = int(found_dict['j']) elif group_key in ('U', 'W'): @@ -430,6 +454,8 @@ else: # W starts week on Monday. 
week_of_year_start = 0 + elif group_key == 'V': + iso_week = int(found_dict['V']) elif group_key == 'z': z = found_dict['z'] tzoffset = int(z[1:3]) * 60 + int(z[3:5]) @@ -450,32 +476,61 @@ else: tz = value break + # Deal with the cases where ambiguities arize + # don't assume default values for ISO week/year + if year is None and iso_year is not None: + if iso_week is None or weekday is None: + raise ValueError("ISO year directive '%G' must be used with " + "the ISO week directive '%V' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + if julian is not None: + raise ValueError("Day of the year directive '%j' is not " + "compatible with ISO year directive '%G'. " + "Use '%Y' instead.") + elif week_of_year is None and iso_week is not None: + if weekday is None: + raise ValueError("ISO week directive '%V' must be used with " + "the ISO year directive '%G' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + else: + raise ValueError("ISO week directive '%V' is incompatible with " + "the year directive '%Y'. Use the ISO year '%G' " + "instead.") + leap_year_fix = False if year is None and month == 2 and day == 29: year = 1904 # 1904 is first leap year of 20th century leap_year_fix = True elif year is None: year = 1900 + + # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. 
- if julian is None and week_of_year != -1 and weekday is not None: - week_starts_Mon = True if week_of_year_start == 0 else False - julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, - week_starts_Mon) - if julian <= 0: + if julian is None and weekday is not None: + if week_of_year is not None: + week_starts_Mon = True if week_of_year_start == 0 else False + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + elif iso_year is not None and iso_week is not None: + year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) + if julian is not None and julian <= 0: year -= 1 yday = 366 if calendar.isleap(year) else 365 julian += yday - # Cannot pre-calculate datetime_date() since can change in Julian - # calculation and thus could have different value for the day of the week - # calculation. + if julian is None: + # Cannot pre-calculate datetime_date() since can change in Julian + # calculation and thus could have different value for the day of + # the week calculation. # Need to add 1 to result since first day of the year is 1, not 0. julian = datetime_date(year, month, day).toordinal() - \ datetime_date(year, 1, 1).toordinal() + 1 - else: # Assume that if they bothered to include Julian day it will - # be accurate. - datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) + else: # Assume that if they bothered to include Julian day (or if it was + # calculated above with year/week/weekday) it will be accurate. 
+ datetime_result = datetime_date.fromordinal( + (julian - 1) + + datetime_date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py --- a/lib-python/3/aifc.py +++ b/lib-python/3/aifc.py @@ -257,6 +257,15 @@ _aifc_params = namedtuple('_aifc_params', 'nchannels sampwidth framerate nframes comptype compname') +_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)' +_aifc_params.sampwidth.__doc__ = 'Sample width in bytes' +_aifc_params.framerate.__doc__ = 'Sampling frequency' +_aifc_params.nframes.__doc__ = 'Number of audio frames' +_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)' +_aifc_params.compname.__doc__ = ("""\ +A human-readable version of the compression type +('not compressed' for AIFF files)""") + class Aifc_read: # Variables used in this class: diff --git a/lib-python/3/antigravity.py b/lib-python/3/antigravity.py --- a/lib-python/3/antigravity.py +++ b/lib-python/3/antigravity.py @@ -2,7 +2,7 @@ import webbrowser import hashlib -webbrowser.open("http://xkcd.com/353/") +webbrowser.open("https://xkcd.com/353/") def geohash(latitude, longitude, datedow): '''Compute geohash() using the Munroe algorithm. 
diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py --- a/lib-python/3/argparse.py +++ b/lib-python/3/argparse.py @@ -118,10 +118,16 @@ def __repr__(self): type_name = type(self).__name__ arg_strings = [] + star_args = {} for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): - arg_strings.append('%s=%r' % (name, value)) + if name.isidentifier(): + arg_strings.append('%s=%r' % (name, value)) + else: + star_args[name] = value + if star_args: + arg_strings.append('**%s' % repr(star_args)) return '%s(%s)' % (type_name, ', '.join(arg_strings)) def _get_kwargs(self): @@ -204,8 +210,6 @@ if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts - for func, args in self.items: - func(*args) item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() diff --git a/lib-python/3/ast.py b/lib-python/3/ast.py --- a/lib-python/3/ast.py +++ b/lib-python/3/ast.py @@ -35,6 +35,8 @@ return compile(source, filename, mode, PyCF_ONLY_AST) +_NUM_TYPES = (int, float, complex) + def literal_eval(node_or_string): """ Safely evaluate an expression node or a string containing a Python @@ -47,7 +49,9 @@ if isinstance(node_or_string, Expression): node_or_string = node_or_string.body def _convert(node): - if isinstance(node, (Str, Bytes)): + if isinstance(node, Constant): + return node.value + elif isinstance(node, (Str, Bytes)): return node.s elif isinstance(node, Num): return node.n @@ -62,24 +66,21 @@ in zip(node.keys, node.values)) elif isinstance(node, NameConstant): return node.value - elif isinstance(node, UnaryOp) and \ - isinstance(node.op, (UAdd, USub)) and \ - isinstance(node.operand, (Num, UnaryOp, BinOp)): + elif isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)): operand = _convert(node.operand) - if isinstance(node.op, UAdd): - return + operand - else: - return - operand - elif isinstance(node, BinOp) and \ - isinstance(node.op, 
(Add, Sub)) and \ - isinstance(node.right, (Num, UnaryOp, BinOp)) and \ - isinstance(node.left, (Num, UnaryOp, BinOp)): + if isinstance(operand, _NUM_TYPES): + if isinstance(node.op, UAdd): + return + operand + else: + return - operand + elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)): left = _convert(node.left) right = _convert(node.right) - if isinstance(node.op, Add): - return left + right - else: - return left - right + if isinstance(left, _NUM_TYPES) and isinstance(right, _NUM_TYPES): + if isinstance(node.op, Add): + return left + right + else: + return left - right raise ValueError('malformed node or string: ' + repr(node)) return _convert(node_or_string) @@ -196,12 +197,19 @@ """ if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)): raise TypeError("%r can't have docstrings" % node.__class__.__name__) - if node.body and isinstance(node.body[0], Expr) and \ - isinstance(node.body[0].value, Str): - if clean: - import inspect - return inspect.cleandoc(node.body[0].value.s) - return node.body[0].value.s + if not(node.body and isinstance(node.body[0], Expr)): + return + node = node.body[0].value + if isinstance(node, Str): + text = node.s + elif isinstance(node, Constant) and isinstance(node.value, str): + text = node.value + else: + return + if clean: + import inspect + text = inspect.cleandoc(text) + return text def walk(node): diff --git a/lib-python/3/asynchat.py b/lib-python/3/asynchat.py --- a/lib-python/3/asynchat.py +++ b/lib-python/3/asynchat.py @@ -285,35 +285,6 @@ return result -class fifo: - def __init__(self, list=None): - import warnings - warnings.warn('fifo class will be removed in Python 3.6', - DeprecationWarning, stacklevel=2) - if not list: - self.list = deque() - else: - self.list = deque(list) - - def __len__(self): - return len(self.list) - - def is_empty(self): - return not self.list - - def first(self): - return self.list[0] - - def push(self, data): - self.list.append(data) - - def pop(self): - if 
self.list: - return (1, self.list.popleft()) - else: - return (0, None) - - # Given 'haystack', see if any prefix of 'needle' is at its end. This # assumes an exact match has already been checked. Return the number of # characters matched. diff --git a/lib-python/3/asyncio/base_events.py b/lib-python/3/asyncio/base_events.py --- a/lib-python/3/asyncio/base_events.py +++ b/lib-python/3/asyncio/base_events.py @@ -13,7 +13,6 @@ to modify the meaning of the API call itself. """ - import collections import concurrent.futures import heapq @@ -28,6 +27,7 @@ import traceback import sys import warnings +import weakref from . import compat from . import coroutines @@ -41,9 +41,6 @@ __all__ = ['BaseEventLoop'] -# Argument for default thread pool executor creation. -_MAX_WORKERS = 5 - # Minimum number of _scheduled timer handles before cleanup of # cancelled handles is performed. _MIN_SCHEDULED_TIMER_HANDLES = 100 @@ -60,7 +57,7 @@ def _format_handle(handle): cb = handle._callback - if inspect.ismethod(cb) and isinstance(cb.__self__, tasks.Task): + if isinstance(getattr(cb, '__self__', None), tasks.Task): # format the task return repr(cb.__self__) else: @@ -76,12 +73,29 @@ return repr(fd) -# Linux's sock.type is a bitmask that can include extra info about socket. -_SOCKET_TYPE_MASK = 0 -if hasattr(socket, 'SOCK_NONBLOCK'): - _SOCKET_TYPE_MASK |= socket.SOCK_NONBLOCK -if hasattr(socket, 'SOCK_CLOEXEC'): - _SOCKET_TYPE_MASK |= socket.SOCK_CLOEXEC +def _set_reuseport(sock): + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError('reuse_port not supported by socket module') + else: + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except OSError: + raise ValueError('reuse_port not supported by socket module, ' + 'SO_REUSEPORT defined but not implemented.') + + +def _is_stream_socket(sock): + # Linux's socket.type is a bitmask that can include extra info + # about socket, therefore we can't do simple + # `sock_type == socket.SOCK_STREAM`. 
+ return (sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM + + +def _is_dgram_socket(sock): + # Linux's socket.type is a bitmask that can include extra info + # about socket, therefore we can't do simple + # `sock_type == socket.SOCK_DGRAM`. + return (sock.type & socket.SOCK_DGRAM) == socket.SOCK_DGRAM def _ipaddr_info(host, port, family, type, proto): @@ -94,8 +108,12 @@ host is None: return None - type &= ~_SOCKET_TYPE_MASK if type == socket.SOCK_STREAM: + # Linux only: + # getaddrinfo() can raise when socket.type is a bit mask. + # So if socket.type is a bit mask of SOCK_STREAM, and say + # SOCK_NONBLOCK, we simply return None, which will trigger + # a call to getaddrinfo() letting it process this request. proto = socket.IPPROTO_TCP elif type == socket.SOCK_DGRAM: proto = socket.IPPROTO_UDP @@ -104,27 +122,21 @@ if port is None: port = 0 - elif isinstance(port, bytes): - if port == b'': - port = 0 - else: - try: - port = int(port) - except ValueError: - # Might be a service name like b"http". - port = socket.getservbyname(port.decode('ascii')) - elif isinstance(port, str): - if port == '': - port = 0 - else: - try: - port = int(port) - except ValueError: - # Might be a service name like "http". - port = socket.getservbyname(port) + elif isinstance(port, bytes) and port == b'': + port = 0 + elif isinstance(port, str) and port == '': + port = 0 + else: + # If port's a service name like "http", don't skip getaddrinfo. + try: + port = int(port) + except (TypeError, ValueError): + return None if family == socket.AF_UNSPEC: - afs = [socket.AF_INET, socket.AF_INET6] + afs = [socket.AF_INET] + if hasattr(socket, 'AF_INET6'): + afs.append(socket.AF_INET6) else: afs = [family] @@ -242,6 +254,17 @@ self._task_factory = None self._coroutine_wrapper_set = False + if hasattr(sys, 'get_asyncgen_hooks'): + # Python >= 3.6 + # A weak set of all asynchronous generators that are + # being iterated by the loop. 
+ self._asyncgens = weakref.WeakSet() + else: + self._asyncgens = None + + # Set to True when `loop.shutdown_asyncgens` is called. + self._asyncgens_shutdown_called = False + def __repr__(self): return ('<%s running=%s closed=%s debug=%s>' % (self.__class__.__name__, self.is_running(), @@ -333,14 +356,67 @@ if self._closed: raise RuntimeError('Event loop is closed') + def _asyncgen_finalizer_hook(self, agen): + self._asyncgens.discard(agen) + if not self.is_closed(): + self.create_task(agen.aclose()) + # Wake up the loop if the finalizer was called from + # a different thread. + self._write_to_self() + + def _asyncgen_firstiter_hook(self, agen): + if self._asyncgens_shutdown_called: + warnings.warn( + "asynchronous generator {!r} was scheduled after " + "loop.shutdown_asyncgens() call".format(agen), + ResourceWarning, source=self) + + self._asyncgens.add(agen) + + @coroutine + def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if self._asyncgens is None or not len(self._asyncgens): + # If Python version is <3.6 or we don't have any asynchronous + # generators alive. 
+ return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + shutdown_coro = tasks.gather( + *[ag.aclose() for ag in closing_agens], + return_exceptions=True, + loop=self) + + results = yield from shutdown_coro + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler({ + 'message': 'an error occurred during closing of ' + 'asynchronous generator {!r}'.format(agen), + 'exception': result, + 'asyncgen': agen + }) + def run_forever(self): """Run until stop() is called.""" self._check_closed() if self.is_running(): - raise RuntimeError('Event loop is running.') + raise RuntimeError('This event loop is already running') + if events._get_running_loop() is not None: + raise RuntimeError( + 'Cannot run the event loop while another loop is running') self._set_coroutine_wrapper(self._debug) self._thread_id = threading.get_ident() + if self._asyncgens is not None: + old_agen_hooks = sys.get_asyncgen_hooks() + sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook) try: + events._set_running_loop(self) while True: self._run_once() if self._stopping: @@ -348,7 +424,10 @@ finally: self._stopping = False self._thread_id = None + events._set_running_loop(None) self._set_coroutine_wrapper(False) + if self._asyncgens is not None: + sys.set_asyncgen_hooks(*old_agen_hooks) def run_until_complete(self, future): """Run until the Future is done. 
@@ -363,7 +442,7 @@ """ self._check_closed() - new_task = not isinstance(future, futures.Future) + new_task = not futures.isfuture(future) future = tasks.ensure_future(future, loop=self) if new_task: # An exception is raised if the future didn't complete, so there @@ -426,7 +505,8 @@ if compat.PY34: def __del__(self): if not self.is_closed(): - warnings.warn("unclosed event loop %r" % self, ResourceWarning) + warnings.warn("unclosed event loop %r" % self, ResourceWarning, + source=self) if not self.is_running(): self.close() @@ -469,12 +549,10 @@ Absolute time corresponds to the event loop's time() method. """ - if (coroutines.iscoroutine(callback) - or coroutines.iscoroutinefunction(callback)): - raise TypeError("coroutines cannot be used with call_at()") self._check_closed() if self._debug: self._check_thread() + self._check_callback(callback, 'call_at') timer = events.TimerHandle(when, callback, args, self) if timer._source_traceback: del timer._source_traceback[-1] @@ -492,18 +570,27 @@ Any positional arguments after the callback will be passed to the callback when it is called. 
""" + self._check_closed() if self._debug: self._check_thread() + self._check_callback(callback, 'call_soon') handle = self._call_soon(callback, args) if handle._source_traceback: del handle._source_traceback[-1] return handle + def _check_callback(self, callback, method): + if (coroutines.iscoroutine(callback) or + coroutines.iscoroutinefunction(callback)): + raise TypeError( + "coroutines cannot be used with {}()".format(method)) + if not callable(callback): + raise TypeError( + 'a callable object was expected by {}(), got {!r}'.format( + method, callback)) + + def _call_soon(self, callback, args): - if (coroutines.iscoroutine(callback) - or coroutines.iscoroutinefunction(callback)): - raise TypeError("coroutines cannot be used with call_soon()") - self._check_closed() handle = events.Handle(callback, args, self) if handle._source_traceback: del handle._source_traceback[-1] @@ -529,6 +616,9 @@ def call_soon_threadsafe(self, callback, *args): """Like call_soon(), but thread-safe.""" + self._check_closed() + if self._debug: + self._check_callback(callback, 'call_soon_threadsafe') handle = self._call_soon(callback, args) if handle._source_traceback: del handle._source_traceback[-1] @@ -536,22 +626,13 @@ return handle def run_in_executor(self, executor, func, *args): - if (coroutines.iscoroutine(func) - or coroutines.iscoroutinefunction(func)): - raise TypeError("coroutines cannot be used with run_in_executor()") self._check_closed() - if isinstance(func, events.Handle): - assert not args - assert not isinstance(func, events.TimerHandle) - if func._cancelled: - f = self.create_future() - f.set_result(None) - return f - func, args = func._callback, func._args + if self._debug: + self._check_callback(func, 'run_in_executor') if executor is None: executor = self._default_executor if executor is None: - executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS) + executor = concurrent.futures.ThreadPoolExecutor() self._default_executor = executor return 
futures.wrap_future(executor.submit(func, *args), loop=self) @@ -703,11 +784,19 @@ raise OSError('Multiple exceptions: {}'.format( ', '.join(str(exc) for exc in exceptions))) - elif sock is None: - raise ValueError( - 'host and port was not specified and no sock specified') - - sock.setblocking(False) + else: + if sock is None: + raise ValueError( + 'host and port was not specified and no sock specified') + if not _is_stream_socket(sock): + # We allow AF_INET, AF_INET6, AF_UNIX as long as they + # are SOCK_STREAM. + # We support passing AF_UNIX sockets even though we have + # a dedicated API for that: create_unix_connection. + # Disallowing AF_UNIX in this method, breaks backwards + # compatibility. + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) transport, protocol = yield from self._create_connection_transport( sock, protocol_factory, ssl, server_hostname) @@ -721,14 +810,17 @@ @coroutine def _create_connection_transport(self, sock, protocol_factory, ssl, - server_hostname): + server_hostname, server_side=False): + + sock.setblocking(False) + protocol = protocol_factory() waiter = self.create_future() if ssl: sslcontext = None if isinstance(ssl, bool) else ssl transport = self._make_ssl_transport( sock, protocol, sslcontext, waiter, - server_side=False, server_hostname=server_hostname) + server_side=server_side, server_hostname=server_hostname) else: transport = self._make_socket_transport(sock, protocol, waiter) @@ -748,6 +840,9 @@ allow_broadcast=None, sock=None): """Create datagram connection.""" if sock is not None: + if not _is_dgram_socket(sock): + raise ValueError( + 'A UDP Socket was expected, got {!r}'.format(sock)) if (local_addr or remote_addr or family or proto or flags or reuse_address or reuse_port or allow_broadcast): @@ -813,12 +908,7 @@ sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if reuse_port: - if not hasattr(socket, 'SO_REUSEPORT'): - raise ValueError( - 'reuse_port not supported by socket module') 
- else: - sock.setsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + _set_reuseport(sock) if allow_broadcast: sock.setsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST, 1) @@ -941,12 +1031,7 @@ sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, True) if reuse_port: - if not hasattr(socket, 'SO_REUSEPORT'): - raise ValueError( - 'reuse_port not supported by socket module') - else: - sock.setsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT, True) + _set_reuseport(sock) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. @@ -968,18 +1053,44 @@ else: if sock is None: raise ValueError('Neither host/port nor sock were specified') + if not _is_stream_socket(sock): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) sockets = [sock] server = Server(self, sockets) for sock in sockets: sock.listen(backlog) sock.setblocking(False) - self._start_serving(protocol_factory, sock, ssl, server) + self._start_serving(protocol_factory, sock, ssl, server, backlog) if self._debug: logger.info("%r is serving", server) return server @coroutine + def connect_accepted_socket(self, protocol_factory, sock, *, ssl=None): + """Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. 
+ """ + if not _is_stream_socket(sock): + raise ValueError( + 'A Stream Socket was expected, got {!r}'.format(sock)) + + transport, protocol = yield from self._create_connection_transport( + sock, protocol_factory, ssl, '', server_side=True) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r handled: (%r, %r)", sock, transport, protocol) + return transport, protocol + + @coroutine def connect_read_pipe(self, protocol_factory, pipe): protocol = protocol_factory() waiter = self.create_future() @@ -1048,7 +1159,7 @@ transport = yield from self._make_subprocess_transport( protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) if self._debug: - logger.info('%s: %r' % (debug_log, transport)) + logger.info('%s: %r', debug_log, transport) return transport, protocol @coroutine @@ -1078,7 +1189,7 @@ protocol, popen_args, False, stdin, stdout, stderr, bufsize, **kwargs) if self._debug: - logger.info('%s: %r' % (debug_log, transport)) + logger.info('%s: %r', debug_log, transport) return transport, protocol def get_exception_handler(self): @@ -1158,7 +1269,9 @@ - 'handle' (optional): Handle instance; - 'protocol' (optional): Protocol instance; - 'transport' (optional): Transport instance; - - 'socket' (optional): Socket instance. + - 'socket' (optional): Socket instance; + - 'asyncgen' (optional): Asynchronous generator that caused + the exception. New keys maybe introduced in the future. diff --git a/lib-python/3/asyncio/base_futures.py b/lib-python/3/asyncio/base_futures.py new file mode 100644 --- /dev/null +++ b/lib-python/3/asyncio/base_futures.py @@ -0,0 +1,71 @@ +__all__ = [] + +import concurrent.futures._base +import reprlib + +from . 
import events + +Error = concurrent.futures._base.Error +CancelledError = concurrent.futures.CancelledError +TimeoutError = concurrent.futures.TimeoutError + + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + + +def isfuture(obj): + """Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + """ + return (hasattr(obj.__class__, '_asyncio_future_blocking') and + obj._asyncio_future_blocking is not None) + + +def _format_callbacks(cb): + """helper function for Future.__repr__""" + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return events._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0]) + elif size == 2: + cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1])) + elif size > 2: + cb = '{}, <{} more>, {}'.format(format_cb(cb[0]), + size - 2, + format_cb(cb[-1])) + return 'cb=[%s]' % cb + + +def _future_repr_info(future): + # (Future) -> str + """helper function for Future.__repr__""" + info = [future._state.lower()] + if future._state == _FINISHED: + if future._exception is not None: + info.append('exception={!r}'.format(future._exception)) + else: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(future._result) + info.append('result={}'.format(result)) + if future._callbacks: + info.append(_format_callbacks(future._callbacks)) + if future._source_traceback: + frame = future._source_traceback[-1] + info.append('created at %s:%s' % (frame[0], frame[1])) + return info diff --git a/lib-python/3/asyncio/base_subprocess.py b/lib-python/3/asyncio/base_subprocess.py --- a/lib-python/3/asyncio/base_subprocess.py +++ b/lib-python/3/asyncio/base_subprocess.py @@ -3,7 +3,6 @@ import 
warnings from . import compat -from . import futures from . import protocols from . import transports from .coroutines import coroutine @@ -87,6 +86,12 @@ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): raise NotImplementedError + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + def is_closing(self): return self._closed @@ -122,7 +127,8 @@ if compat.PY34: def __del__(self): if not self._closed: - warnings.warn("unclosed transport %r" % self, ResourceWarning) + warnings.warn("unclosed transport %r" % self, ResourceWarning, + source=self) self.close() def get_pid(self): diff --git a/lib-python/3/asyncio/base_tasks.py b/lib-python/3/asyncio/base_tasks.py new file mode 100644 --- /dev/null +++ b/lib-python/3/asyncio/base_tasks.py @@ -0,0 +1,76 @@ +import linecache +import traceback + +from . import base_futures +from . import coroutines + + +def _task_repr_info(task): + info = base_futures._future_repr_info(task) + + if task._must_cancel: + # replace status + info[0] = 'cancelling' + + coro = coroutines._format_coroutine(task._coro) + info.insert(1, 'coro=<%s>' % coro) + + if task._fut_waiter is not None: + info.insert(2, 'wait_for=%r' % task._fut_waiter) + return info + + +def _task_get_stack(task, limit): + frames = [] + try: + # 'async def' coroutines + f = task._coro.cr_frame + except AttributeError: + f = task._coro.gi_frame + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif task._exception is not None: + tb = task._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + +def _task_print_stack(task, limit, file): + extracted_list = [] + checked = set() + for f in task.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = 
co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + exc = task._exception + if not extracted_list: + print('No stack for %r' % task, file=file) + elif exc is not None: + print('Traceback for %r (most recent call last):' % task, + file=file) + else: + print('Stack for %r (most recent call last):' % task, + file=file) + traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') diff --git a/lib-python/3/asyncio/coroutines.py b/lib-python/3/asyncio/coroutines.py --- a/lib-python/3/asyncio/coroutines.py +++ b/lib-python/3/asyncio/coroutines.py @@ -11,7 +11,7 @@ from . import compat from . import events -from . import futures +from . import base_futures from .log import logger @@ -33,12 +33,16 @@ try: _types_coroutine = types.coroutine + _types_CoroutineType = types.CoroutineType except AttributeError: + # Python 3.4 _types_coroutine = None + _types_CoroutineType = None try: _inspect_iscoroutinefunction = inspect.iscoroutinefunction except AttributeError: + # Python 3.4 _inspect_iscoroutinefunction = lambda func: False try: @@ -120,8 +124,8 @@ def send(self, value): return self.gen.send(value) - def throw(self, exc): - return self.gen.throw(exc) + def throw(self, type, value=None, traceback=None): + return self.gen.throw(type, value, traceback) def close(self): return self.gen.close() @@ -204,8 +208,8 @@ @functools.wraps(func) def coro(*args, **kw): res = func(*args, **kw) - if isinstance(res, futures.Future) or inspect.isgenerator(res) or \ - isinstance(res, CoroWrapper): + if (base_futures.isfuture(res) or inspect.isgenerator(res) or + isinstance(res, CoroWrapper)): res = yield from res elif _AwaitableABC is not None: # If 'func' returns an Awaitable (new in 3.5) we @@ -238,19 
+242,27 @@ w.__qualname__ = getattr(func, '__qualname__', None) return w - wrapper._is_coroutine = True # For iscoroutinefunction(). + wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction(). return wrapper +# A marker for iscoroutinefunction. +_is_coroutine = object() + + def iscoroutinefunction(func): """Return True if func is a decorated coroutine function.""" - return (getattr(func, '_is_coroutine', False) or + return (getattr(func, '_is_coroutine', None) is _is_coroutine or _inspect_iscoroutinefunction(func)) _COROUTINE_TYPES = (types.GeneratorType, CoroWrapper) if _CoroutineABC is not None: _COROUTINE_TYPES += (_CoroutineABC,) +if _types_CoroutineType is not None: + # Prioritize native coroutine check to speed-up + # asyncio.iscoroutine. + _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES def iscoroutine(obj): @@ -261,6 +273,29 @@ def _format_coroutine(coro): assert iscoroutine(coro) + if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'): + # Most likely a built-in type or a Cython coroutine. + + # Built-in types might not have __qualname__ or __name__. 
+ coro_name = getattr( + coro, '__qualname__', + getattr(coro, '__name__', type(coro).__name__)) + coro_name = '{}()'.format(coro_name) + + running = False + try: + running = coro.cr_running + except AttributeError: + try: + running = coro.gi_running + except AttributeError: + pass + + if running: + return '{} running'.format(coro_name) + else: + return coro_name + coro_name = None if isinstance(coro, CoroWrapper): func = coro.func @@ -271,7 +306,7 @@ func = coro if coro_name is None: - coro_name = events._format_callback(func, ()) + coro_name = events._format_callback(func, (), {}) try: coro_code = coro.gi_code diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py --- a/lib-python/3/asyncio/events.py +++ b/lib-python/3/asyncio/events.py @@ -6,6 +6,7 @@ 'get_event_loop_policy', 'set_event_loop_policy', 'get_event_loop', 'set_event_loop', 'new_event_loop', 'get_child_watcher', 'set_child_watcher', + '_set_running_loop', '_get_running_loop', ] import functools @@ -35,23 +36,25 @@ return None -def _format_args(args): - """Format function arguments. +def _format_args_and_kwargs(args, kwargs): + """Format function arguments and keyword arguments. Special case for a single parameter: ('hello',) is formatted as ('hello'). 
""" # use reprlib to limit the length of the output - args_repr = reprlib.repr(args) - if len(args) == 1 and args_repr.endswith(',)'): - args_repr = args_repr[:-2] + ')' - return args_repr + items = [] + if args: + items.extend(reprlib.repr(arg) for arg in args) + if kwargs: + items.extend('{}={}'.format(k, reprlib.repr(v)) + for k, v in kwargs.items()) + return '(' + ', '.join(items) + ')' -def _format_callback(func, args, suffix=''): +def _format_callback(func, args, kwargs, suffix=''): if isinstance(func, functools.partial): - if args is not None: - suffix = _format_args(args) + suffix - return _format_callback(func.func, func.args, suffix) + suffix = _format_args_and_kwargs(args, kwargs) + suffix + return _format_callback(func.func, func.args, func.keywords, suffix) if hasattr(func, '__qualname__'): func_repr = getattr(func, '__qualname__') @@ -60,14 +63,13 @@ else: func_repr = repr(func) - if args is not None: - func_repr += _format_args(args) + func_repr += _format_args_and_kwargs(args, kwargs) if suffix: func_repr += suffix return func_repr def _format_callback_source(func, args): - func_repr = _format_callback(func, args) + func_repr = _format_callback(func, args, None) source = _get_function_source(func) if source: func_repr += ' at %s:%s' % source @@ -81,7 +83,6 @@ '_source_traceback', '_repr', '__weakref__') def __init__(self, callback, args, loop): - assert not isinstance(callback, Handle), 'A Handle is not a callback' self._loop = loop self._callback = callback self._args = args @@ -248,6 +249,10 @@ """ raise NotImplementedError + def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + raise NotImplementedError + # Methods scheduling callbacks. All these return Handles. def _timer_handle_cancelled(self, handle): @@ -603,6 +608,30 @@ _lock = threading.Lock() +# A TLS for the running event loop, used by _get_running_loop. 
+class _RunningLoop(threading.local): + _loop = None +_running_loop = _RunningLoop() + + +def _get_running_loop(): + """Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + return _running_loop._loop + + +def _set_running_loop(loop): + """Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + _running_loop._loop = loop + + def _init_event_loop_policy(): global _event_loop_policy with _lock: @@ -628,7 +657,17 @@ def get_event_loop(): - """Equivalent to calling get_event_loop_policy().get_event_loop().""" + """Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + """ + current_loop = _get_running_loop() + if current_loop is not None: + return current_loop return get_event_loop_policy().get_event_loop() diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py --- a/lib-python/3/asyncio/futures.py +++ b/lib-python/3/asyncio/futures.py @@ -1,35 +1,32 @@ """A Future class similar to the one in PEP 3148.""" -__all__ = ['CancelledError', 'TimeoutError', - 'InvalidStateError', - 'Future', 'wrap_future', - ] +__all__ = ['CancelledError', 'TimeoutError', 'InvalidStateError', + 'Future', 'wrap_future', 'isfuture'] -import concurrent.futures._base +import concurrent.futures import logging -import reprlib import sys import traceback +from . import base_futures from . import compat from . import events -# States for Future. 
-_PENDING = 'PENDING' -_CANCELLED = 'CANCELLED' -_FINISHED = 'FINISHED' -Error = concurrent.futures._base.Error -CancelledError = concurrent.futures.CancelledError -TimeoutError = concurrent.futures.TimeoutError +CancelledError = base_futures.CancelledError +InvalidStateError = base_futures.InvalidStateError +TimeoutError = base_futures.TimeoutError +isfuture = base_futures.isfuture + + +_PENDING = base_futures._PENDING +_CANCELLED = base_futures._CANCELLED +_FINISHED = base_futures._FINISHED + STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging -class InvalidStateError(Error): - """The operation is not allowed in this state.""" - - class _TracebackLogger: """Helper to log a traceback upon destruction if not cleared. @@ -134,7 +131,15 @@ _loop = None _source_traceback = None - _blocking = False # proper use of future (yield vs yield from) + # This field is used for a dual purpose: + # - Its presence is a marker to declare that a class implements + # the Future protocol (i.e. is intended to be duck-type compatible). + # The value must also be not-None, to enable a subclass to declare + # that it is not compatible by setting this to None. + # - It is set by __iter__() below so that Task._step() can tell + # the difference between `yield from Future()` (correct) vs. + # `yield Future()` (incorrect). 
+ _asyncio_future_blocking = False _log_traceback = False # Used for Python 3.4 and later _tb_logger = None # Used for Python 3.3 only @@ -154,45 +159,10 @@ if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) - def __format_callbacks(self): - cb = self._callbacks - size = len(cb) - if not size: - cb = '' - - def format_cb(callback): - return events._format_callback_source(callback, ()) - - if size == 1: - cb = format_cb(cb[0]) - elif size == 2: - cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1])) - elif size > 2: - cb = '{}, <{} more>, {}'.format(format_cb(cb[0]), - size-2, - format_cb(cb[-1])) - return 'cb=[%s]' % cb - - def _repr_info(self): - info = [self._state.lower()] - if self._state == _FINISHED: - if self._exception is not None: - info.append('exception={!r}'.format(self._exception)) - else: - # use reprlib to limit the length of the output, especially - # for very long strings - result = reprlib.repr(self._result) - info.append('result={}'.format(result)) - if self._callbacks: - info.append(self.__format_callbacks()) From pypy.commits at gmail.com Sun Dec 25 03:23:49 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 25 Dec 2016 00:23:49 -0800 (PST) Subject: [pypy-commit] cffi default: typo Message-ID: <585f8215.e576c20a.37a92.5c48@mx.google.com> Author: Armin Rigo Branch: Changeset: r2841:3425f640febb Date: 2016-12-25 09:23 +0100 http://bitbucket.org/cffi/cffi/changeset/3425f640febb/ Log: typo diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -902,7 +902,7 @@ from package.foo_build import ffi extra_args = dict( ext_modules=[ffi.verifier.get_extension()], - ext_packages="...", # if needed + ext_package="...", # if needed ) else: extra_args = dict( From pypy.commits at gmail.com Sun Dec 25 04:02:28 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 25 Dec 2016 01:02:28 -0800 (PST) Subject: [pypy-commit] cffi default: Improve the error in 
case of version mismatch Message-ID: <585f8b24.05371c0a.31ce.f2fe@mx.google.com> Author: Armin Rigo Branch: Changeset: r2842:f06bbfeeb823 Date: 2016-12-25 10:02 +0100 http://bitbucket.org/cffi/cffi/changeset/f06bbfeeb823/ Log: Improve the error in case of version mismatch diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -55,8 +55,17 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert backend.__version__ == __version__, \ - "version mismatch, %s != %s" % (backend.__version__, __version__) + if backend.__version__ != __version__: + # bad version! Try to be as explicit as possible. + if hasattr(backend, '__file__'): + # CPython + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % ( + __version__, __file__, + backend.__version__, backend.__file__)) + else: + # PyPy + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % ( + __version__, __file__, backend.__version__)) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
From pypy.commits at gmail.com Mon Dec 26 04:03:26 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 26 Dec 2016 01:03:26 -0800 (PST) Subject: [pypy-commit] pypy py3.5: Remove this bug-to-bug compatibility, it seems CPython will fix it too Message-ID: <5860dcde.e644c20a.60f79.fb48@mx.google.com> Author: Armin Rigo Branch: py3.5 Changeset: r89225:c85ac392093d Date: 2016-12-26 10:02 +0100 http://bitbucket.org/pypy/pypy/changeset/c85ac392093d/ Log: Remove this bug-to-bug compatibility, it seems CPython will fix it too diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -976,11 +976,12 @@ if not rposix.HAVE_FCHMODAT: if not follow_symlinks: raise argument_unavailable(space, "chmod", "follow_symlinks") - try: - dispatch_filename(rposix.chmod)(space, w_path, mode) - return - except OSError as e: - raise wrap_oserror2(space, e, w_path, eintr_retry=False) + while True: + try: + dispatch_filename(rposix.chmod)(space, w_path, mode) + return + except OSError as e: + wrap_oserror2(space, e, w_path, eintr_retry=True) try: path = space.fsencode_w(w_path) @@ -989,21 +990,25 @@ raise oefmt(space.w_TypeError, "argument should be string, bytes or integer, not %T", w_path) fd = unwrap_fd(space, w_path) - # NB. CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR - # to app-level, but os.fchmod(fd) retries automatically - try: - os.fchmod(fd, mode) - except OSError as e: - raise wrap_oserror(space, e, eintr_retry=False) - else: + # NB. in CPython 3.5.2, os.chmod(fd) propagates EINTR to app-level, + # but os.fchmod(fd) retries automatically. This might be fixed in + # more recent CPythons. 
+ while True: + try: + os.fchmod(fd, mode) + return + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + while True: try: _chmod_path(path, mode, dir_fd, follow_symlinks) + break except OSError as e: if not follow_symlinks and e.errno in (ENOTSUP, EOPNOTSUPP): # fchmodat() doesn't actually implement follow_symlinks=False # so raise NotImplementedError in this case raise argument_unavailable(space, "chmod", "follow_symlinks") - raise wrap_oserror2(space, e, w_path, eintr_retry=False) + wrap_oserror2(space, e, w_path, eintr_retry=True) def _chmod_path(path, mode, dir_fd, follow_symlinks): if dir_fd != DEFAULT_DIR_FD or not follow_symlinks: @@ -1016,8 +1021,6 @@ """\ Change the access permissions of the file given by file descriptor fd. """ - # NB. CPython 3.5.2: unclear why os.chmod(fd) propagates EINTR - # to app-level, but os.fchmod(fd) retries automatically while True: try: os.fchmod(fd, mode) @@ -2009,11 +2012,16 @@ if not follow_symlinks: raise oefmt(space.w_ValueError, "chown: cannnot use fd and follow_symlinks together") - try: - os.fchown(fd, uid, gid) - except OSError as e: - raise wrap_oserror(space, e, eintr_retry=False) - else: + # NB. in CPython 3.5.2, os.chown(fd) propagates EINTR to app-level, + # but os.fchown(fd) retries automatically. This might be fixed in + # more recent CPythons. + while True: + try: + os.fchown(fd, uid, gid) + return + except OSError as e: + wrap_oserror(space, e, eintr_retry=True) + while True: # String case try: if (rposix.HAVE_LCHOWN and @@ -2026,8 +2034,9 @@ assert follow_symlinks assert dir_fd == DEFAULT_DIR_FD os.chown(path, uid, gid) + break except OSError as e: - raise wrap_oserror2(space, e, w_path, eintr_retry=False) + wrap_oserror2(space, e, w_path, eintr_retry=True) @unwrap_spec(uid=c_uid_t, gid=c_gid_t) @@ -2049,7 +2058,6 @@ Change the owner and group id of the file given by file descriptor fd to the numeric uid and gid. 
Equivalent to os.chown(fd, uid, gid).""" - # same comment than about os.chmod(fd) vs. os.fchmod(fd) fd = space.c_filedescriptor_w(w_fd) while True: try: From pypy.commits at gmail.com Mon Dec 26 09:59:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 26 Dec 2016 06:59:09 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Avoid infinite recursion in recursive definitions Message-ID: <5861303d.8b9a1c0a.e728f.2081@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89226:08804887d145 Date: 2016-12-24 11:30 +0100 http://bitbucket.org/pypy/pypy/changeset/08804887d145/ Log: Avoid infinite recursion in recursive definitions diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -666,7 +666,7 @@ self.fields = fields def __repr__(self): - return "".format(vars(self)) + return "".format(**vars(self)) class ParsedSource(object): @@ -703,12 +703,16 @@ def new_struct(self, obj): if obj.fldtypes is None: - return lltype.ForwardReference() + struct = lltype.ForwardReference() else: - fields = zip( - obj.fldnames, - [self.convert_type(field) for field in obj.fldtypes]) - return DelayedStruct(obj.name, fields) + struct = DelayedStruct(obj.name, None) + # Cache it early, to avoid infinite recursion + self.structs[obj] = struct + if obj.fldtypes is not None: + struct.fields = zip( + obj.fldnames, + [self.convert_type(field) for field in obj.fldtypes]) + return struct def realize_struct(self, struct, type_name): configname = type_name.replace(' ', '__') @@ -729,9 +733,7 @@ elif isinstance(obj, model.StructType): if obj in self.structs: return self.structs[obj] - result = self.new_struct(obj) - self.structs[obj] = result - return result + return self.new_struct(obj) elif isinstance(obj, model.PointerType): TO = self.convert_type(obj.totype) if TO is lltype.Void: From pypy.commits at gmail.com Mon Dec 26 09:59:11 2016 From: pypy.commits at gmail.com (rlamy) Date: 
Mon, 26 Dec 2016 06:59:11 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: pointers to DelayedStruct Message-ID: <5861303f.c4251c0a.6f1d.0e4f@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89227:c72102095ceb Date: 2016-12-18 01:16 +0000 http://bitbucket.org/pypy/pypy/changeset/c72102095ceb/ Log: pointers to DelayedStruct diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -661,9 +661,10 @@ return CNAME_TO_LLTYPE[name] class DelayedStruct(object): - def __init__(self, name, fields): + def __init__(self, name, fields, TYPE): self.struct_name = name self.fields = fields + self.TYPE = TYPE def __repr__(self): return "".format(**vars(self)) @@ -702,10 +703,7 @@ self.macros[name] = value def new_struct(self, obj): - if obj.fldtypes is None: - struct = lltype.ForwardReference() - else: - struct = DelayedStruct(obj.name, None) + struct = DelayedStruct(obj.name, None, lltype.ForwardReference()) # Cache it early, to avoid infinite recursion self.structs[obj] = struct if obj.fldtypes is not None: @@ -740,6 +738,8 @@ return rffi.VOIDP elif isinstance(obj.totype, model.PrimitiveType): return rffi.CArrayPtr(TO) + elif isinstance(TO, DelayedStruct): + TO = TO.TYPE return lltype.Ptr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: From pypy.commits at gmail.com Mon Dec 26 09:59:18 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 26 Dec 2016 06:59:18 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Add configure_now flag Message-ID: <58613046.876ec20a.e50e2.63bf@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89231:7757afd4a8a2 Date: 2016-12-26 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/7757afd4a8a2/ Log: Add configure_now flag diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -666,15 +666,6 @@ 
self.fields = fields self.TYPE = TYPE - def is_ready(self): - if self.fields is None: - return False - try: - [hash(fld[1]) for fld in self.fields] - return True - except TypeError: - return False - def config_fields(self): result = [] for name, value in self.fields: @@ -708,11 +699,11 @@ self.structs.update(other.structs) self.includes.append(other) - def add_typedef(self, name, obj): + def add_typedef(self, name, obj, configure_now=False): assert name not in self.definitions tp = self.convert_type(obj) if isinstance(tp, DelayedStruct): - tp = self.realize_struct(tp, name) + tp = self.realize_struct(tp, name, configure_now=configure_now) self.definitions[name] = tp def add_macro(self, name, value): @@ -729,10 +720,10 @@ [self.convert_type(field) for field in obj.fldtypes]) return struct - def realize_struct(self, struct, type_name): + def realize_struct(self, struct, type_name, configure_now=False): from pypy.module.cpyext.api import cpython_struct configname = type_name.replace(' ', '__') - if struct.is_ready(): + if configure_now: setattr(self._Config, configname, rffi_platform.Struct(type_name, struct.config_fields())) self._TYPES[configname] = struct.TYPE @@ -740,13 +731,13 @@ cpython_struct(type_name, struct.fields, forward=struct.TYPE) return struct.TYPE - def configure_types(self): + def configure_types(self, configure_now=False): for name, (obj, quals) in self.ctx._declarations.iteritems(): if obj in self.ctx._included_declarations: continue if name.startswith('typedef '): name = name[8:] - self.add_typedef(name, obj) + self.add_typedef(name, obj, configure_now=configure_now) elif name.startswith('macro '): name = name[6:] self.add_macro(name, obj) @@ -784,12 +775,12 @@ raise NotImplementedError -def parse_source(source, includes=None, eci=None): +def parse_source(source, includes=None, eci=None, configure_now=False): ctx = Parser() src = ParsedSource(source, ctx, eci=eci) if includes is not None: for header in includes: src.include(header) 
ctx.parse(source) - src.configure_types() + src.configure_types(configure_now=configure_now) return src diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -16,7 +16,7 @@ hdr.write(decl) eci = ExternalCompilationInfo( include_dirs=[str(tmpdir)], includes=['sys/types.h', 'header.h']) - res = parse_source(decl, eci=eci) + res = parse_source(decl, eci=eci, configure_now=True) TestFloatObject = res.definitions['TestFloatObject'] assert isinstance(TestFloatObject, lltype.Struct) assert TestFloatObject.c_ob_refcnt == rffi.SSIZE_T @@ -70,10 +70,10 @@ eci = ExternalCompilationInfo( include_dirs=[str(tmpdir)], includes=['sys/types.h', 'base.h', 'object.h']) - hdr1 = parse_source(cdef1, eci=eci) + hdr1 = parse_source(cdef1, eci=eci, configure_now=True) Type = hdr1.definitions['Type'] assert isinstance(Type, lltype.Struct) - hdr2 = parse_source(cdef2, includes=[hdr1], eci=eci) + hdr2 = parse_source(cdef2, includes=[hdr1], eci=eci, configure_now=True) assert 'Type' not in hdr2.definitions Object = hdr2.definitions['Object'] assert Object.c_type.TO is Type @@ -125,7 +125,7 @@ eci = ExternalCompilationInfo( include_dirs=[str(tmpdir)], includes=['sys/types.h', 'foo.h']) - foo_h = parse_source(cdef, eci=eci) + foo_h = parse_source(cdef, eci=eci, configure_now=True) Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.Struct) hash(Object) From pypy.commits at gmail.com Mon Dec 26 09:59:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 26 Dec 2016 06:59:15 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Export the type itself in ParsedSource().definitions Message-ID: <58613043.031f1c0a.bd093.06eb@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89229:609e4f904c01 Date: 2016-12-26 11:04 +0100 http://bitbucket.org/pypy/pypy/changeset/609e4f904c01/ Log: Export the type itself in 
ParsedSource().definitions diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -627,7 +627,7 @@ """) h.configure_types() -Py_ssize_t = h.definitions['Py_ssize_t'] +Py_ssize_t = lltype.Typedef(h.definitions['Py_ssize_t'], 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -635,15 +635,15 @@ # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. -PyTypeObject = h.definitions['PyTypeObject'].OF +PyTypeObject = h.definitions['PyTypeObject'] PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = h.definitions['PyObject'].OF +PyObjectStruct = h.definitions['PyObject'] PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -PyVarObjectStruct = h.definitions['PyVarObject'].OF +PyVarObjectStruct = h.definitions['PyVarObject'] PyVarObject = lltype.Ptr(PyVarObjectStruct) Py_buffer = cpython_struct( diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -705,7 +705,7 @@ if isinstance(tp, DelayedStruct): tp = self.realize_struct(tp, name) self.structs[obj] = tp - self.definitions[name] = lltype.Typedef(tp, name) + self.definitions[name] = tp def add_macro(self, name, value): assert name not in self.macros diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -18,7 +18,7 @@ include_dirs=[str(tmpdir)], includes=['sys/types.h', 'header.h']) res = parse_source(decl, eci=eci) res.configure_types() - TestFloatObject = 
res.definitions['TestFloatObject'].OF + TestFloatObject = res.definitions['TestFloatObject'] assert isinstance(TestFloatObject, lltype.Struct) assert TestFloatObject.c_ob_refcnt == rffi.SSIZE_T assert TestFloatObject.c_ob_pypy_link == rffi.SSIZE_T @@ -73,12 +73,12 @@ includes=['sys/types.h', 'base.h', 'object.h']) hdr1 = parse_source(cdef1, eci=eci) hdr1.configure_types() - Type = hdr1.definitions['Type'].OF + Type = hdr1.definitions['Type'] assert isinstance(Type, lltype.Struct) hdr2 = parse_source(cdef2, includes=[hdr1], eci=eci) hdr2.configure_types() assert 'Type' not in hdr2.definitions - Object = hdr2.definitions['Object'].OF + Object = hdr2.definitions['Object'] assert Object.c_type.TO is Type def test_incomplete(tmpdir): @@ -103,7 +103,7 @@ includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) foo_h.configure_types() - Object = foo_h.definitions['Object'].OF + Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.ForwardReference) or hash(Object) def test_recursive(tmpdir): @@ -131,6 +131,6 @@ includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) foo_h.configure_types() - Object = foo_h.definitions['Object'].OF + Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.Struct) hash(Object) From pypy.commits at gmail.com Mon Dec 26 09:59:17 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 26 Dec 2016 06:59:17 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: refactor .configure_types() Message-ID: <58613045.c11d1c0a.5313b.28cc@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89230:16dc5f310fe9 Date: 2016-12-26 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/16dc5f310fe9/ Log: refactor .configure_types() diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -18,7 +18,7 @@ method = pypy.module.cpyext.typeobject.get_new_method_def(space) w_obj = 
pypy.module.cpyext.methodobject.W_PyCFunctionObject(space, method, space.wrap('')) space.appexec([space.type(w_obj)], """(methodtype): - from pickle import Pickler + from pickle import Pickler Pickler.dispatch[methodtype] = Pickler.save_global """) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -625,7 +625,6 @@ typedef void (*freefunc)(void *); """) -h.configure_types() Py_ssize_t = lltype.Typedef(h.definitions['Py_ssize_t'], 'Py_ssize_t') Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -675,6 +675,15 @@ except TypeError: return False + def config_fields(self): + result = [] + for name, value in self.fields: + if isinstance(value, DelayedStruct): + result.append((name, value.TYPE)) + else: + result.append((name, value)) + return result + def __repr__(self): return "".format(**vars(self)) @@ -704,7 +713,6 @@ tp = self.convert_type(obj) if isinstance(tp, DelayedStruct): tp = self.realize_struct(tp, name) - self.structs[obj] = tp self.definitions[name] = tp def add_macro(self, name, value): @@ -726,13 +734,22 @@ configname = type_name.replace(' ', '__') if struct.is_ready(): setattr(self._Config, configname, - rffi_platform.Struct(type_name, struct.fields)) + rffi_platform.Struct(type_name, struct.config_fields())) self._TYPES[configname] = struct.TYPE else: cpython_struct(type_name, struct.fields, forward=struct.TYPE) return struct.TYPE def configure_types(self): + for name, (obj, quals) in self.ctx._declarations.iteritems(): + if obj in self.ctx._included_declarations: + continue + if name.startswith('typedef '): + name = name[8:] + self.add_typedef(name, obj) + elif name.startswith('macro '): + name = name[6:] + self.add_macro(name, obj) for name, TYPE in rffi_platform.configure(self._Config).iteritems(): if name in self._TYPES: 
self._TYPES[name].become(TYPE) @@ -774,13 +791,5 @@ for header in includes: src.include(header) ctx.parse(source) - for name, (obj, quals) in ctx._declarations.iteritems(): - if obj in ctx._included_declarations: - continue - if name.startswith('typedef '): - name = name[8:] - src.add_typedef(name, obj) - elif name.startswith('macro '): - name = name[6:] - src.add_macro(name, obj) + src.configure_types() return src diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -17,7 +17,6 @@ eci = ExternalCompilationInfo( include_dirs=[str(tmpdir)], includes=['sys/types.h', 'header.h']) res = parse_source(decl, eci=eci) - res.configure_types() TestFloatObject = res.definitions['TestFloatObject'] assert isinstance(TestFloatObject, lltype.Struct) assert TestFloatObject.c_ob_refcnt == rffi.SSIZE_T @@ -72,11 +71,9 @@ include_dirs=[str(tmpdir)], includes=['sys/types.h', 'base.h', 'object.h']) hdr1 = parse_source(cdef1, eci=eci) - hdr1.configure_types() Type = hdr1.definitions['Type'] assert isinstance(Type, lltype.Struct) hdr2 = parse_source(cdef2, includes=[hdr1], eci=eci) - hdr2.configure_types() assert 'Type' not in hdr2.definitions Object = hdr2.definitions['Object'] assert Object.c_type.TO is Type @@ -102,7 +99,6 @@ include_dirs=[str(tmpdir)], includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) - foo_h.configure_types() Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.ForwardReference) or hash(Object) @@ -130,7 +126,6 @@ include_dirs=[str(tmpdir)], includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) - foo_h.configure_types() Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.Struct) hash(Object) From pypy.commits at gmail.com Mon Dec 26 09:59:13 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 26 Dec 2016 06:59:13 -0800 (PST) Subject: [pypy-commit] pypy 
rffi-parser-2: Don't configure incomplete structs Message-ID: <58613041.2919c20a.cae20.de50@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89228:a2b33bc5a642 Date: 2016-12-24 12:39 +0100 http://bitbucket.org/pypy/pypy/changeset/a2b33bc5a642/ Log: Don't configure incomplete structs diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -666,6 +666,15 @@ self.fields = fields self.TYPE = TYPE + def is_ready(self): + if self.fields is None: + return False + try: + [hash(fld[1]) for fld in self.fields] + return True + except TypeError: + return False + def __repr__(self): return "".format(**vars(self)) @@ -713,12 +722,15 @@ return struct def realize_struct(self, struct, type_name): + from pypy.module.cpyext.api import cpython_struct configname = type_name.replace(' ', '__') - setattr(self._Config, configname, - rffi_platform.Struct(type_name, struct.fields)) - forward = lltype.ForwardReference() - self._TYPES[configname] = forward - return forward + if struct.is_ready(): + setattr(self._Config, configname, + rffi_platform.Struct(type_name, struct.fields)) + self._TYPES[configname] = struct.TYPE + else: + cpython_struct(type_name, struct.fields, forward=struct.TYPE) + return struct.TYPE def configure_types(self): for name, TYPE in rffi_platform.configure(self._Config).iteritems(): From pypy.commits at gmail.com Mon Dec 26 13:32:38 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:38 -0800 (PST) Subject: [pypy-commit] pypy issue2444: rewrite c-level tests, find a misisng decref, always run finalizer to decref Message-ID: <58616246.0b561c0a.9ea67.4e83@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89232:b3bfb5b6f7e2 Date: 2016-12-25 22:35 +0200 http://bitbucket.org/pypy/pypy/changeset/b3bfb5b6f7e2/ Log: rewrite c-level tests, find a misisng decref, always run finalizer to decref diff --git 
a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -408,10 +408,9 @@ py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type releasebuffer = rffi.cast(rffi.VOIDP, 0) - need_finalizer = False if py_type.c_tp_as_buffer: releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) - need_finalizer = True + decref(space, py_obj) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) @@ -419,19 +418,17 @@ space.fromcache(State).check_and_raise_exception(always=True) buf = CPyBuffer(space, ptr[0], size, w_self, releasebuffer=releasebuffer) - if need_finalizer: - fq.register_finalizer(buf) + fq.register_finalizer(buf) return space.newbuffer(buf) def wrap_getwritebuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type + decref(space, py_obj) releasebuffer = rffi.cast(rffi.VOIDP, 0) - need_finalizer = False if py_type.c_tp_as_buffer: releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) - need_finalizer = True with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) @@ -439,8 +436,7 @@ space.fromcache(State).check_and_raise_exception(always=True) buf = CPyBuffer(space, ptr[0], size, w_self, readonly=False, releasebuffer=releasebuffer) - if need_finalizer: - fq.register_finalizer(buf) + fq.register_finalizer(buf) return space.newbuffer(buf) def wrap_getbuffer(space, w_self, w_args, func): @@ -448,10 +444,9 @@ py_obj = make_ref(space, w_self) py_type = py_obj.c_ob_type releasebuffer = rffi.cast(rffi.VOIDP, 0) - need_finalizer = False if py_type.c_tp_as_buffer: releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) - need_finalizer = True + 
decref(space, py_obj) with lltype.scoped_alloc(Py_buffer) as pybuf: _flags = 0 if space.len_w(w_args) > 0: @@ -477,8 +472,7 @@ itemsize=pybuf.c_itemsize, readonly=widen(pybuf.c_readonly), releasebuffer = releasebuffer) - if need_finalizer: - fq.register_finalizer(buf) + fq.register_finalizer(buf) return space.newbuffer(buf) def get_richcmp_func(OP_CONST): diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -185,19 +185,6 @@ (initproc)PyMyArray_init, /* tp_init */ }; -static PyObject* -test_buffer(PyObject* self, PyObject* args) -{ - Py_buffer* view = NULL; - PyObject* obj = PyTuple_GetItem(args, 0); - PyObject* memoryview = PyMemoryView_FromObject(obj); - if (memoryview == NULL) - return PyInt_FromLong(-1); - view = PyMemoryView_GET_BUFFER(memoryview); - Py_DECREF(memoryview); - return PyInt_FromLong(view->len); -} - /* Copied from numpy tests */ /* * Create python string from a FLAG and or the corresponding PyBuf flag @@ -308,7 +295,6 @@ static PyMethodDef buffer_functions[] = { - {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, {"get_buffer_info", (PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -20,16 +20,16 @@ def test_frombuffer(self, space, api): w_buf = space.newbuffer(StringBuffer("hello")) w_memoryview = api.PyMemoryView_FromObject(w_buf) - w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) - assert w_view.c_ndim == 1 - f = rffi.charp2str(w_view.c_format) + view = api.PyMemoryView_GET_BUFFER(w_memoryview) + assert view.c_ndim == 1 + f = rffi.charp2str(view.c_format) assert f == 'B' - assert w_view.c_shape[0] == 5 - assert w_view.c_strides[0] == 1 - assert w_view.c_len == 5 - o 
= rffi.charp2str(w_view.c_buf) + assert view.c_shape[0] == 5 + assert view.c_strides[0] == 1 + assert view.c_len == 5 + o = rffi.charp2str(view.c_buf) assert o == 'hello' - w_mv = api.PyMemoryView_FromBuffer(w_view) + w_mv = api.PyMemoryView_FromBuffer(view) for f in ('format', 'itemsize', 'ndim', 'readonly', 'shape', 'strides', 'suboffsets'): w_f = space.wrap(f) @@ -37,7 +37,7 @@ space.getattr(w_memoryview, w_f)) class AppTestBufferProtocol(AppTestCpythonExtensionBase): - def test_buffer_protocol(self): + def test_buffer_protocol_app(self): import struct module = self.import_module(name='buffer_test') arr = module.PyMyArray(10) @@ -48,8 +48,26 @@ s = y[3] assert len(s) == struct.calcsize('i') assert s == struct.pack('i', 3) - viewlen = module.test_buffer(arr) - assert viewlen == y.itemsize * len(y) + + def test_buffer_protocol_capi(self): + foo = self.import_extension('foo', [ + ("get_len", "METH_VARARGS", + """ + Py_buffer view; + PyObject* obj = PyTuple_GetItem(args, 0); + long ret, vlen; + memset(&view, 0, sizeof(Py_buffer)); + ret = PyObject_GetBuffer(obj, &view, PyBUF_FULL); + if (ret != 0) + return NULL; + vlen = view.len / view.itemsize; + PyBuffer_Release(&view); + return PyInt_FromLong(vlen); + """)]) + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + ten = foo.get_len(arr) + assert ten == 10 @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_buffer_info(self): From pypy.commits at gmail.com Mon Dec 26 13:32:40 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:40 -0800 (PST) Subject: [pypy-commit] pypy issue2444: add test that passes but in teardown calls CPyBuffer finalizer after space is nonvalid Message-ID: <58616248.43e61c0a.f871f.56a1@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89233:ac6b84467eb0 Date: 2016-12-25 22:38 +0200 http://bitbucket.org/pypy/pypy/changeset/ac6b84467eb0/ Log: add test that passes but in teardown calls CPyBuffer finalizer after space is 
nonvalid diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -63,11 +63,24 @@ vlen = view.len / view.itemsize; PyBuffer_Release(&view); return PyInt_FromLong(vlen); - """)]) + """), + ("test_buffer", "METH_VARARGS", + """ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len / view->itemsize); + """)]) module = self.import_module(name='buffer_test') arr = module.PyMyArray(10) ten = foo.get_len(arr) assert ten == 10 + ten = foo.test_buffer(arr) + assert ten == 10 @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_buffer_info(self): From pypy.commits at gmail.com Mon Dec 26 13:32:44 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:44 -0800 (PST) Subject: [pypy-commit] pypy issue2444: merge heads Message-ID: <5861624c.e644c20a.60f79.c1f4@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89235:a456561d0a22 Date: 2016-12-26 19:20 +0200 http://bitbucket.org/pypy/pypy/changeset/a456561d0a22/ Log: merge heads diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -630,25 +630,27 @@ PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = cpython_struct( - "Py_buffer", ( +Py_buffer = rffi.CStruct( "Py_buffer", ('buf', rffi.VOIDP), ('obj', PyObject), ('len', Py_ssize_t), ('itemsize', Py_ssize_t), - ('readonly', lltype.Signed), - ('ndim', lltype.Signed), + ('readonly', rffi.INT_real), + ('ndim', rffi.INT_real), ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + 
('suboffsets', Py_ssize_tP), ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - ('internal', rffi.VOIDP) - )) + ('internal', rffi.VOIDP), + hints={'size': 6 * rffi.sizeof(Py_ssize_tP) + 2 * rffi.sizeof(Py_ssize_t) + + 2 * rffi.sizeof(rffi.INT_real) + rffi.sizeof(rffi.CCHARP) + + Py_MAX_FMT * rffi.sizeof(rffi.UCHAR) + + 2 * Py_MAX_NDIMS * rffi.sizeof(Py_ssize_t)}) Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( PyObjectFields, CANNOT_FAIL, - cpython_api, bootstrap_function, cpython_struct, build_type_checkers) + cpython_api, bootstrap_function, build_type_checkers) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref, Py_DecRef, make_typedescr from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.__builtin__.interp_classobj import W_ClassObject, W_InstanceObject diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,6 +5,14 @@ extern "C" { #endif +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! 
*/ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; + diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -145,7 +145,7 @@ /* Py3k buffer interface, adapted for PyPy */ #define Py_MAX_NDIMS 32 #define Py_MAX_FMT 128 -typedef struct bufferinfo { +typedef struct Py_buffer { void *buf; PyObject *obj; /* owned reference */ Py_ssize_t len; diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,14 +1,57 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, make_ref, incref, from_ref + Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, PyObjectFields, cpython_struct, + bootstrap_function, Py_bufferP) +from pypy.module.cpyext.pyobject import (PyObject, make_ref, as_pyobj, incref, + decref, from_ref, make_typedescr) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen from pypy.objspace.std.memoryobject import W_MemoryView +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import -PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") +PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") - at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + +PyMemoryViewObjectStruct = lltype.ForwardReference() +PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) +PyMemoryViewObjectFields = PyObjectFields + \ + (("view", Py_buffer),) +cpython_struct("PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct) + + at bootstrap_function +def init_memoryobject(space): + "Type description of PyDictObject" 
+ make_typedescr(W_MemoryView.typedef, + basestruct=PyMemoryViewObject.TO, + attach=memory_attach, + dealloc=memory_dealloc, + #realize=memory_realize, + ) + +def memory_attach(space, py_obj, w_obj, w_userdata=None): + """ + Fills a newly allocated PyMemoryViewObject with the given W_MemoryView object. + """ + py_obj = rffi.cast(PyMemoryViewObject, py_obj) + py_obj.c_view.c_obj = rffi.cast(PyObject, 0) + +def memory_realize(space, py_obj): + """ + Creates the memory object in the interpreter + """ + raise oefmt(space.w_NotImplementedError, "cannot call this yet") + + at cpython_api([PyObject], lltype.Void, header=None) +def memory_dealloc(space, py_obj): + mem_obj = rffi.cast(PyMemoryViewObject, py_obj) + if mem_obj.c_view.c_obj: + decref(space, mem_obj.c_view.c_obj) + mem_obj.c_view.c_obj = rffi.cast(PyObject, 0) + _dealloc(space, py_obj) + + + at cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, error=-1) def PyObject_GetBuffer(space, w_obj, view, flags): """Export obj into a Py_buffer, view. These arguments must @@ -38,14 +81,15 @@ view.c_obj = make_ref(space, w_obj) return ret - at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) + at cpython_api([PyObject], Py_bufferP, error=CANNOT_FAIL) def PyMemoryView_GET_BUFFER(space, w_obj): """Return a pointer to the buffer-info structure wrapped by the given object. The object must be a memoryview instance; this macro doesn't check its type, you must do it yourself or you will risk crashes.""" - view = lltype.malloc(Py_buffer, flavor='raw', zero=True) if not isinstance(w_obj, W_MemoryView): - return view + return lltype.nullptr(Py_buffer) + py_memobj = rffi.cast(PyMemoryViewObject, as_pyobj(space, w_obj)) # no inc_ref + view = py_memobj.c_view ndim = w_obj.buf.getndim() if ndim >= Py_MAX_NDIMS: # XXX warn? 
@@ -55,11 +99,13 @@ view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) view.c_obj = make_ref(space, w_obj) rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + isstr = False except ValueError: w_s = w_obj.descr_tobytes(space) view.c_obj = make_ref(space, w_s) view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) rffi.setintfield(view, 'c_readonly', 1) + isstr = True return view def fill_Py_buffer(space, buf, view): @@ -135,7 +181,7 @@ sd *= dim return 1 - at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([Py_bufferP, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fort is 'C') or Fortran-style (fort is 'F') contiguous or either one @@ -156,7 +202,7 @@ def PyMemoryView_FromObject(space, w_obj): return space.call_method(space.builtin, "memoryview", w_obj) - at cpython_api([lltype.Ptr(Py_buffer)], PyObject) + at cpython_api([Py_bufferP], PyObject) def PyMemoryView_FromBuffer(space, view): """Create a memoryview object wrapping the given buffer-info structure view. 
The memoryview object then owns the buffer, which means you shouldn't diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -356,7 +356,7 @@ with lltype.scoped_alloc(Py_buffer) as pybuf: pybuf.c_buf = self.ptr pybuf.c_len = self.size - pybuf.c_ndim = rffi.cast(rffi.INT, self.ndim) + pybuf.c_ndim = rffi.cast(rffi.INT_real, self.ndim) for i in range(self.ndim): pybuf.c_shape[i] = self.shape[i] pybuf.c_strides[i] = self.strides[i] diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -10,11 +10,7 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP -PyDateTime_Date = rffi.VOIDP -PyDateTime_DateTime = rffi.VOIDP -PyDateTime_Time = rffi.VOIDP wrapperbase = rffi.VOIDP FILE = rffi.VOIDP PyFileObject = rffi.VOIDP From pypy.commits at gmail.com Mon Dec 26 13:32:48 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:48 -0800 (PST) Subject: [pypy-commit] pypy issue2444: fix hack from f66ee40b4bc5 Message-ID: <58616250.07941c0a.61a51.4953@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89237:978a8d3f3300 Date: 2016-12-26 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/978a8d3f3300/ Log: fix hack from f66ee40b4bc5 diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -6,7 +6,6 @@ class Buffer(object): """Abstract base class for buffers.""" - __slots__ = ['readonly'] _immutable_ = True def getlength(self): @@ -79,7 +78,7 @@ pass class StringBuffer(Buffer): - __slots__ = ['value'] + __slots__ = ['readonly', 'value'] _immutable_ = True def __init__(self, value): @@ -110,7 +109,7 @@ class SubBuffer(Buffer): - __slots__ = ['buffer', 'offset', 'size'] + __slots__ = ['buffer', 'offset', 'size', 'readonly'] _immutable_ = 
True def __init__(self, buffer, offset, size): diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -450,7 +450,6 @@ "the object must have a __dict__" % (obj,)) assert (not hasattr(obj, '__slots__') or type(obj).__slots__ == () or - type(obj).__slots__ == ['readonly'] or type(obj).__slots__ == ('__weakref__',)), ( "%r: to run register_finalizer() untranslated, " "the object must not have __slots__" % (obj,)) From pypy.commits at gmail.com Mon Dec 26 13:32:51 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:51 -0800 (PST) Subject: [pypy-commit] pypy issue2444: merge default into branch Message-ID: <58616253.a285c20a.d98b9.d306@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89238:d70a136cc6ce Date: 2016-12-26 20:31 +0200 http://bitbucket.org/pypy/pypy/changeset/d70a136cc6ce/ Log: merge default into branch diff too long, truncating to 2000 out of 22898 lines diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. 
The basic type used for binary data read from or written to a file is the bytes type. Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. 
+ Creates an IncrementalDecoder instance. The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = 
dll._testfunc_cbk_large_struct + func.argtypes = (X, CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() + res = re.search(br'\sSONAME\s+([^\s]+)', dump) if not res: return None return res.group(1) @@ -152,23 +157,30 @@ def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] - parts = libname.split(".") + parts = libname.split(b".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass - return nums or [ sys.maxint ] + return nums or [sys.maxint] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) - f = os.popen('/sbin/ldconfig -r 2>/dev/null') + + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + proc = subprocess.Popen(('/sbin/ldconfig', '-r'), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. 
command not found + data = b'' + else: + [data, _] = proc.communicate() + res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) @@ -181,16 +193,32 @@ if not os.path.exists('/usr/bin/crle'): return None + env = dict(os.environ) + env['LC_ALL'] = 'C' + if is64: - cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null' + args = ('/usr/bin/crle', '-64') else: - cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null' + args = ('/usr/bin/crle',) paths = None - for line in os.popen(cmd).readlines(): - line = line.strip() - if line.startswith('Default Library Path (ELF):'): - paths = line.split()[4] + null = open(os.devnull, 'wb') + try: + with null: + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=null, + env=env) + except OSError: # E.g. bad executable + return None + try: + for line in proc.stdout: + line = line.strip() + if line.startswith(b'Default Library Path (ELF):'): + paths = line.split()[4] + finally: + proc.stdout.close() + proc.wait() if not paths: return None @@ -224,11 +252,20 @@ # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) - f = os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null') + + env = dict(os.environ) + env['LC_ALL'] = 'C' + env['LANG'] = 'C' + null = open(os.devnull, 'wb') try: - data = f.read() - finally: - f.close() + with null: + p = subprocess.Popen(['/sbin/ldconfig', '-p'], + stderr=null, + stdout=subprocess.PIPE, + env=env) + except OSError: # E.g. command not found + return None + [data, _] = p.communicate() res = re.search(expr, data) if not res: return None diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py --- a/lib-python/2.7/curses/ascii.py +++ b/lib-python/2.7/curses/ascii.py @@ -54,13 +54,13 @@ def isalnum(c): return isalpha(c) or isdigit(c) def isalpha(c): return isupper(c) or islower(c) def isascii(c): return _ctoi(c) <= 127 # ? 
-def isblank(c): return _ctoi(c) in (8,32) -def iscntrl(c): return _ctoi(c) <= 31 +def isblank(c): return _ctoi(c) in (9, 32) +def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127 def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57 def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126 def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122 def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126 -def ispunct(c): return _ctoi(c) != 32 and not isalnum(c) +def ispunct(c): return isgraph(c) and not isalnum(c) def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32) def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90 def isxdigit(c): return isdigit(c) or \ diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py --- a/lib-python/2.7/decimal.py +++ b/lib-python/2.7/decimal.py @@ -1048,12 +1048,11 @@ return sign + intpart + fracpart + exp def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context) @@ -5339,9 +5338,29 @@ return r def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. + """Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. The operation is not affected by the context. 
+ + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -166,6 +166,7 @@ self.include_dirs.append(plat_py_include) self.ensure_string_list('libraries') + self.ensure_string_list('link_objects') # Life is easier if we're not forever checking for None, so # simplify these options to empty lists if unset diff --git a/lib-python/2.7/distutils/config.py b/lib-python/2.7/distutils/config.py --- a/lib-python/2.7/distutils/config.py +++ b/lib-python/2.7/distutils/config.py @@ -21,7 +21,7 @@ class PyPIRCCommand(Command): """Base command that knows how to handle the .pypirc file """ - DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi' + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' DEFAULT_REALM = 'pypi' repository = None realm = None diff --git a/lib-python/2.7/distutils/cygwinccompiler.py b/lib-python/2.7/distutils/cygwinccompiler.py --- a/lib-python/2.7/distutils/cygwinccompiler.py +++ b/lib-python/2.7/distutils/cygwinccompiler.py @@ -350,7 +350,7 @@ # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by -# default, we should at least warn the user if he is using a unmodified +# default, we should at least warn the user if he is using an unmodified # version. 
CONFIG_H_OK = "ok" diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py --- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py +++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py @@ -8,6 +8,11 @@ from test.test_support import run_unittest +try: + import zlib +except ImportError: + zlib = None + from distutils.core import Distribution from distutils.command.bdist_rpm import bdist_rpm from distutils.tests import support @@ -44,6 +49,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') @unittest.skipIf(find_executable('rpmbuild') is None, @@ -86,6 +92,7 @@ # spurious sdtout/stderr output under Mac OS X @unittest.skipUnless(sys.platform.startswith('linux'), 'spurious sdtout/stderr output under Mac OS X') + @unittest.skipUnless(zlib, "requires zlib") # http://bugs.python.org/issue1533164 @unittest.skipIf(find_executable('rpm') is None, 'the rpm command is not found') diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py --- a/lib-python/2.7/distutils/tests/test_build_ext.py +++ b/lib-python/2.7/distutils/tests/test_build_ext.py @@ -168,6 +168,13 @@ cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) + # make sure cmd.link_objects is turned into a list + # if it's a string + cmd = build_ext(dist) + cmd.link_objects = 'one two,three' + cmd.finalize_options() + self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) + # XXX more tests to perform for win32 # make sure define is turned into 2-tuples @@ -215,7 +222,7 @@ self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' - # must be a ary (build info) + # must be a dictionary (build info) exts = 
[('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) diff --git a/lib-python/2.7/distutils/tests/test_config.py b/lib-python/2.7/distutils/tests/test_config.py --- a/lib-python/2.7/distutils/tests/test_config.py +++ b/lib-python/2.7/distutils/tests/test_config.py @@ -89,7 +89,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) @@ -99,7 +99,7 @@ config = config.items() config.sort() waited = [('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi'), + ('repository', 'https://upload.pypi.org/legacy/'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) diff --git a/lib-python/2.7/distutils/tests/test_msvc9compiler.py b/lib-python/2.7/distutils/tests/test_msvc9compiler.py --- a/lib-python/2.7/distutils/tests/test_msvc9compiler.py +++ b/lib-python/2.7/distutils/tests/test_msvc9compiler.py @@ -125,7 +125,7 @@ self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx') # looking for values that should exist on all - # windows registeries versions. + # windows registry versions. 
path = r'Control Panel\Desktop' v = Reg.get_value(path, u'dragfullwindows') self.assertIn(v, (u'0', u'1', u'2')) diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py --- a/lib-python/2.7/distutils/tests/test_upload.py +++ b/lib-python/2.7/distutils/tests/test_upload.py @@ -82,7 +82,7 @@ cmd.finalize_options() for attr, waited in (('username', 'me'), ('password', 'secret'), ('realm', 'pypi'), - ('repository', 'https://pypi.python.org/pypi')): + ('repository', 'https://upload.pypi.org/legacy/')): self.assertEqual(getattr(cmd, attr), waited) def test_saved_password(self): @@ -123,7 +123,7 @@ self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), - 'https://pypi.python.org/pypi') + 'https://upload.pypi.org/legacy/') self.assertIn('xxx', self.last_open.req.data) auth = self.last_open.req.headers['Authorization'] self.assertNotIn('\n', auth) diff --git a/lib-python/2.7/distutils/unixccompiler.py b/lib-python/2.7/distutils/unixccompiler.py --- a/lib-python/2.7/distutils/unixccompiler.py +++ b/lib-python/2.7/distutils/unixccompiler.py @@ -245,6 +245,8 @@ if sys.platform[:6] == "darwin": # MacOSX's linker doesn't understand the -R flag at all return "-L" + dir + elif sys.platform[:7] == "freebsd": + return "-Wl,-rpath=" + dir elif sys.platform[:5] == "hp-ux": if self._is_gcc(compiler): return ["-Wl,+s", "-L" + dir] diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py --- a/lib-python/2.7/doctest.py +++ b/lib-python/2.7/doctest.py @@ -219,7 +219,7 @@ with open(filename, 'U') as f: return f.read(), filename -# Use sys.stdout encoding for ouput. +# Use sys.stdout encoding for output. 
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): diff --git a/lib-python/2.7/dumbdbm.py b/lib-python/2.7/dumbdbm.py --- a/lib-python/2.7/dumbdbm.py +++ b/lib-python/2.7/dumbdbm.py @@ -45,8 +45,9 @@ _os = _os # for _commit() _open = _open # for _commit() - def __init__(self, filebasename, mode): + def __init__(self, filebasename, mode, flag='c'): self._mode = mode + self._readonly = (flag == 'r') # The directory file is a text file. Each line looks like # "%r, (%d, %d)\n" % (key, pos, siz) @@ -81,8 +82,9 @@ try: f = _open(self._dirfile) except IOError: - pass + self._modified = not self._readonly else: + self._modified = False with f: for line in f: line = line.rstrip() @@ -96,7 +98,7 @@ # CAUTION: It's vital that _commit() succeed, and _commit() can # be called from __del__(). Therefore we must never reference a # global in this routine. - if self._index is None: + if self._index is None or not self._modified: return # nothing to do try: @@ -159,6 +161,7 @@ def __setitem__(self, key, val): if not type(key) == type('') == type(val): raise TypeError, "keys and values must be strings" + self._modified = True if key not in self._index: self._addkey(key, self._addval(val)) else: @@ -184,6 +187,7 @@ # (so that _commit() never gets called). def __delitem__(self, key): + self._modified = True # The blocks used by the associated value are lost. del self._index[key] # XXX It's unclear why we do a _commit() here (the code always @@ -246,4 +250,4 @@ # Turn off any bits that are set in the umask mode = mode & (~um) - return _Database(file, mode) + return _Database(file, mode, flag) diff --git a/lib-python/2.7/email/base64mime.py b/lib-python/2.7/email/base64mime.py --- a/lib-python/2.7/email/base64mime.py +++ b/lib-python/2.7/email/base64mime.py @@ -166,7 +166,7 @@ decoding a text attachment. 
This function does not parse a full MIME header value encoded with - base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high level email.header class for that functionality. """ if not s: diff --git a/lib-python/2.7/email/quoprimime.py b/lib-python/2.7/email/quoprimime.py --- a/lib-python/2.7/email/quoprimime.py +++ b/lib-python/2.7/email/quoprimime.py @@ -329,7 +329,7 @@ """Decode a string encoded with RFC 2045 MIME header `Q' encoding. This function does not parse a full MIME header value encoded with - quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use the high level email.header class for that functionality. """ s = s.replace('_', ' ') diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -561,12 +561,12 @@ # Issue 5871: reject an attempt to embed a header inside a header value # (header injection attack). - def test_embeded_header_via_Header_rejected(self): + def test_embedded_header_via_Header_rejected(self): msg = Message() msg['Dummy'] = Header('dummy\nX-Injected-Header: test') self.assertRaises(Errors.HeaderParseError, msg.as_string) - def test_embeded_header_via_string_rejected(self): + def test_embedded_header_via_string_rejected(self): msg = Message() msg['Dummy'] = 'dummy\nX-Injected-Header: test' self.assertRaises(Errors.HeaderParseError, msg.as_string) @@ -1673,9 +1673,9 @@ def test_rfc2047_Q_invalid_digits(self): # issue 10004. 
- s = '=?iso-8659-1?Q?andr=e9=zz?=' + s = '=?iso-8859-1?Q?andr=e9=zz?=' self.assertEqual(decode_header(s), - [(b'andr\xe9=zz', 'iso-8659-1')]) + [(b'andr\xe9=zz', 'iso-8859-1')]) # Test the MIMEMessage class diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,23 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "20.10.1" +_SETUPTOOLS_VERSION = "28.8.0" -_PIP_VERSION = "8.1.1" - -# pip currently requires ssl support, so we try to provide a nicer -# error message when that is missing (http://bugs.python.org/issue19744) -_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION)) -try: - import ssl -except ImportError: - ssl = None - - def _require_ssl_for_pip(): - raise RuntimeError(_MISSING_SSL_MESSAGE) -else: - def _require_ssl_for_pip(): - pass +_PIP_VERSION = "9.0.1" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), @@ -77,7 +63,6 @@ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") - _require_ssl_for_pip() _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the @@ -143,7 +128,6 @@ print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return - _require_ssl_for_pip() _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command @@ -155,11 +139,6 @@ def _main(argv=None): - if ssl is None: - print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE), - file=sys.stderr) - return - import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( diff --git a/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-8.1.1-py2.py3-none-any.whl deleted file mode 100644 index 8632eb7af04c6337f0442a878ecb99cd2b1a67e0..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git 
a/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-9.0.1-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4b8ecc69db7e37fc6dd7b6dd8f690508f42866a1 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-20.10.1-py2.py3-none-any.whl deleted file mode 100644 index 9d1319a24aba103fe956ef6298e3649efacc0b93..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-28.8.0-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..502e3cb418c154872ad6e677ef8b63557b38ec35 GIT binary patch [cut] diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py --- a/lib-python/2.7/ftplib.py +++ b/lib-python/2.7/ftplib.py @@ -264,7 +264,7 @@ return self.voidcmd(cmd) def sendeprt(self, host, port): - '''Send a EPRT command with the current host and the given port number.''' + '''Send an EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 @@ -842,7 +842,7 @@ def parse229(resp, peer): - '''Parse the '229' response for a EPSV request. + '''Parse the '229' response for an EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' diff --git a/lib-python/2.7/gettext.py b/lib-python/2.7/gettext.py --- a/lib-python/2.7/gettext.py +++ b/lib-python/2.7/gettext.py @@ -59,74 +59,147 @@ _default_localedir = os.path.join(sys.prefix, 'share', 'locale') +# Expression parsing for plural form selection. +# +# The gettext library supports a small subset of C syntax. The only +# incompatible difference is that integer literals starting with zero are +# decimal. 
+# +# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms +# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y -def test(condition, true, false): - """ - Implements the C expression: +_token_pattern = re.compile(r""" + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + """, re.VERBOSE|re.DOTALL) - condition ? true : false +def _tokenize(plural): + for mo in re.finditer(_token_pattern, plural): + kind = mo.lastgroup + if kind == 'WHITESPACES': + continue + value = mo.group(kind) + if kind == 'INVALID': + raise ValueError('invalid token in plural form: %s' % value) + yield value + yield '' - Required to correctly interpret plural forms. - """ - if condition: - return true +def _error(value): + if value: + return ValueError('unexpected token in plural form: %s' % value) else: - return false + return ValueError('unexpected end of plural form') +_binary_ops = ( + ('||',), + ('&&',), + ('==', '!='), + ('<', '>', '<=', '>='), + ('+', '-'), + ('*', '/', '%'), +) +_binary_ops = {op: i for i, ops in enumerate(_binary_ops, 1) for op in ops} +_c2py_ops = {'||': 'or', '&&': 'and', '/': '//'} + +def _parse(tokens, priority=-1): + result = '' + nexttok = next(tokens) + while nexttok == '!': + result += 'not ' + nexttok = next(tokens) + + if nexttok == '(': + sub, nexttok = _parse(tokens) + result = '%s(%s)' % (result, sub) + if nexttok != ')': + raise ValueError('unbalanced parenthesis in plural form') + elif nexttok == 'n': + result = '%s%s' % (result, nexttok) + else: + try: + value = int(nexttok, 10) + except ValueError: + raise _error(nexttok) + result = '%s%d' % (result, value) + nexttok = next(tokens) + + j = 100 + while nexttok in _binary_ops: + i = _binary_ops[nexttok] + if i < priority: + break + # Break chained comparisons + if 
i in (3, 4) and j in (3, 4): # '==', '!=', '<', '>', '<=', '>=' + result = '(%s)' % result + # Replace some C operators by their Python equivalents + op = _c2py_ops.get(nexttok, nexttok) + right, nexttok = _parse(tokens, i + 1) + result = '%s %s %s' % (result, op, right) + j = i + if j == priority == 4: # '<', '>', '<=', '>=' + result = '(%s)' % result + + if nexttok == '?' and priority <= 0: + if_true, nexttok = _parse(tokens, 0) + if nexttok != ':': + raise _error(nexttok) + if_false, nexttok = _parse(tokens) + result = '%s if %s else %s' % (if_true, result, if_false) + if priority == 0: + result = '(%s)' % result + + return result, nexttok + +def _as_int(n): + try: + i = round(n) + except TypeError: + raise TypeError('Plural value must be an integer, got %s' % + (n.__class__.__name__,)) + return n def c2py(plural): """Gets a C expression as used in PO files for plural forms and returns a - Python lambda function that implements an equivalent expression. + Python function that implements an equivalent expression. """ - # Security check, allow only the "n" identifier + + if len(plural) > 1000: + raise ValueError('plural form expression is too long') try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - import token, tokenize - tokens = tokenize.generate_tokens(StringIO(plural).readline) - try: - danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n'] - except tokenize.TokenError: - raise ValueError, \ - 'plural forms expression error, maybe unbalanced parenthesis' - else: - if danger: - raise ValueError, 'plural forms expression could be dangerous' + result, nexttok = _parse(_tokenize(plural)) + if nexttok: + raise _error(nexttok) - # Replace some C operators by their Python equivalents - plural = plural.replace('&&', ' and ') - plural = plural.replace('||', ' or ') + depth = 0 + for c in result: + if c == '(': + depth += 1 + if depth > 20: + # Python compiler limit is about 90. + # The most complex example has 2. 
+ raise ValueError('plural form expression is too complex') + elif c == ')': + depth -= 1 - expr = re.compile(r'\!([^=])') - plural = expr.sub(' not \\1', plural) - - # Regular expression and replacement function used to transform - # "a?b:c" to "test(a,b,c)". - expr = re.compile(r'(.*?)\?(.*?):(.*)') - def repl(x): - return "test(%s, %s, %s)" % (x.group(1), x.group(2), - expr.sub(repl, x.group(3))) - - # Code to transform the plural expression, taking care of parentheses - stack = [''] - for c in plural: - if c == '(': - stack.append('') - elif c == ')': - if len(stack) == 1: - # Actually, we never reach this code, because unbalanced - # parentheses get caught in the security check at the - # beginning. - raise ValueError, 'unbalanced parenthesis in plural form' - s = expr.sub(repl, stack.pop()) - stack[-1] += '(%s)' % s - else: - stack[-1] += c - plural = expr.sub(repl, stack.pop()) - - return eval('lambda n: int(%s)' % plural) - + ns = {'_as_int': _as_int} + exec('''if 1: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + ''' % result, ns) + return ns['func'] + except RuntimeError: + # Recursion error can be raised in _parse() or exec(). + raise ValueError('plural form expression is too complex') def _expand_lang(locale): diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -242,7 +242,7 @@ # # VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1 -# the patterns for both name and value are more leniant than RFC +# the patterns for both name and value are more lenient than RFC # definitions to allow for backwards compatibility _is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match _is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search @@ -273,9 +273,8 @@ Read header lines up to the entirely blank line that terminates them. 
The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. + included in the returned list. If an invalid line is found in the + header section, it is skipped, and further lines are processed. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a @@ -302,19 +301,17 @@ self.status = '' headerseen = "" firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: + tell = None + if not hasattr(self.fp, 'unread') and self.seekable: tell = self.fp.tell while True: if len(hlist) > _MAXHEADERS: raise HTTPException("got more than %d headers" % _MAXHEADERS) if tell: try: - startofline = tell() + tell() except IOError: - startofline = tell = None + tell = None self.seekable = 0 line = self.fp.readline(_MAXLINE + 1) if len(line) > _MAXLINE: @@ -345,26 +342,14 @@ # It's a legal header line, save it. hlist.append(line) self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue elif headerseen is not None: # An empty header name. These aren't allowed in HTTP, but it's # probably a benign mistake. Don't add the header, just keep # going. - continue + pass else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break + # It's not a header line; skip it and try the next line. 
+ self.status = 'Non-header line where header expected' class HTTPResponse: diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -67,6 +67,8 @@ ('shell', [ ('_View Last Restart', '<>'), ('_Restart Shell', '<>'), + None, + ('_Interrupt Execution', '<>'), ]), ('debug', [ ('_Go to File/Line', '<>'), diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -9,7 +9,7 @@ HIDE_SEQUENCES = ("", "") CHECKHIDE_VIRTUAL_EVENT_NAME = "<>" CHECKHIDE_SEQUENCES = ("", "") -CHECKHIDE_TIME = 100 # miliseconds +CHECKHIDE_TIME = 100 # milliseconds MARK_RIGHT = "calltipwindowregion_right" diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -1384,7 +1384,7 @@ text.see("insert") text.undo_block_stop() - # Our editwin provides a is_char_in_string function that works + # Our editwin provides an is_char_in_string function that works # with a Tk text index, but PyParse only knows about offsets into # a string. This builds a function for PyParse that accepts an # offset. 
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -13,6 +13,7 @@ import sys import tempfile +from Tkinter import * import tkFileDialog import tkMessageBox from SimpleDialog import SimpleDialog @@ -91,6 +92,7 @@ # l2['state'] = DISABLED l2.pack(side=TOP, anchor = W, fill=X) l3 = Label(top, text="to your file\n" + "See Language Reference, 2.1.4 Encoding declarations.\n" "Choose OK to save this file as %s\n" "Edit your general options to silence this warning" % enc) l3.pack(side=TOP, anchor = W) diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,6 +1,41 @@ +What's New in IDLE 2.7.13? +========================== +*Release date: 2017-01-01?* + +- Issue #27854: Make Help => IDLE Help work again on Windows. + Include idlelib/help.html in 2.7 Windows installer. + +- Issue #25507: Add back import needed for 2.x encoding warning box. + Add pointer to 'Encoding declaration' in Language Reference. + +- Issue #15308: Add 'interrupt execution' (^C) to Shell menu. + Patch by Roger Serwy, updated by Bayard Randel. + +- Issue #27922: Stop IDLE tests from 'flashing' gui widgets on the screen. + +- Issue #17642: add larger font sizes for classroom projection. + +- Add version to title of IDLE help window. + +- Issue #25564: In section on IDLE -- console differences, mention that + using exec means that __builtins__ is defined for each statement. + +- Issue #27714: text_textview and test_autocomplete now pass when re-run + in the same process. This occurs when test_idle fails when run with the + -w option but without -jn. Fix warning from test_config. + +- Issue #27452: add line counter and crc to IDLE configHandler test dump. + +- Issue #27365: Allow non-ascii chars in IDLE NEWS.txt, for contributor names. 
+ +- Issue #27245: IDLE: Cleanly delete custom themes and key bindings. + Previously, when IDLE was started from a console or by import, a cascade + of warnings was emitted. Patch by Serhiy Storchaka. + + What's New in IDLE 2.7.12? ========================== -*Release date: 2015-06-30?* +*Release date: 2015-06-25* - Issue #5124: Paste with text selected now replaces the selection on X11. This matches how paste works on Windows, Mac, most modern Linux apps, @@ -174,7 +209,7 @@ Changes are written to HOME/.idlerc/config-extensions.cfg. Original patch by Tal Einat. -- Issue #16233: A module browser (File : Class Browser, Alt+C) requires a +- Issue #16233: A module browser (File : Class Browser, Alt+C) requires an editor window with a filename. When Class Browser is requested otherwise, from a shell, output window, or 'Untitled' editor, Idle no longer displays an error box. It now pops up an Open Module box (Alt+M). If a valid name diff --git a/lib-python/2.7/idlelib/ParenMatch.py b/lib-python/2.7/idlelib/ParenMatch.py --- a/lib-python/2.7/idlelib/ParenMatch.py +++ b/lib-python/2.7/idlelib/ParenMatch.py @@ -9,7 +9,7 @@ from idlelib.configHandler import idleConf _openers = {')':'(',']':'[','}':'{'} -CHECK_DELAY = 100 # miliseconds +CHECK_DELAY = 100 # milliseconds class ParenMatch: """Highlight matching parentheses diff --git a/lib-python/2.7/idlelib/README.txt b/lib-python/2.7/idlelib/README.txt --- a/lib-python/2.7/idlelib/README.txt +++ b/lib-python/2.7/idlelib/README.txt @@ -161,14 +161,15 @@ Show surrounding parens # ParenMatch (& Hyperparser) Shell # PyShell - View Last Restart # PyShell.? - Restart Shell # PyShell.? 
+ View Last Restart # PyShell.PyShell.view_restart_mark + Restart Shell # PyShell.PyShell.restart_shell + Interrupt Execution # pyshell.PyShell.cancel_callback Debug (Shell only) Go to File/Line - Debugger # Debugger, RemoteDebugger - Stack Viewer # StackViewer - Auto-open Stack Viewer # StackViewer + Debugger # Debugger, RemoteDebugger, PyShell.toggle_debuger + Stack Viewer # StackViewer, PyShell.open_stack_viewer + Auto-open Stack Viewer # StackViewer Format (Editor only) Indent Region diff --git a/lib-python/2.7/idlelib/ReplaceDialog.py b/lib-python/2.7/idlelib/ReplaceDialog.py --- a/lib-python/2.7/idlelib/ReplaceDialog.py +++ b/lib-python/2.7/idlelib/ReplaceDialog.py @@ -59,7 +59,7 @@ def default_command(self, event=None): if self.do_find(self.ok): if self.do_replace(): # Only find next match if replace succeeded. - # A bad re can cause a it to fail. + # A bad re can cause it to fail. self.do_find(0) def _replace_expand(self, m, repl): diff --git a/lib-python/2.7/idlelib/SearchEngine.py b/lib-python/2.7/idlelib/SearchEngine.py --- a/lib-python/2.7/idlelib/SearchEngine.py +++ b/lib-python/2.7/idlelib/SearchEngine.py @@ -107,7 +107,7 @@ It directly return the result of that call. Text is a text widget. Prog is a precompiled pattern. - The ok parameteris a bit complicated as it has two effects. + The ok parameter is a bit complicated as it has two effects. 
If there is a selection, the search begin at either end, depending on the direction setting and ok, with ok meaning that diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py --- a/lib-python/2.7/idlelib/configDialog.py +++ b/lib-python/2.7/idlelib/configDialog.py @@ -767,6 +767,7 @@ if not tkMessageBox.askyesno( 'Delete Key Set', delmsg % keySetName, parent=self): return + self.DeactivateCurrentConfig() #remove key set from config idleConf.userCfg['keys'].remove_section(keySetName) if keySetName in self.changedItems['keys']: @@ -785,7 +786,8 @@ self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys', 'default')) self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetKeysType() def DeleteCustomTheme(self): @@ -794,6 +796,7 @@ if not tkMessageBox.askyesno( 'Delete Theme', delmsg % themeName, parent=self): return + self.DeactivateCurrentConfig() #remove theme from config idleConf.userCfg['highlight'].remove_section(themeName) if themeName in self.changedItems['highlight']: @@ -812,7 +815,8 @@ self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme', 'default')) self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme', 'name')) #user can't back out of these changes, they must be applied now - self.Apply() + self.SaveAllChangedConfigs() + self.ActivateConfigChanges() self.SetThemeType() def GetColour(self): @@ -1008,7 +1012,8 @@ pass ##font size dropdown self.optMenuFontSize.SetMenu(('7', '8', '9', '10', '11', '12', '13', - '14', '16', '18', '20', '22'), fontSize ) + '14', '16', '18', '20', '22', + '25', '29', '34', '40'), fontSize ) ##fontWeight self.fontBold.set(fontBold) ##font sample diff --git a/lib-python/2.7/idlelib/configHandler.py b/lib-python/2.7/idlelib/configHandler.py --- a/lib-python/2.7/idlelib/configHandler.py +++ 
b/lib-python/2.7/idlelib/configHandler.py @@ -741,21 +741,32 @@ idleConf = IdleConf() # TODO Revise test output, write expanded unittest -### module test +# if __name__ == '__main__': + from zlib import crc32 + line, crc = 0, 0 + + def sprint(obj): + global line, crc + txt = str(obj) + line += 1 + crc = crc32(txt.encode(encoding='utf-8'), crc) + print(txt) + #print('***', line, crc, '***') # uncomment for diagnosis + def dumpCfg(cfg): - print('\n', cfg, '\n') - for key in cfg: + print('\n', cfg, '\n') # has variable '0xnnnnnnnn' addresses + for key in sorted(cfg.keys()): sections = cfg[key].sections() - print(key) - print(sections) + sprint(key) + sprint(sections) for section in sections: options = cfg[key].options(section) - print(section) - print(options) + sprint(section) + sprint(options) for option in options: - print(option, '=', cfg[key].Get(section, option)) + sprint(option + ' = ' + cfg[key].Get(section, option)) + dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print(idleConf.userCfg['main'].Get('Theme', 'name')) - #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) + print('\nlines = ', line, ', crc = ', crc, sep='') diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html --- a/lib-python/2.7/idlelib/help.html +++ b/lib-python/2.7/idlelib/help.html @@ -6,7 +6,7 @@ - 24.6. IDLE — Python 2.7.11 documentation + 24.6. IDLE — Python 2.7.12 documentation @@ -14,7 +14,7 @@ - + @@ -60,7 +60,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -238,6 +238,8 @@
    Scroll the shell window to the last Shell restart.
    Restart Shell
    Restart the shell to clean the environment.
    +
    Interrupt Execution
    +
    Stop a running program.
    @@ -490,12 +492,12 @@ functions to be used from IDLE’s Python shell.

    24.6.3.1. Command line usage

    -
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
    +
    idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
     
     -c command  run command in the shell window
     -d          enable debugger and open shell window
     -e          open editor window
    --h          print help message with legal combinatios and exit
    +-h          print help message with legal combinations and exit
     -i          open shell window
     -r file     run file in shell window
     -s          run $IDLESTARTUP or $PYTHONSTARTUP first, in shell window
    @@ -527,7 +529,9 @@
     IDLE’s changes are lost and things like input, raw_input, and
     print will not work correctly.

    With IDLE’s Shell, one enters, edits, and recalls complete statements. -Some consoles only work with a single physical line at a time.

    +Some consoles only work with a single physical line at a time. IDLE uses +exec to run each statement. As a result, '__builtins__' is always +defined for each statement.

    24.6.3.3. Running without a subprocess

    @@ -688,7 +692,7 @@ style="vertical-align: middle; margin-top: -1px"/>
  • Python »
  • - Python 2.7.11 documentation » + Python 2.7.12 documentation »
  • @@ -701,10 +705,10 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on May 02, 2016. + Last updated on Sep 12, 2016. Found a bug?
    - Created using Sphinx 1.3.3. + Created using Sphinx 1.3.6.
    diff --git a/lib-python/2.7/idlelib/help.py b/lib-python/2.7/idlelib/help.py --- a/lib-python/2.7/idlelib/help.py +++ b/lib-python/2.7/idlelib/help.py @@ -26,6 +26,7 @@ """ from HTMLParser import HTMLParser from os.path import abspath, dirname, isdir, isfile, join +from platform import python_version from Tkinter import Tk, Toplevel, Frame, Text, Scrollbar, Menu, Menubutton import tkFont as tkfont from idlelib.configHandler import idleConf @@ -150,7 +151,8 @@ self.text.insert('end', d, (self.tags, self.chartags)) def handle_charref(self, name): - self.text.insert('end', unichr(int(name))) + if self.show: + self.text.insert('end', unichr(int(name))) class HelpText(Text): @@ -268,7 +270,7 @@ if not isfile(filename): # try copy_strip, present message return - HelpWindow(parent, filename, 'IDLE Help') + HelpWindow(parent, filename, 'IDLE Help (%s)' % python_version()) if __name__ == '__main__': from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/idle.py b/lib-python/2.7/idlelib/idle.py --- a/lib-python/2.7/idlelib/idle.py +++ b/lib-python/2.7/idlelib/idle.py @@ -1,11 +1,13 @@ import os.path import sys -# If we are working on a development version of IDLE, we need to prepend the -# parent of this idlelib dir to sys.path. Otherwise, importing idlelib gets -# the version installed with the Python used to call this module: +# Enable running IDLE with idlelib in a non-standard location. +# This was once used to run development versions of IDLE. +# Because PEP 434 declared idle.py a public interface, +# removal should require deprecation. 
idlelib_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.insert(0, idlelib_dir) +if idlelib_dir not in sys.path: + sys.path.insert(0, idlelib_dir) -import idlelib.PyShell -idlelib.PyShell.main() +from idlelib.PyShell import main # This is subject to change +main() diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py --- a/lib-python/2.7/idlelib/idle_test/mock_tk.py +++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py @@ -1,6 +1,6 @@ """Classes that replace tkinter gui objects used by an object being tested. -A gui object is anything with a master or parent paramenter, which is +A gui object is anything with a master or parent parameter, which is typically required in spite of what the doc strings say. """ diff --git a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py --- a/lib-python/2.7/idlelib/idle_test/test_autocomplete.py +++ b/lib-python/2.7/idlelib/idle_test/test_autocomplete.py @@ -4,7 +4,6 @@ import idlelib.AutoComplete as ac import idlelib.AutoCompleteWindow as acw -import idlelib.macosxSupport as mac from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Event @@ -27,7 +26,6 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - mac.setupApp(cls.root, None) cls.text = Text(cls.root) cls.editor = DummyEditwin(cls.root, cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_configdialog.py b/lib-python/2.7/idlelib/idle_test/test_configdialog.py --- a/lib-python/2.7/idlelib/idle_test/test_configdialog.py +++ b/lib-python/2.7/idlelib/idle_test/test_configdialog.py @@ -16,6 +16,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() _initializeTkVariantTests(cls.root) @classmethod diff --git a/lib-python/2.7/idlelib/idle_test/test_editmenu.py b/lib-python/2.7/idlelib/idle_test/test_editmenu.py --- a/lib-python/2.7/idlelib/idle_test/test_editmenu.py +++ 
b/lib-python/2.7/idlelib/idle_test/test_editmenu.py @@ -7,15 +7,18 @@ import unittest from idlelib import PyShell + class PasteTest(unittest.TestCase): '''Test pasting into widgets that allow pasting. On X11, replacing selections requires tk fix. ''' + @classmethod def setUpClass(cls): requires('gui') cls.root = root = tk.Tk() + root.withdraw() PyShell.fix_x11_paste(root) cls.text = tk.Text(root) cls.entry = tk.Entry(root) diff --git a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py --- a/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py +++ b/lib-python/2.7/idlelib/idle_test/test_formatparagraph.py @@ -159,7 +159,7 @@ class ReformatFunctionTest(unittest.TestCase): """Test the reformat_paragraph function without the editor window.""" - def test_reformat_paragrah(self): + def test_reformat_paragraph(self): Equal = self.assertEqual reform = fp.reformat_paragraph hw = "O hello world" diff --git a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py --- a/lib-python/2.7/idlelib/idle_test/test_hyperparser.py +++ b/lib-python/2.7/idlelib/idle_test/test_hyperparser.py @@ -36,6 +36,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) cls.editwin = DummyEditwin(cls.text) diff --git a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py --- a/lib-python/2.7/idlelib/idle_test/test_idlehistory.py +++ b/lib-python/2.7/idlelib/idle_test/test_idlehistory.py @@ -68,6 +68,7 @@ def setUpClass(cls): requires('gui') cls.root = tk.Tk() + cls.root.withdraw() def setUp(self): self.text = text = TextWrapper(self.root) diff --git a/lib-python/2.7/idlelib/idle_test/test_textview.py b/lib-python/2.7/idlelib/idle_test/test_textview.py --- a/lib-python/2.7/idlelib/idle_test/test_textview.py +++ b/lib-python/2.7/idlelib/idle_test/test_textview.py @@ -8,7 +8,11 
@@ from idlelib.idle_test.mock_idle import Func from idlelib.idle_test.mock_tk import Mbox -orig_mbox = tv.tkMessageBox + +class TV(tv.TextViewer): # Use in TextViewTest + transient = Func() + grab_set = Func() + wait_window = Func() class textviewClassTest(unittest.TestCase): @@ -16,26 +20,19 @@ def setUpClass(cls): requires('gui') cls.root = Tk() - cls.TV = TV = tv.TextViewer - TV.transient = Func() - TV.grab_set = Func() - TV.wait_window = Func() + cls.root.withdraw() @classmethod def tearDownClass(cls): - del cls.TV cls.root.destroy() del cls.root def setUp(self): - TV = self.TV TV.transient.__init__() TV.grab_set.__init__() TV.wait_window.__init__() - def test_init_modal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text') self.assertTrue(TV.transient.called) self.assertTrue(TV.grab_set.called) @@ -43,7 +40,6 @@ view.Ok() def test_init_nonmodal(self): - TV = self.TV view = TV(self.root, 'Title', 'test text', modal=False) self.assertFalse(TV.transient.called) self.assertFalse(TV.grab_set.called) @@ -51,32 +47,36 @@ view.Ok() def test_ok(self): - view = self.TV(self.root, 'Title', 'test text', modal=False) + view = TV(self.root, 'Title', 'test text', modal=False) view.destroy = Func() view.Ok() self.assertTrue(view.destroy.called) - del view.destroy # unmask real function - view.destroy + del view.destroy # Unmask the real function. + view.destroy() -class textviewTest(unittest.TestCase): +class ViewFunctionTest(unittest.TestCase): @classmethod def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() + cls.orig_mbox = tv.tkMessageBox tv.tkMessageBox = Mbox @classmethod def tearDownClass(cls): cls.root.destroy() del cls.root - tv.tkMessageBox = orig_mbox + tv.tkMessageBox = cls.orig_mbox + del cls.orig_mbox def test_view_text(self): - # If modal True, tkinter will error with 'can't invoke "event" command' + # If modal True, get tkinter error 'can't invoke "event" command'. 
view = tv.view_text(self.root, 'Title', 'test text', modal=False) self.assertIsInstance(view, tv.TextViewer) + view.Ok() def test_view_file(self): test_dir = os.path.dirname(__file__) @@ -86,10 +86,11 @@ self.assertIn('Test', view.textView.get('1.0', '1.end')) view.Ok() - # Mock messagebox will be used and view_file will not return anything + # Mock messagebox will be used; view_file will return None. testfile = os.path.join(test_dir, '../notthere.py') view = tv.view_file(self.root, 'Title', testfile, modal=False) self.assertIsNone(view) + if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py --- a/lib-python/2.7/idlelib/idle_test/test_widgetredir.py +++ b/lib-python/2.7/idlelib/idle_test/test_widgetredir.py @@ -15,6 +15,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod @@ -44,6 +45,7 @@ def setUpClass(cls): requires('gui') cls.root = Tk() + cls.root.withdraw() cls.text = Text(cls.root) @classmethod diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py --- a/lib-python/2.7/inspect.py +++ b/lib-python/2.7/inspect.py @@ -155,9 +155,8 @@ def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. - Generator function objects provides same attributes as functions. - - See help(isfunction) for attributes listing.""" + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" return bool((isfunction(object) or ismethod(object)) and object.func_code.co_flags & CO_GENERATOR) diff --git a/lib-python/2.7/io.py b/lib-python/2.7/io.py --- a/lib-python/2.7/io.py +++ b/lib-python/2.7/io.py @@ -19,7 +19,7 @@ Another IOBase subclass, TextIOBase, deals with the encoding and decoding of streams into text. 
TextIOWrapper, which extends it, is a buffered text interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO -is a in-memory stream for text. +is an in-memory stream for text. Argument names are not part of the specification, and only the arguments of open() are intended to be used as keyword arguments. diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -138,7 +138,7 @@ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the output are escaped with ``\uXXXX`` sequences, and the result is a ``str`` instance consisting of ASCII characters only. If ``ensure_ascii`` is - ``False``, some chunks written to ``fp`` may be ``unicode`` instances. + false, some chunks written to ``fp`` may be ``unicode`` instances. This usually happens because the input contains unicode strings or the ``encoding`` parameter is used. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter``) this is likely to @@ -169,7 +169,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -234,7 +234,7 @@ ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. - If *sort_keys* is ``True`` (default: ``False``), then the output of + If *sort_keys* is true (default: ``False``), then the output of dictionaries will be sorted by key. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the @@ -330,7 +330,7 @@ for JSON integers (e.g. float). 
``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. + following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -35,7 +35,7 @@ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) INFINITY = float('inf') -FLOAT_REPR = repr +FLOAT_REPR = float.__repr__ def raw_encode_basestring(s): """Return a JSON representation of a Python string diff --git a/lib-python/2.7/json/tests/test_decode.py b/lib-python/2.7/json/tests/test_decode.py --- a/lib-python/2.7/json/tests/test_decode.py +++ b/lib-python/2.7/json/tests/test_decode.py @@ -43,7 +43,7 @@ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) - # check that empty objects literals work (see #17368) + # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', diff --git a/lib-python/2.7/json/tests/test_float.py b/lib-python/2.7/json/tests/test_float.py --- a/lib-python/2.7/json/tests/test_float.py +++ b/lib-python/2.7/json/tests/test_float.py @@ -32,6 +32,17 @@ self.assertNotEqual(res[0], res[0]) self.assertRaises(ValueError, self.dumps, [val], allow_nan=False) + def test_float_subclasses_use_float_repr(self): + # Issue 27934. 
+ class PeculiarFloat(float): + def __repr__(self): + return "I'm not valid JSON" + def __str__(self): + return "Neither am I" + + val = PeculiarFloat(3.2) + self.assertEqual(self.loads(self.dumps(val)), val) + class TestPyFloat(TestFloat, PyTest): pass class TestCFloat(TestFloat, CTest): pass diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py --- a/lib-python/2.7/lib-tk/Tix.py +++ b/lib-python/2.7/lib-tk/Tix.py @@ -26,8 +26,10 @@ # appreciate the advantages. # +import os +import Tkinter from Tkinter import * -from Tkinter import _flatten, _cnfmerge, _default_root +from Tkinter import _flatten, _cnfmerge # WARNING - TkVersion is a limited precision floating point number if TkVersion < 3.999: @@ -72,7 +74,6 @@ # BEWARE - this is implemented by copying some code from the Widget class # in Tkinter (to override Widget initialization) and is therefore # liable to break. -import Tkinter, os # Could probably add this to Tkinter.Misc class tixCommand: @@ -476,10 +477,14 @@ (multiple) Display Items""" def __init__(self, itemtype, cnf={}, **kw): - master = _default_root # global from Tkinter - if not master and 'refwindow' in cnf: master=cnf['refwindow'] - elif not master and 'refwindow' in kw: master= kw['refwindow'] - elif not master: raise RuntimeError, "Too early to create display style: no root window" + if 'refwindow' in kw: + master = kw['refwindow'] + elif 'refwindow' in cnf: + master = cnf['refwindow'] + else: + master = Tkinter._default_root + if not master: + raise RuntimeError("Too early to create display style: no root window") self.tk = master.tk self.stylename = self.tk.call('tixDisplayStyle', itemtype, *self._options(cnf,kw) ) @@ -923,7 +928,11 @@ return self.tk.call(self._w, 'header', 'cget', col, opt) def header_exists(self, col): - return self.tk.call(self._w, 'header', 'exists', col) + # A workaround to Tix library bug (issue #25464). + # The documented command is "exists", but only erroneous "exist" is + # accepted. 
+ return self.tk.getboolean(self.tk.call(self._w, 'header', 'exist', col)) + header_exist = header_exists def header_delete(self, col): self.tk.call(self._w, 'header', 'delete', col) diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py --- a/lib-python/2.7/lib-tk/Tkinter.py From pypy.commits at gmail.com Mon Dec 26 13:32:42 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:42 -0800 (PST) Subject: [pypy-commit] pypy issue2444: move code Message-ID: <5861624a.46bb1c0a.5b512.4862@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89234:8ef3af61fe40 Date: 2016-12-25 23:10 +0200 http://bitbucket.org/pypy/pypy/changeset/8ef3af61fe40/ Log: move code diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -38,6 +38,30 @@ view.c_obj = make_ref(space, w_obj) return ret + at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) +def PyMemoryView_GET_BUFFER(space, w_obj): + """Return a pointer to the buffer-info structure wrapped by the given + object. The object must be a memoryview instance; this macro doesn't + check its type, you must do it yourself or you will risk crashes.""" + view = lltype.malloc(Py_buffer, flavor='raw', zero=True) + if not isinstance(w_obj, W_MemoryView): + return view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? 
+ return view + fill_Py_buffer(space, w_obj.buf, view) + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + view.c_obj = make_ref(space, w_obj) + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) + return view + def fill_Py_buffer(space, buf, view): # c_buf, c_obj have been filled in ndim = buf.getndim() @@ -149,29 +173,3 @@ # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BASE') - at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) -def PyMemoryView_GET_BUFFER(space, w_obj): - """Return a pointer to the buffer-info structure wrapped by the given - object. The object must be a memoryview instance; this macro doesn't - check its type, you must do it yourself or you will risk crashes.""" - view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - if not isinstance(w_obj, W_MemoryView): - return view - ndim = w_obj.buf.getndim() - if ndim >= Py_MAX_NDIMS: - # XXX warn? 
- return view - fill_Py_buffer(space, w_obj.buf, view) - try: - view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) - view.c_obj = make_ref(space, w_obj) - rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) - isstr = False - except ValueError: - w_s = w_obj.descr_tobytes(space) - view.c_obj = make_ref(space, w_s) - view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) - rffi.setintfield(view, 'c_readonly', 1) - isstr = True - return view - From pypy.commits at gmail.com Mon Dec 26 13:32:46 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 10:32:46 -0800 (PST) Subject: [pypy-commit] pypy issue2444: remove cyclic reference, w_obj <-> pymemobj, view = pymemobj.c_view Message-ID: <5861624e.6a7ac20a.609ac.d5bd@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89236:b9a117c04b7c Date: 2016-12-26 19:51 +0200 http://bitbucket.org/pypy/pypy/changeset/b9a117c04b7c/ Log: remove cyclic reference, w_obj <-> pymemobj, view = pymemobj.c_view diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -97,7 +97,7 @@ fill_Py_buffer(space, w_obj.buf, view) try: view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) - view.c_obj = make_ref(space, w_obj) + #view.c_obj = make_ref(space, w_obj) # NO - this creates a ref cycle! 
rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) isstr = False except ValueError: From pypy.commits at gmail.com Mon Dec 26 14:23:24 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 11:23:24 -0800 (PST) Subject: [pypy-commit] pypy issue2444: use _attrs_ instead of __slots__ Message-ID: <58616e2c.96a61c0a.46f2d.7122@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89239:680bcb971f9b Date: 2016-12-26 21:22 +0200 http://bitbucket.org/pypy/pypy/changeset/680bcb971f9b/ Log: use _attrs_ instead of __slots__ diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -6,6 +6,7 @@ class Buffer(object): """Abstract base class for buffers.""" + _attrs_ = ['readonly'] _immutable_ = True def getlength(self): @@ -78,7 +79,7 @@ pass class StringBuffer(Buffer): - __slots__ = ['readonly', 'value'] + _attrs_ = ['readonly', 'value'] _immutable_ = True def __init__(self, value): @@ -109,7 +110,7 @@ class SubBuffer(Buffer): - __slots__ = ['buffer', 'offset', 'size', 'readonly'] + _attrs_ = ['buffer', 'offset', 'size', 'readonly'] _immutable_ = True def __init__(self, buffer, offset, size): From pypy.commits at gmail.com Mon Dec 26 15:30:40 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 26 Dec 2016 12:30:40 -0800 (PST) Subject: [pypy-commit] pypy issue2444: test, fix for issue 2453 Message-ID: <58617df0.07941c0a.61a51.7176@mx.google.com> Author: Matti Picus Branch: issue2444 Changeset: r89240:50865558868e Date: 2016-12-26 22:24 +0200 http://bitbucket.org/pypy/pypy/changeset/50865558868e/ Log: test, fix for issue 2453 diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -76,7 +76,11 @@ try: view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) except ValueError: - raise BufferError("could not create buffer from object") + if not space.isinstance_w(w_obj, 
space.w_str): + # XXX Python 3? + raise BufferError("could not create buffer from object") + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_obj), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) ret = fill_Py_buffer(space, buf, view) view.c_obj = make_ref(space, w_obj) return ret @@ -99,13 +103,11 @@ view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) #view.c_obj = make_ref(space, w_obj) # NO - this creates a ref cycle! rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) - isstr = False except ValueError: w_s = w_obj.descr_tobytes(space) view.c_obj = make_ref(space, w_s) view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) rffi.setintfield(view, 'c_readonly', 1) - isstr = True return view def fill_Py_buffer(space, buf, view): diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -57,7 +57,7 @@ PyObject* obj = PyTuple_GetItem(args, 0); long ret, vlen; memset(&view, 0, sizeof(Py_buffer)); - ret = PyObject_GetBuffer(obj, &view, PyBUF_FULL); + ret = PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO); if (ret != 0) return NULL; vlen = view.len / view.itemsize; @@ -79,6 +79,8 @@ arr = module.PyMyArray(10) ten = foo.get_len(arr) assert ten == 10 + ten = foo.get_len('1234567890') + assert ten == 10 ten = foo.test_buffer(arr) assert ten == 10 From pypy.commits at gmail.com Tue Dec 27 04:19:48 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 27 Dec 2016 01:19:48 -0800 (PST) Subject: [pypy-commit] pypy default: Remove the old, top-level ctypes_configure directory, which is (as far Message-ID: <58623234.542e1c0a.8fa96.34ea@mx.google.com> Author: Armin Rigo Branch: Changeset: r89241:d009d24b530c Date: 2016-12-27 10:19 +0100 http://bitbucket.org/pypy/pypy/changeset/d009d24b530c/ Log: Remove the old, top-level ctypes_configure 
directory, which is (as far as I can tell) fully unused for a while now. diff --git a/ctypes_configure/__init__.py b/ctypes_configure/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py deleted file mode 100644 --- a/ctypes_configure/cbuild.py +++ /dev/null @@ -1,456 +0,0 @@ - -import os, sys, inspect, re, imp, py -from ctypes_configure import stdoutcapture -import distutils - -debug = 0 - -configdir = py.path.local.make_numbered_dir(prefix='ctypes_configure-') - -class ExternalCompilationInfo(object): - - _ATTRIBUTES = ['pre_include_lines', 'includes', 'include_dirs', - 'post_include_lines', 'libraries', 'library_dirs', - 'separate_module_sources', 'separate_module_files'] - _AVOID_DUPLICATES = ['separate_module_files', 'libraries', 'includes', - 'include_dirs', 'library_dirs', 'separate_module_sources'] - - def __init__(self, - pre_include_lines = [], - includes = [], - include_dirs = [], - post_include_lines = [], - libraries = [], - library_dirs = [], - separate_module_sources = [], - separate_module_files = []): - """ - pre_include_lines: list of lines that should be put at the top - of the generated .c files, before any #include. They shouldn't - contain an #include themselves. - - includes: list of .h file names to be #include'd from the - generated .c files. - - include_dirs: list of dir names that is passed to the C compiler - - post_include_lines: list of lines that should be put at the top - of the generated .c files, after the #includes. - - libraries: list of library names that is passed to the linker - - library_dirs: list of dir names that is passed to the linker - - separate_module_sources: list of multiline strings that are - each written to a .c file and compiled separately and linked - later on. (If function prototypes are needed for other .c files - to access this, they can be put in post_include_lines.) 
- - separate_module_files: list of .c file names that are compiled - separately and linked later on. (If an .h file is needed for - other .c files to access this, it can be put in includes.) - """ - for name in self._ATTRIBUTES: - value = locals()[name] - assert isinstance(value, (list, tuple)) - setattr(self, name, tuple(value)) - - def _value(self): - return tuple([getattr(self, x) for x in self._ATTRIBUTES]) - - def __hash__(self): - return hash(self._value()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and \ - self._value() == other._value() - - def __ne__(self, other): - return not self == other - - def __repr__(self): - info = [] - for attr in self._ATTRIBUTES: - val = getattr(self, attr) - info.append("%s=%s" % (attr, repr(val))) - return "" % ", ".join(info) - - def merge(self, *others): - others = list(others) - attrs = {} - for name in self._ATTRIBUTES: - if name not in self._AVOID_DUPLICATES: - s = [] - for i in [self] + others: - s += getattr(i, name) - attrs[name] = s - else: - s = set() - attr = [] - for one in [self] + others: - for elem in getattr(one, name): - if elem not in s: - s.add(elem) - attr.append(elem) - attrs[name] = attr - return ExternalCompilationInfo(**attrs) - - def write_c_header(self, fileobj): - for line in self.pre_include_lines: - print >> fileobj, line - for path in self.includes: - print >> fileobj, '#include <%s>' % (path,) - for line in self.post_include_lines: - print >> fileobj, line - - def _copy_attributes(self): - d = {} - for attr in self._ATTRIBUTES: - d[attr] = getattr(self, attr) - return d - - def convert_sources_to_files(self, cache_dir=None, being_main=False): - if not self.separate_module_sources: - return self - if cache_dir is None: - cache_dir = configdir.join('module_cache').ensure(dir=1) - num = 0 - files = [] - for source in self.separate_module_sources: - while 1: - filename = cache_dir.join('module_%d.c' % num) - num += 1 - if not filename.check(): - break - f = 
filename.open("w") - if being_main: - f.write("#define PYPY_NOT_MAIN_FILE\n") - self.write_c_header(f) - source = str(source) - f.write(source) - if not source.endswith('\n'): - f.write('\n') - f.close() - files.append(str(filename)) - d = self._copy_attributes() - d['separate_module_sources'] = () - d['separate_module_files'] += tuple(files) - return ExternalCompilationInfo(**d) - - def compile_shared_lib(self): - self = self.convert_sources_to_files() - if not self.separate_module_files: - return self - lib = compile_c_module([], 'externmod', self) - d = self._copy_attributes() - d['libraries'] += (lib,) - d['separate_module_files'] = () - d['separate_module_sources'] = () - return ExternalCompilationInfo(**d) - -if sys.platform == 'win32': - so_ext = '.dll' -else: - so_ext = '.so' - -def compiler_command(): - # e.g. for tcc, you might set this to - # "tcc -shared -o %s.so %s.c" - return os.getenv('PYPY_CC') - -def enable_fast_compilation(): - if sys.platform == 'win32': - dash = '/' - else: - dash = '-' - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt: - opt = re.sub('%sO\d+' % dash, '%sO0' % dash, opt) - else: - opt = '%sO0' % dash - gcv['OPT'] = opt - -def ensure_correct_math(): - if sys.platform != 'win32': - return # so far - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt and '/Op' not in opt: - opt += '/Op' - gcv['OPT'] = opt - - -def try_compile(c_files, eci): - try: - build_executable(c_files, eci) - result = True - except (distutils.errors.CompileError, - distutils.errors.LinkError): - result = False - return result - -def compile_c_module(cfiles, modbasename, eci, tmpdir=None): - #try: - # from distutils.log import set_threshold - # set_threshold(10000) - #except ImportError: - # print "ERROR IMPORTING" - # pass - cfiles = [py.path.local(f) for f in cfiles] - if tmpdir is None: - tmpdir = 
configdir.join("module_cache").ensure(dir=1) - num = 0 - cfiles += eci.separate_module_files - include_dirs = list(eci.include_dirs) - library_dirs = list(eci.library_dirs) - if (sys.platform == 'darwin' or # support Fink & Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in include_dirs and \ - os.path.exists(s + 'include'): - include_dirs.append(s + 'include') - if s + 'lib' not in library_dirs and \ - os.path.exists(s + 'lib'): - library_dirs.append(s + 'lib') - - num = 0 - modname = modbasename - while 1: - if not tmpdir.join(modname + so_ext).check(): - break - num += 1 - modname = '%s_%d' % (modbasename, num) - - lastdir = tmpdir.chdir() - libraries = eci.libraries - ensure_correct_math() - try: - if debug: print "modname", modname - c = stdoutcapture.Capture(mixed_out_err = True) - try: - try: - if compiler_command(): - # GCC-ish options only - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - cmd = compiler_command().replace('%s', - str(tmpdir.join(modname))) - for dir in [gcv['INCLUDEPY']] + list(include_dirs): - cmd += ' -I%s' % dir - for dir in library_dirs: - cmd += ' -L%s' % dir - os.system(cmd) - else: - from distutils.dist import Distribution - from distutils.extension import Extension - from distutils.ccompiler import get_default_compiler - saved_environ = os.environ.items() - try: - # distutils.core.setup() is really meant for end-user - # interactive usage, because it eats most exceptions and - # turn them into SystemExits. Instead, we directly - # instantiate a Distribution, which also allows us to - # ignore unwanted features like config files. 
- extra_compile_args = [] - # ensure correct math on windows - if sys.platform == 'win32': - extra_compile_args.append('/Op') # get extra precision - if get_default_compiler() == 'unix': - old_version = False - try: - g = os.popen('gcc --version', 'r') - verinfo = g.read() - g.close() - except (OSError, IOError): - pass - else: - old_version = verinfo.startswith('2') - if not old_version: - extra_compile_args.extend(["-Wno-unused-label", - "-Wno-unused-variable"]) - attrs = { - 'name': "testmodule", - 'ext_modules': [ - Extension(modname, [str(cfile) for cfile in cfiles], - include_dirs=include_dirs, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - libraries=list(libraries),) - ], - 'script_name': 'setup.py', - 'script_args': ['-q', 'build_ext', '--inplace', '--force'], - } - dist = Distribution(attrs) - if not dist.parse_command_line(): - raise ValueError, "distutils cmdline parse error" - dist.run_commands() - finally: - for key, value in saved_environ: - if os.environ.get(key) != value: - os.environ[key] = value - finally: - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = open("%s.errors" % modname, "w") - fdump.write(data) - fdump.close() - # XXX do we need to do some check on fout/ferr? 
- # XXX not a nice way to import a module - except: - print >>sys.stderr, data - raise - finally: - lastdir.chdir() - return str(tmpdir.join(modname) + so_ext) - -def make_module_from_c(cfile, eci): - cfile = py.path.local(cfile) - modname = cfile.purebasename - compile_c_module([cfile], modname, eci) - return import_module_from_directory(cfile.dirpath(), modname) - -def import_module_from_directory(dir, modname): - file, pathname, description = imp.find_module(modname, [str(dir)]) - try: - mod = imp.load_module(modname, file, pathname, description) - finally: - if file: - file.close() - return mod - - -def log_spawned_cmd(spawn): - def spawn_and_log(cmd, *args, **kwds): - if debug: - print ' '.join(cmd) - return spawn(cmd, *args, **kwds) - return spawn_and_log - - -class ProfOpt(object): - #XXX assuming gcc style flags for now - name = "profopt" - - def __init__(self, compiler): - self.compiler = compiler - - def first(self): - self.build('-fprofile-generate') - - def probe(self, exe, args): - # 'args' is a single string typically containing spaces - # and quotes, which represents several arguments. 
- os.system("'%s' %s" % (exe, args)) - - def after(self): - self.build('-fprofile-use') - - def build(self, option): - compiler = self.compiler - compiler.compile_extra.append(option) - compiler.link_extra.append(option) - try: - compiler._build() - finally: - compiler.compile_extra.pop() - compiler.link_extra.pop() - -class CCompiler: - - def __init__(self, cfilenames, eci, outputfilename=None, - compiler_exe=None, profbased=None): - self.cfilenames = cfilenames - ext = '' - self.compile_extra = [] - self.link_extra = [] - self.libraries = list(eci.libraries) - self.include_dirs = list(eci.include_dirs) - self.library_dirs = list(eci.library_dirs) - self.compiler_exe = compiler_exe - self.profbased = profbased - if not sys.platform in ('win32', 'darwin', 'cygwin'): # xxx - if 'm' not in self.libraries: - self.libraries.append('m') - if 'pthread' not in self.libraries: - self.libraries.append('pthread') - self.compile_extra += ['-O3', '-fomit-frame-pointer', '-pthread'] - self.link_extra += ['-pthread'] - if sys.platform == 'win32': - self.link_extra += ['/DEBUG'] # generate .pdb file - if (sys.platform == 'darwin' or # support Fink & Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in self.include_dirs and \ - os.path.exists(s + 'include'): - self.include_dirs.append(s + 'include') - if s + 'lib' not in self.library_dirs and \ - os.path.exists(s + 'lib'): - self.library_dirs.append(s + 'lib') - self.compile_extra += ['-O3', '-fomit-frame-pointer'] - - if outputfilename is None: - self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) - else: - self.outputfilename = py.path.local(outputfilename) - - def build(self, noerr=False): - basename = self.outputfilename.new(ext='') - data = '' - try: - saved_environ = os.environ.copy() - c = stdoutcapture.Capture(mixed_out_err = True) - try: - self._build() - finally: - # workaround for a distutils bugs where some env vars can - # become 
longer and longer every time it is used - for key, value in saved_environ.items(): - if os.environ.get(key) != value: - os.environ[key] = value - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = basename.new(ext='errors').open("w") - fdump.write(data) - fdump.close() - except: - if not noerr: - print >>sys.stderr, data - raise - - def _build(self): - from distutils.ccompiler import new_compiler - compiler = new_compiler(force=1) - if self.compiler_exe is not None: - for c in '''compiler compiler_so compiler_cxx - linker_exe linker_so'''.split(): - compiler.executables[c][0] = self.compiler_exe - compiler.spawn = log_spawned_cmd(compiler.spawn) - objects = [] - for cfile in self.cfilenames: - cfile = py.path.local(cfile) - old = cfile.dirpath().chdir() - try: - res = compiler.compile([cfile.basename], - include_dirs=self.include_dirs, - extra_preargs=self.compile_extra) - assert len(res) == 1 - cobjfile = py.path.local(res[0]) - assert cobjfile.check() - objects.append(str(cobjfile)) - finally: - old.chdir() - compiler.link_executable(objects, str(self.outputfilename), - libraries=self.libraries, - extra_preargs=self.link_extra, - library_dirs=self.library_dirs) - -def build_executable(*args, **kwds): - noerr = kwds.pop('noerr', False) - compiler = CCompiler(*args, **kwds) - compiler.build(noerr=noerr) - return str(compiler.outputfilename) diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py deleted file mode 100755 --- a/ctypes_configure/configure.py +++ /dev/null @@ -1,621 +0,0 @@ -#! 
/usr/bin/env python - -import os, py, sys -import ctypes -from ctypes_configure.cbuild import build_executable, configdir, try_compile -from ctypes_configure.cbuild import ExternalCompilationInfo -import distutils - -# ____________________________________________________________ -# -# Helpers for simple cases - -def eci_from_header(c_header_source): - return ExternalCompilationInfo( - pre_include_lines=c_header_source.split("\n") - ) - - -def getstruct(name, c_header_source, interesting_fields): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - STRUCT = Struct(name, interesting_fields) - return configure(CConfig)['STRUCT'] - -def getsimpletype(name, c_header_source, ctype_hint=ctypes.c_int): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - TYPE = SimpleType(name, ctype_hint) - return configure(CConfig)['TYPE'] - -def getconstantinteger(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - CONST = ConstantInteger(name) - return configure(CConfig)['CONST'] - -def getdefined(macro, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - DEFINED = Defined(macro) - return configure(CConfig)['DEFINED'] - -def has(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - HAS = Has(name) - return configure(CConfig)['HAS'] - -def check_eci(eci): - """Check if a given ExternalCompilationInfo compiles and links.""" - class CConfig: - _compilation_info_ = eci - WORKS = Works() - return configure(CConfig)['WORKS'] - -def sizeof(name, eci, **kwds): - class CConfig: - _compilation_info_ = eci - SIZE = SizeOf(name) - for k, v in kwds.items(): - setattr(CConfig, k, v) - return configure(CConfig)['SIZE'] - -def memory_alignment(): - """Return the alignment (in bytes) of memory allocations. 
- This is enough to make sure a structure with pointers and 'double' - fields is properly aligned.""" - global _memory_alignment - if _memory_alignment is None: - S = getstruct('struct memory_alignment_test', """ - struct memory_alignment_test { - double d; - void* p; - }; - """, []) - result = ctypes.alignment(S) - assert result & (result-1) == 0, "not a power of two??" - _memory_alignment = result - return _memory_alignment -_memory_alignment = None - -# ____________________________________________________________ -# -# General interface - -class ConfigResult: - def __init__(self, CConfig, info, entries): - self.CConfig = CConfig - self.result = {} - self.info = info - self.entries = entries - - def get_entry_result(self, entry): - try: - return self.result[entry] - except KeyError: - pass - name = self.entries[entry] - info = self.info[name] - self.result[entry] = entry.build_result(info, self) - - def get_result(self): - return dict([(name, self.result[entry]) - for entry, name in self.entries.iteritems()]) - - -class _CWriter(object): - """ A simple class which aggregates config parts - """ - def __init__(self, CConfig): - self.path = uniquefilepath() - self.f = self.path.open("w") - self.config = CConfig - - def write_header(self): - f = self.f - CConfig = self.config - CConfig._compilation_info_.write_c_header(f) - print >> f, C_HEADER - print >> f - - def write_entry(self, key, entry): - f = self.f - print >> f, 'void dump_section_%s(void) {' % (key,) - for line in entry.prepare_code(): - if line and line[0] != '#': - line = '\t' + line - print >> f, line - print >> f, '}' - print >> f - - def write_entry_main(self, key): - print >> self.f, '\tprintf("-+- %s\\n");' % (key,) - print >> self.f, '\tdump_section_%s();' % (key,) - print >> self.f, '\tprintf("---\\n");' - - def start_main(self): - print >> self.f, 'int main(int argc, char *argv[]) {' - - def close(self): - f = self.f - print >> f, '\treturn 0;' - print >> f, '}' - f.close() - - def ask_gcc(self, 
question): - self.start_main() - self.f.write(question + "\n") - self.close() - eci = self.config._compilation_info_ - return try_compile([self.path], eci) - - -def configure(CConfig, noerr=False): - """Examine the local system by running the C compiler. - The CConfig class contains CConfigEntry attribues that describe - what should be inspected; configure() returns a dict mapping - names to the results. - """ - for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_', - '_include_dirs_', '_header_']: - assert not hasattr(CConfig, attr), "Found legacy attribut %s on CConfig" % (attr,) - entries = [] - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigEntry): - entries.append((key, value)) - - if entries: # can be empty if there are only CConfigSingleEntries - writer = _CWriter(CConfig) - writer.write_header() - for key, entry in entries: - writer.write_entry(key, entry) - - f = writer.f - writer.start_main() - for key, entry in entries: - writer.write_entry_main(key) - writer.close() - - eci = CConfig._compilation_info_ - infolist = list(run_example_code(writer.path, eci, noerr=noerr)) - assert len(infolist) == len(entries) - - resultinfo = {} - resultentries = {} - for info, (key, entry) in zip(infolist, entries): - resultinfo[key] = info - resultentries[entry] = key - - result = ConfigResult(CConfig, resultinfo, resultentries) - for name, entry in entries: - result.get_entry_result(entry) - res = result.get_result() - else: - res = {} - - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigSingleEntry): - writer = _CWriter(CConfig) - writer.write_header() - res[key] = value.question(writer.ask_gcc) - return res - -# ____________________________________________________________ - - -class CConfigEntry(object): - "Abstract base class." - -class Struct(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined structure. 
- """ - def __init__(self, name, interesting_fields, ifdef=None): - self.name = name - self.interesting_fields = interesting_fields - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield 'typedef struct {' - yield ' char c;' - yield ' ctypesplatcheck_t s;' - yield '} ctypesplatcheck2_t;' - yield '' - yield 'ctypesplatcheck_t s;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("align", offsetof(ctypesplatcheck2_t, s));' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - for fieldname, fieldtype in self.interesting_fields: - yield 'dump("fldofs %s", offsetof(ctypesplatcheck_t, %s));'%( - fieldname, fieldname) - yield 'dump("fldsize %s", sizeof(s.%s));' % ( - fieldname, fieldname) - if fieldtype in integer_class: - yield 's.%s = 0; s.%s = ~s.%s;' % (fieldname, - fieldname, - fieldname) - yield 'dump("fldunsigned %s", s.%s > 0);' % (fieldname, - fieldname) - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None: - if not info['defined']: - return None - alignment = 1 - layout = [None] * info['size'] - for fieldname, fieldtype in self.interesting_fields: - if isinstance(fieldtype, Struct): - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - c_fieldtype = config_result.get_entry_result(fieldtype) - layout_addfield(layout, offset, c_fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(c_fieldtype)) - else: - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != size_and_sign(fieldtype): - fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) - layout_addfield(layout, offset, fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(fieldtype)) - - # try to enforce 
the same alignment as the one of the original - # structure - if alignment < info['align']: - choices = [ctype for ctype in alignment_types - if ctype_alignment(ctype) == info['align']] - assert choices, "unsupported alignment %d" % (info['align'],) - choices = [(ctypes.sizeof(ctype), i, ctype) - for i, ctype in enumerate(choices)] - csize, _, ctype = min(choices) - for i in range(0, info['size'] - csize + 1, info['align']): - if layout[i:i+csize] == [None] * csize: - layout_addfield(layout, i, ctype, '_alignment') - break - else: - raise AssertionError("unenforceable alignment %d" % ( - info['align'],)) - - n = 0 - for i, cell in enumerate(layout): - if cell is not None: - continue - layout_addfield(layout, i, ctypes.c_char, '_pad%d' % (n,)) - n += 1 - - # build the ctypes Structure - seen = {} - fields = [] - for cell in layout: - if cell in seen: - continue - fields.append((cell.name, cell.ctype)) - seen[cell] = True - - class S(ctypes.Structure): - _fields_ = fields - name = self.name - if name.startswith('struct '): - name = name[7:] - S.__name__ = name - return S - -class SimpleType(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined simple numeric type. 
- """ - def __init__(self, name, ctype_hint=ctypes.c_int, ifdef=None): - self.name = name - self.ctype_hint = ctype_hint - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield '' - yield 'ctypesplatcheck_t x;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - if self.ctype_hint in integer_class: - yield 'x = 0; x = ~x;' - yield 'dump("unsigned", x > 0);' - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None and not info['defined']: - return None - size = info['size'] - sign = info.get('unsigned', False) - ctype = self.ctype_hint - if (size, sign) != size_and_sign(ctype): - ctype = fixup_ctype(ctype, self.name, (size, sign)) - return ctype - -class ConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'if ((%s) < 0) {' % (self.name,) - yield ' long long x = (long long)(%s);' % (self.name,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.name,) - yield ' printf("value: %llu\\n", x);' - yield '}' - - def build_result(self, info, config_result): - return info['value'] - -class DefinedConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. If not #defined the value will be None. 
- """ - def __init__(self, macro): - self.name = self.macro = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'dump("defined", 1);' - yield 'if ((%s) < 0) {' % (self.macro,) - yield ' long long x = (long long)(%s);' % (self.macro,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.macro,) - yield ' printf("value: %llu\\n", x);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - return info['value'] - return None - - -class DefinedConstantString(CConfigEntry): - """ - """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'int i;' - yield 'char *p = %s;' % self.macro - yield 'dump("defined", 1);' - yield 'for (i = 0; p[i] != 0; i++ ) {' - yield ' printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - string = '' - d = 0 - while info.has_key('value_%d' % d): - string += chr(info['value_%d' % d]) - d += 1 - return string - return None - - -class Defined(CConfigEntry): - """A boolean, corresponding to an #ifdef. 
- """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % (self.macro,) - yield 'dump("defined", 1);' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - return bool(info['defined']) - -class CConfigSingleEntry(object): - """ An abstract class of type which requires - gcc succeeding/failing instead of only asking - """ - pass - -class Has(CConfigSingleEntry): - def __init__(self, name): - self.name = name - - def question(self, ask_gcc): - return ask_gcc(self.name + ';') - -class Works(CConfigSingleEntry): - def question(self, ask_gcc): - return ask_gcc("") - -class SizeOf(CConfigEntry): - """An entry in a CConfig class that stands for - some external opaque type - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'dump("size", sizeof(%s));' % self.name - - def build_result(self, info, config_result): - return info['size'] - -# ____________________________________________________________ -# -# internal helpers - -def ctype_alignment(c_type): - if issubclass(c_type, ctypes.Structure): - return max([ctype_alignment(fld_type) - for fld_name, fld_type in c_type._fields_]) - - return ctypes.alignment(c_type) - -def uniquefilepath(LAST=[0]): - i = LAST[0] - LAST[0] += 1 - return configdir.join('ctypesplatcheck_%d.c' % i) - -alignment_types = [ - ctypes.c_short, - ctypes.c_int, - ctypes.c_long, - ctypes.c_float, - ctypes.c_double, - ctypes.c_char_p, - ctypes.c_void_p, - ctypes.c_longlong, - ctypes.c_wchar, - ctypes.c_wchar_p, - ] - -integer_class = [ctypes.c_byte, ctypes.c_ubyte, - ctypes.c_short, ctypes.c_ushort, - ctypes.c_int, ctypes.c_uint, - ctypes.c_long, ctypes.c_ulong, - ctypes.c_longlong, ctypes.c_ulonglong, - ] -float_class = [ctypes.c_float, ctypes.c_double] - -class Field(object): - def __init__(self, name, ctype): - self.name = name - self.ctype = ctype - def __repr__(self): - return '' % 
(self.name, self.ctype) - -def layout_addfield(layout, offset, ctype, prefix): - size = ctypes.sizeof(ctype) - name = prefix - i = 0 - while name in layout: - i += 1 - name = '%s_%d' % (prefix, i) - field = Field(name, ctype) - for i in range(offset, offset+size): - assert layout[i] is None, "%s overlaps %r" % (fieldname, layout[i]) - layout[i] = field - return field - -def size_and_sign(ctype): - return (ctypes.sizeof(ctype), - ctype in integer_class and ctype(-1).value > 0) - -def fixup_ctype(fieldtype, fieldname, expected_size_and_sign): - for typeclass in [integer_class, float_class]: - if fieldtype in typeclass: - for ctype in typeclass: - if size_and_sign(ctype) == expected_size_and_sign: - return ctype - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_char): - # for now, assume it is an array of chars; otherwise we'd also - # have to check the exact integer type of the elements of the array - size, sign = expected_size_and_sign - return ctypes.c_char * size - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_ubyte): - # grumble, fields of type 'c_char array' have automatic cast-to- - # Python-string behavior in ctypes, which may not be what you - # want, so here is the same with c_ubytes instead... 
- size, sign = expected_size_and_sign - return ctypes.c_ubyte * size - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) - - -C_HEADER = """ -#include -#include /* for offsetof() */ -#ifndef _WIN32 -# include /* FreeBSD: for uint64_t */ -#endif - -void dump(char* key, int value) { - printf("%s: %d\\n", key, value); -} -""" - -def run_example_code(filepath, eci, noerr=False): - executable = build_executable([filepath], eci, noerr=noerr) - output = py.process.cmdexec(executable) - section = None - for line in output.splitlines(): - line = line.strip() - if line.startswith('-+- '): # start of a new section - section = {} - elif line == '---': # section end - assert section is not None - yield section - section = None - elif line: - assert section is not None - key, value = line.split(': ') - section[key] = int(value) - -# ____________________________________________________________ - -def get_python_include_dir(): - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - return gcv['INCLUDEPY'] - -if __name__ == '__main__': - doc = """Example: - - ctypes_platform.py -h sys/types.h -h netinet/in.h - 'struct sockaddr_in' - sin_port c_int - """ - import sys, getopt - opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:') - if not args: - print >> sys.stderr, doc - else: - assert len(args) % 2 == 1 - headers = [] - for opt, value in opts: - if opt == '-h': - headers.append('#include <%s>' % (value,)) - name = args[0] - fields = [] - for i in range(1, len(args), 2): - ctype = getattr(ctypes, args[i+1]) - fields.append((args[i], ctype)) - - S = getstruct(name, '\n'.join(headers), fields) - - for key, value in S._fields_: - print key, value diff --git a/ctypes_configure/doc/configure.html b/ctypes_configure/doc/configure.html deleted file mode 100644 --- a/ctypes_configure/doc/configure.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - -ctypes configure - - -
    -

    ctypes configure

    -
    -

    idea

    -

    One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py).

    -
    -
    -

    installation

    -

    easy_install ctypes_configure

    -
    -
    -

    usage

    -

    sample.py explains in details how to use it.

    -
    -
    - - diff --git a/ctypes_configure/doc/configure.txt b/ctypes_configure/doc/configure.txt deleted file mode 100644 --- a/ctypes_configure/doc/configure.txt +++ /dev/null @@ -1,22 +0,0 @@ -================= -ctypes configure -================= - -idea -==== - -One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size\_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py). - -installation -============ - -``easy_install ctypes_configure`` - -usage -===== - -:source:`sample.py ` explains in details how to use it. diff --git a/ctypes_configure/doc/sample.py b/ctypes_configure/doc/sample.py deleted file mode 100644 --- a/ctypes_configure/doc/sample.py +++ /dev/null @@ -1,72 +0,0 @@ - -from ctypes_configure import configure -import ctypes - -class CConfigure: - _compilation_info_ = configure.ExternalCompilationInfo( - - # all lines landing in C header before includes - pre_include_lines = [], - - # list of .h files to include - includes = ['time.h', 'sys/time.h', 'unistd.h'], - - # list of directories to search for include files - include_dirs = [], - - # all lines landing in C header after includes - post_include_lines = [], - - # libraries to link with - libraries = [], - - # library directories - library_dirs = [], - - # additional C sources to compile with (that go to - # created .c files) - separate_module_sources = [], - - # additional existing C source file names - separate_module_files = [], - ) - - # get real int type out of hint and name - size_t = configure.SimpleType('size_t', ctypes.c_int) - - # grab value of numerical #define - NULL = configure.ConstantInteger('NULL') - - # grab #define, whether it's defined or not - EXISTANT = configure.Defined('NULL') - NOT_EXISTANT = configure.Defined('XXXNOTNULL') - - # check for existance of C functions - 
has_write = configure.Has('write') - no_xxxwrite = configure.Has('xxxwrite') - - # check for size of type - sizeof_size_t = configure.SizeOf('size_t') - - # structure, with given hints for interesting fields, - # types does not need to be too specific. - # all interesting fields would end up with right offset - # size and order - struct_timeval = configure.Struct('struct timeval',[ - ('tv_sec', ctypes.c_int), - ('tv_usec', ctypes.c_int)]) - -info = configure.configure(CConfigure) - -assert info['has_write'] -assert not info['no_xxxwrite'] -assert info['NULL'] == 0 -size_t = info['size_t'] -print "size_t in ctypes is ", size_t -assert ctypes.sizeof(size_t) == info['sizeof_size_t'] -assert info['EXISTANT'] -assert not info['NOT_EXISTANT'] -print -print "fields of struct timeval are " -for name, value in info['struct_timeval']._fields_: - print " ", name, " ", value diff --git a/ctypes_configure/dumpcache.py b/ctypes_configure/dumpcache.py deleted file mode 100644 --- a/ctypes_configure/dumpcache.py +++ /dev/null @@ -1,46 +0,0 @@ -import os, sys -import ctypes - - -def dumpcache(referencefilename, filename, config): - dirname = os.path.dirname(referencefilename) - filename = os.path.join(dirname, filename) - f = open(filename, 'w') - print >> f, 'import ctypes' - print >> f - names = config.keys() - names.sort() - print >> f, '__all__ = %r' % (tuple(names),) - print >> f - for key in names: - val = config[key] - if isinstance(val, (int, long)): - f.write("%s = %d\n" % (key, val)) - elif val is None: - f.write("%s = None\n" % key) - elif isinstance(val, ctypes.Structure.__class__): - f.write("class %s(ctypes.Structure):\n" % key) - f.write(" _fields_ = [\n") - for k, v in val._fields_: - f.write(" ('%s', %s),\n" % (k, ctypes_repr(v))) - f.write(" ]\n") - elif isinstance(val, (tuple, list)): - for x in val: - assert isinstance(x, (int, long, str)), \ - "lists of integers or strings only" - f.write("%s = %r\n" % (key, val)) - else: - # a simple type, hopefully - 
f.write("%s = %s\n" % (key, ctypes_repr(val))) - f.close() - print 'Wrote %s.' % (filename,) - sys.stdout.flush() - -def ctypes_repr(cls): - # ctypes_configure does not support nested structs so far - # so let's ignore it - if isinstance(cls, ctypes._SimpleCData.__class__): - return "ctypes." + cls.__name__ - if hasattr(cls, '_length_') and hasattr(cls, '_type_'): # assume an array - return '%s*%d' % (ctypes_repr(cls._type_), cls._length_) - raise NotImplementedError("saving of object with type %r" % type(cls)) diff --git a/ctypes_configure/stdoutcapture.py b/ctypes_configure/stdoutcapture.py deleted file mode 100644 --- a/ctypes_configure/stdoutcapture.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -A quick hack to capture stdout/stderr. -""" - -import os, sys - - -class Capture: - - def __init__(self, mixed_out_err = False): - "Start capture of the Unix-level stdout and stderr." - if (not hasattr(os, 'tmpfile') or - not hasattr(os, 'dup') or - not hasattr(os, 'dup2') or - not hasattr(os, 'fdopen')): - self.dummy = 1 - else: - try: - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() - except OSError: # bah? on at least one Windows box - self.dummy = 1 - return - self.dummy = 0 - # make new stdout/stderr files if needed - self.localoutfd = os.dup(1) - self.localerrfd = os.dup(2) - if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1: - self.saved_stdout = sys.stdout - sys.stdout = os.fdopen(self.localoutfd, 'w', 1) - else: - self.saved_stdout = None - if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2: - self.saved_stderr = sys.stderr - sys.stderr = os.fdopen(self.localerrfd, 'w', 0) - else: - self.saved_stderr = None - os.dup2(self.tmpout.fileno(), 1) - os.dup2(self.tmperr.fileno(), 2) - - def done(self): - "End capture and return the captured text (stdoutfile, stderrfile)." 
- if self.dummy: - import cStringIO - return cStringIO.StringIO(), cStringIO.StringIO() - else: - os.dup2(self.localoutfd, 1) - os.dup2(self.localerrfd, 2) - if self.saved_stdout is not None: - f = sys.stdout - sys.stdout = self.saved_stdout - f.close() - else: - os.close(self.localoutfd) - if self.saved_stderr is not None: - f = sys.stderr - sys.stderr = self.saved_stderr - f.close() - else: - os.close(self.localerrfd) - self.tmpout.seek(0) - self.tmperr.seek(0) - return self.tmpout, self.tmperr - - -if __name__ == '__main__': - # test - c = Capture() - try: - os.system('echo hello') - finally: - fout, ferr = c.done() - print 'Output:', `fout.read()` - print 'Error:', `ferr.read()` diff --git a/ctypes_configure/test/__init__.py b/ctypes_configure/test/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/test/test_configure.py b/ctypes_configure/test/test_configure.py deleted file mode 100644 --- a/ctypes_configure/test/test_configure.py +++ /dev/null @@ -1,212 +0,0 @@ -import py, sys, struct -from ctypes_configure import configure -from ctypes_configure.cbuild import ExternalCompilationInfo -import ctypes - -def test_dirent(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - """, - [("d_reclen", ctypes.c_ushort)]) - assert issubclass(dirent, ctypes.Structure) - ssize = (ctypes.sizeof(ctypes.c_long) + - ctypes.sizeof(ctypes.c_int) + - ctypes.sizeof(ctypes.c_ushort) + - 32) - extra_padding = (-ssize) % ctypes.alignment(ctypes.c_long) - - assert dirent._fields_ == [('_alignment', ctypes.c_long), - ('_pad0', ctypes.c_char), - ('_pad1', ctypes.c_char), - ('_pad2', ctypes.c_char), - ('_pad3', ctypes.c_char), - ('d_reclen', ctypes.c_ushort), - ] + [ - ('_pad%d' % n, ctypes.c_char) - for n in range(4, 4+32+extra_padding)] - assert ctypes.sizeof(dirent) == ssize + extra_padding - assert 
ctypes.alignment(dirent) == ctypes.alignment(ctypes.c_long) - -def test_fit_type(): - S = configure.getstruct("struct S", - """ - struct S { - signed char c; - unsigned char uc; - short s; - unsigned short us; - int i; - unsigned int ui; - long l; - unsigned long ul; - long long ll; - unsigned long long ull; - float f; - double d; - }; - """, - [("c", ctypes.c_int), - ("uc", ctypes.c_int), - ("s", ctypes.c_uint), - ("us", ctypes.c_int), - ("i", ctypes.c_int), - ("ui", ctypes.c_int), - ("l", ctypes.c_int), - ("ul", ctypes.c_int), - ("ll", ctypes.c_int), - ("ull", ctypes.c_int), - ("f", ctypes.c_double), - ("d", ctypes.c_float)]) - assert issubclass(S, ctypes.Structure) - fields = dict(S._fields_) - assert fields["c"] == ctypes.c_byte - assert fields["uc"] == ctypes.c_ubyte - assert fields["s"] == ctypes.c_short - assert fields["us"] == ctypes.c_ushort - assert fields["i"] == ctypes.c_int - assert fields["ui"] == ctypes.c_uint - assert fields["l"] == ctypes.c_long - assert fields["ul"] == ctypes.c_ulong - assert fields["ll"] == ctypes.c_longlong - assert fields["ull"] == ctypes.c_ulonglong - assert fields["f"] == ctypes.c_float - assert fields["d"] == ctypes.c_double - -def test_simple_type(): - ctype = configure.getsimpletype('test_t', - 'typedef unsigned short test_t;', - ctypes.c_int) - assert ctype == ctypes.c_ushort - -def test_constant_integer(): - value = configure.getconstantinteger('BLAH', - '#define BLAH (6*7)') - assert value == 42 - value = configure.getconstantinteger('BLAH', - '#define BLAH (-2147483648LL)') - assert value == -2147483648 - value = configure.getconstantinteger('BLAH', - '#define BLAH (3333333333ULL)') - assert value == 3333333333 - -def test_defined(): - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', '') - assert not res - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', - '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') - assert res - -def test_configure(): - configdir = configure.configdir - test_h = 
configdir.join('test_ctypes_platform.h') - test_h.write('#define XYZZY 42\n') - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - - res = configure.configure(CConfig) - assert issubclass(res['FILE'], ctypes.Structure) - assert res == {'FILE': res['FILE'], - 'ushort': ctypes.c_ushort, - 'XYZZY': 42} - -def test_ifdef(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines = ['/* a C comment */', - '#define XYZZY 42', - 'typedef int foo;', - 'struct s {', - 'int i;', - 'double f;' - '};']) - - - s = configure.Struct('struct s', [('i', ctypes.c_int)], - ifdef='XYZZY') - z = configure.Struct('struct z', [('i', ctypes.c_int)], - ifdef='FOOBAR') - - foo = configure.SimpleType('foo', ifdef='XYZZY') - bar = configure.SimpleType('bar', ifdef='FOOBAR') - - res = configure.configure(CConfig) - assert res['s'] is not None - assert res['z'] is None - assert res['foo'] is not None - assert res['bar'] is None - -def test_nested_structs(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines=""" - struct x { - int foo; - unsigned long bar; - }; - struct y { - char c; - struct x x; - }; - """.split("\n")) - - x = configure.Struct("struct x", [("bar", ctypes.c_short)]) - y = configure.Struct("struct y", [("x", x)]) - - res = configure.configure(CConfig) - c_x = res["x"] - c_y = res["y"] - c_y_fields = dict(c_y._fields_) - assert issubclass(c_x , ctypes.Structure) - assert issubclass(c_y, ctypes.Structure) - assert c_y_fields["x"] is c_x - -def test_array(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - 
""", - [("d_name", ctypes.c_char * 0)]) - assert dirent.d_name.size == 32 - -def test_has(): - assert configure.has("x", "int x = 3;") - assert not configure.has("x", "") - # has() should also not crash if it is given an invalid #include - assert not configure.has("x", "#include ") - -def test_check_eci(): - eci = ExternalCompilationInfo() - assert configure.check_eci(eci) - eci = ExternalCompilationInfo(libraries=['some_name_that_doesnt_exist_']) - assert not configure.check_eci(eci) - -def test_sizeof(): - assert configure.sizeof("char", ExternalCompilationInfo()) == 1 - -def test_memory_alignment(): - a = configure.memory_alignment() - print a - assert a % struct.calcsize("P") == 0 diff --git a/ctypes_configure/test/test_dumpcache.py b/ctypes_configure/test/test_dumpcache.py deleted file mode 100644 --- a/ctypes_configure/test/test_dumpcache.py +++ /dev/null @@ -1,61 +0,0 @@ -import ctypes -from ctypes_configure import configure, dumpcache -from ctypes_configure.cbuild import ExternalCompilationInfo - - -def test_cache(): - configdir = configure.configdir - test_h = configdir.join('test_ctypes_platform2.h') - test_h.write('#define XYZZY 42\n' - "#define large 2147483648L\n") - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - XUZ = configure.Has('XUZ') - large = configure.DefinedConstantInteger('large') - undef = configure.Defined('really_undefined') - - res = configure.configure(CConfig) - - cachefile = configdir.join('cache') - dumpcache.dumpcache('', str(cachefile), res) - - d = {} - execfile(str(cachefile), d) - assert d['XYZZY'] == res['XYZZY'] - assert d['ushort'] == res['ushort'] - assert d['FILE']._fields_ == res['FILE']._fields_ - assert d['FILE'].__mro__[1:] == res['FILE'].__mro__[1:] - 
assert d['undef'] == res['undef'] - assert d['large'] == res['large'] - assert d['XUZ'] == res['XUZ'] - - -def test_cache_array(): - configdir = configure.configdir - res = {'foo': ctypes.c_short * 27} - cachefile = configdir.join('cache_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] - -def test_cache_array_array(): - configdir = configure.configdir - res = {'foo': (ctypes.c_int * 2) * 3} - cachefile = configdir.join('cache_array_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] From pypy.commits at gmail.com Tue Dec 27 05:50:53 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 27 Dec 2016 02:50:53 -0800 (PST) Subject: [pypy-commit] pypy default: update version Message-ID: <5862478d.973f1c0a.5201d.45c4@mx.google.com> Author: Armin Rigo Branch: Changeset: r89242:53bc748f8ebb Date: 2016-12-27 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/53bc748f8ebb/ Log: update version diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.9.1 +Version: 1.9.2 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski From pypy.commits at gmail.com Tue Dec 27 05:55:22 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 27 Dec 2016 02:55:22 -0800 (PST) Subject: [pypy-commit] cffi default: Fill in whatsnew Message-ID: <5862489a.ce941c0a.d731b.5b07@mx.google.com> Author: Armin Rigo Branch: Changeset: r2843:cff0b081741a Date: 2016-12-27 11:55 +0100 http://bitbucket.org/cffi/cffi/changeset/cff0b081741a/ Log: Fill in whatsnew diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -10,10 +10,15 @@ PyObject_Malloc()+memset() to handle ffi.new() with a default allocator. Speeds up ``ffi.new(large-array)`` where most of the time you never touch most of the array. (But avoid doing that too often: - on 32-bit PyPy it will quickly exhaust the address space. This case - is best handled by explicit calls to calloc() and free().) + on 32-bit PyPy it will quickly exhaust the address space. If possible, + use instead explicit calls to calloc() and free().) -* some OS/X build fixes ("only with Xcode but without CLT"). +* Some OS/X build fixes ("only with Xcode but without CLT"). + +* Improve a couple of error messages: when getting mismatched versions + of cffi and its backend; and when calling functions which cannot be + called with libffi because an argument is a struct that is "too + complicated" (not a struct *pointer*). 
v1.9 From pypy.commits at gmail.com Tue Dec 27 06:38:21 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 03:38:21 -0800 (PST) Subject: [pypy-commit] pypy issue2444: Delay PyMemoryViewObject configuration so we can use the standard mechanism to configure Py_buffer Message-ID: <586252ad.ce941c0a.d731b.6bd4@mx.google.com> Author: Ronan Lamy Branch: issue2444 Changeset: r89243:9999f71d34a9 Date: 2016-12-27 12:37 +0100 http://bitbucket.org/pypy/pypy/changeset/9999f71d34a9/ Log: Delay PyMemoryViewObject configuration so we can use the standard mechanism to configure Py_buffer diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -630,7 +630,8 @@ PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = rffi.CStruct( "Py_buffer", +Py_buffer = cpython_struct( + "Py_buffer", ( ('buf', rffi.VOIDP), ('obj', PyObject), ('len', Py_ssize_t), @@ -647,10 +648,7 @@ ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), ('internal', rffi.VOIDP), - hints={'size': 6 * rffi.sizeof(Py_ssize_tP) + 2 * rffi.sizeof(Py_ssize_t) + - 2 * rffi.sizeof(rffi.INT_real) + rffi.sizeof(rffi.CCHARP) + - Py_MAX_FMT * rffi.sizeof(rffi.UCHAR) + - 2 * Py_MAX_NDIMS * rffi.sizeof(Py_ssize_t)}) +)) Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -17,7 +17,9 @@ PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) PyMemoryViewObjectFields = PyObjectFields + \ (("view", Py_buffer),) -cpython_struct("PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct) +cpython_struct( + "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, + level=2) @bootstrap_function def 
init_memoryobject(space): From pypy.commits at gmail.com Tue Dec 27 06:45:32 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 03:45:32 -0800 (PST) Subject: [pypy-commit] pypy issue2444: Py_buffer: keep the same struct name as CPython Message-ID: <5862545c.45f6c20a.23a87.cfc5@mx.google.com> Author: Ronan Lamy Branch: issue2444 Changeset: r89244:bb8de55c4722 Date: 2016-12-27 12:44 +0100 http://bitbucket.org/pypy/pypy/changeset/bb8de55c4722/ Log: Py_buffer: keep the same struct name as CPython diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -145,7 +145,7 @@ /* Py3k buffer interface, adapted for PyPy */ #define Py_MAX_NDIMS 32 #define Py_MAX_FMT 128 -typedef struct Py_buffer { +typedef struct bufferinfo { void *buf; PyObject *obj; /* owned reference */ Py_ssize_t len; From pypy.commits at gmail.com Tue Dec 27 10:16:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 07:16:41 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Add Py_buffer to the pseudo-header Message-ID: <586285d9.832cc20a.e7ec1.e2ff@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89245:cb02fc0344cb Date: 2016-12-27 16:15 +0100 http://bitbucket.org/pypy/pypy/changeset/cb02fc0344cb/ Log: Add Py_buffer to the pseudo-header diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -624,9 +624,41 @@ typedef struct _typeobject PyTypeObject; typedef void (*freefunc)(void *); + +/* Py3k buffer interface, adapted for PyPy */ +#define Py_MAX_NDIMS 32 +#define Py_MAX_FMT 128 +typedef struct bufferinfo { + void *buf; + PyObject *obj; /* owned reference */ + Py_ssize_t len; + + /* This is Py_ssize_t so it can be + pointed to by strides in simple case.*/ + Py_ssize_t itemsize; + int readonly; + int ndim; + char *format; + Py_ssize_t *shape; + Py_ssize_t 
*strides; + Py_ssize_t *suboffsets; /* alway NULL for app-level objects*/ + unsigned char _format[Py_MAX_FMT]; + Py_ssize_t _strides[Py_MAX_NDIMS]; + Py_ssize_t _shape[Py_MAX_NDIMS]; + /* static store for shape and strides of + mono-dimensional buffers. */ + /* Py_ssize_t smalltable[2]; */ + void *internal; /* always NULL for app-level objects */ +} Py_buffer; + + +typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); +typedef void (*releasebufferproc)(PyObject *, Py_buffer *); +/* end Py3k buffer interface */ + """) -Py_ssize_t = lltype.Typedef(h.definitions['Py_ssize_t'], 'Py_ssize_t') +Py_ssize_t = h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -645,26 +677,9 @@ PyVarObjectStruct = h.definitions['PyVarObject'] PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = cpython_struct( - "Py_buffer", ( - ('buf', rffi.VOIDP), - ('obj', PyObject), - ('len', Py_ssize_t), - ('itemsize', Py_ssize_t), +Py_buffer = h.definitions['Py_buffer'] +Py_bufferP = lltype.Ptr(Py_buffer) - ('readonly', lltype.Signed), - ('ndim', lltype.Signed), - ('format', rffi.CCHARP), - ('shape', Py_ssize_tP), - ('strides', Py_ssize_tP), - ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), - ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('suboffsets', Py_ssize_tP), - #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - ('internal', rffi.VOIDP) - )) -Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() def is_PyObject(TYPE): diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -666,15 +666,6 @@ self.fields = fields self.TYPE = TYPE - def config_fields(self): - result = [] - for name, value in self.fields: - if isinstance(value, DelayedStruct): - result.append((name, value.TYPE)) - else: - result.append((name, value)) - return result - def __repr__(self): 
return "".format(**vars(self)) @@ -725,7 +716,7 @@ configname = type_name.replace(' ', '__') if configure_now: setattr(self._Config, configname, - rffi_platform.Struct(type_name, struct.config_fields())) + rffi_platform.Struct(type_name, struct.fields)) self._TYPES[configname] = struct.TYPE else: cpython_struct(type_name, struct.fields, forward=struct.TYPE) diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -90,9 +90,9 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest1']) - == ('Py_ssize_t', 'Py_ssize_t arg0')) + == ('Signed', 'Signed arg0')) assert (api.c_function_signature(db, api.FUNCTIONS['PyPy_TypedefTest2']) - == ('Py_ssize_t *', 'Py_ssize_t *arg0')) + == ('Signed *', 'Signed *arg0')) PyPy_TypedefTest1(space, 0) ppos = lltype.malloc(api.Py_ssize_tP.TO, 1, flavor='raw') @@ -100,7 +100,7 @@ PyPy_TypedefTest2(space, ppos) lltype.free(ppos, flavor='raw') - at pytest.mark.skipif(os.environ.get('USER')=='root', + at pytest.mark.skipif(os.environ.get('USER')=='root', reason='root can write to all files') def test_copy_header_files(tmpdir): api.copy_header_files(tmpdir, True) diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -100,7 +100,7 @@ includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) Object = foo_h.definitions['Object'] - assert isinstance(Object, lltype.ForwardReference) or hash(Object) + assert isinstance(Object, lltype.ForwardReference) def test_recursive(tmpdir): cdef = """ From pypy.commits at gmail.com Tue Dec 27 10:48:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 07:48:43 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Handle FILE* 
Message-ID: <58628d5b.96a61c0a.46f2d.d2ce@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89246:c7d8d8b857b2 Date: 2016-12-18 02:56 +0000 http://bitbucket.org/pypy/pypy/changeset/c7d8d8b857b2/ Log: Handle FILE* diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -3,6 +3,7 @@ from cffi.commontypes import COMMON_TYPES, resolve_common_type import pycparser import weakref, re +from rpython.rlib.rfile import FILEP from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rfficache, rffi_platform @@ -644,7 +645,7 @@ CNAME_TO_LLTYPE = { 'char': rffi.CHAR, 'double': rffi.DOUBLE, 'long double': rffi.LONGDOUBLE, - 'float': rffi.FLOAT} + 'float': rffi.FLOAT, 'FILE': FILEP.TO} def add_inttypes(): for name in rffi.TYPES: @@ -702,6 +703,8 @@ self.macros[name] = value def new_struct(self, obj): + if obj.name == '_IO_FILE': # cffi weirdness + return cname_to_lltype('FILE') struct = DelayedStruct(obj.name, None, lltype.ForwardReference()) # Cache it early, to avoid infinite recursion self.structs[obj] = struct From pypy.commits at gmail.com Tue Dec 27 10:48:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 07:48:48 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Handle const pointers (we probably only care about 'const char *') Message-ID: <58628d60.973f1c0a.5201d.b73b@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89249:6c787b358130 Date: 2016-12-18 20:34 +0000 http://bitbucket.org/pypy/pypy/changeset/6c787b358130/ Log: Handle const pointers (we probably only care about 'const char *') diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -695,9 +695,9 @@ self.structs.update(other.structs) self.includes.append(other) - def add_typedef(self, name, obj, configure_now=False): + def add_typedef(self, 
name, obj, quals, configure_now=False): assert name not in self.definitions - tp = self.convert_type(obj) + tp = self.convert_type(obj, quals) if isinstance(tp, DelayedStruct): tp = self.realize_struct(tp, name, configure_now=configure_now) self.definitions[name] = tp @@ -735,7 +735,7 @@ continue if name.startswith('typedef '): name = name[8:] - self.add_typedef(name, obj, configure_now=configure_now) + self.add_typedef(name, obj, quals, configure_now=configure_now) elif name.startswith('macro '): name = name[6:] self.add_macro(name, obj) @@ -743,7 +743,7 @@ if name in self._TYPES: self._TYPES[name].become(TYPE) - def convert_type(self, obj): + def convert_type(self, obj, quals=0): if isinstance(obj, model.PrimitiveType): return cname_to_lltype(obj.name) elif isinstance(obj, model.StructType): @@ -754,11 +754,16 @@ TO = self.convert_type(obj.totype) if TO is lltype.Void: return rffi.VOIDP - elif isinstance(obj.totype, model.PrimitiveType): - return rffi.CArrayPtr(TO) elif isinstance(TO, DelayedStruct): TO = TO.TYPE - return lltype.Ptr(TO) + if isinstance(TO, lltype.ContainerType): + return lltype.Ptr(TO) + else: + if obj.quals & model.Q_CONST: + return lltype.Ptr(lltype.Array( + TO, hints={'nolength': True, 'render_as_const': True})) + else: + return rffi.CArrayPtr(TO) elif isinstance(obj, model.FunctionPtrType): if obj.ellipsis: raise NotImplementedError diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ b/pypy/module/cpyext/test/test_cparser.py @@ -129,3 +129,16 @@ Object = foo_h.definitions['Object'] assert isinstance(Object, lltype.Struct) hash(Object) + +def test_const(tmpdir): + cdef = """ + typedef struct { + const char * const foo; + } bar; + """ + (tmpdir / 'foo.h').write(cdef) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], + includes=['sys/types.h', 'foo.h']) + hdr = parse_source(cdef, eci=eci, configure_now=True) + assert 
hdr.definitions['bar'].c_foo == rffi.CONST_CCHARP != rffi.CCHARP From pypy.commits at gmail.com Tue Dec 27 10:48:45 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 07:48:45 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: INT vs INT_real mess Message-ID: <58628d5d.0f341c0a.add22.b32a@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89247:89ef63c39400 Date: 2016-12-18 03:42 +0000 http://bitbucket.org/pypy/pypy/changeset/89ef63c39400/ Log: INT vs INT_real mess diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -657,6 +657,7 @@ CNAME_TO_LLTYPE[name] = rfficache.platform.types[rname] add_inttypes() +CNAME_TO_LLTYPE['int'] = rffi.INT_real def cname_to_lltype(name): return CNAME_TO_LLTYPE[name] From pypy.commits at gmail.com Tue Dec 27 10:48:47 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 07:48:47 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Use cffi's copy of pycparser Message-ID: <58628d5f.ce841c0a.66e63.cd48@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89248:f895e2854833 Date: 2016-12-18 14:03 +0000 http://bitbucket.org/pypy/pypy/changeset/f895e2854833/ Log: Use cffi's copy of pycparser diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -1,7 +1,10 @@ from collections import OrderedDict from cffi import api, model from cffi.commontypes import COMMON_TYPES, resolve_common_type -import pycparser +try: + from cffi import _pycparser as pycparser +except ImportError: + import pycparser import weakref, re from rpython.rlib.rfile import FILEP from rpython.rtyper.lltypesystem import rffi, lltype From pypy.commits at gmail.com Tue Dec 27 14:04:21 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 11:04:21 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Rename 
api.h to api.object_h Message-ID: <5862bb35.12921c0a.c9ba.1b32@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89250:f3620ea3d56d Date: 2016-12-27 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/f3620ea3d56d/ Log: Rename api.h to api.object_h diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -601,7 +601,7 @@ % (cpyname, )) build_exported_objects() -h = parse_source(""" +object_h = parse_source(""" typedef ssize_t Py_ssize_t; #define PyObject_HEAD \ @@ -658,7 +658,7 @@ """) -Py_ssize_t = h.definitions['Py_ssize_t'] +Py_ssize_t = object_h.definitions['Py_ssize_t'] Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) size_t = rffi.ULONG ADDR = lltype.Signed @@ -666,18 +666,18 @@ # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. -PyTypeObject = h.definitions['PyTypeObject'] +PyTypeObject = object_h.definitions['PyTypeObject'] PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = h.definitions['PyObject'] +PyObjectStruct = object_h.definitions['PyObject'] PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -PyVarObjectStruct = h.definitions['PyVarObject'] +PyVarObjectStruct = object_h.definitions['PyVarObject'] PyVarObject = lltype.Ptr(PyVarObjectStruct) -Py_buffer = h.definitions['Py_buffer'] +Py_buffer = object_h.definitions['Py_buffer'] Py_bufferP = lltype.Ptr(Py_buffer) diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -5,14 +5,14 @@ Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) from pypy.module.cpyext.pyobject 
import PyObject, make_ref, from_ref from pypy.module.cpyext.modsupport import PyMethodDef -from pypy.module.cpyext.api import Py_bufferP, h +from pypy.module.cpyext.api import Py_bufferP, object_h P, FT, PyO = Ptr, FuncType, PyObject PyOPtr = Ptr(lltype.Array(PyO, hints={'nolength': True})) #freefunc = P(FT([rffi.VOIDP], Void)) -freefunc = h.definitions['freefunc'] +freefunc = object_h.definitions['freefunc'] destructor = P(FT([PyO], Void)) printfunc = P(FT([PyO, FILEP, rffi.INT_real], rffi.INT)) @@ -236,5 +236,3 @@ ("tp_del", destructor), #N ]) cpython_struct("PyTypeObject", PyTypeObjectFields, PyTypeObject) - - From pypy.commits at gmail.com Tue Dec 27 14:04:23 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 27 Dec 2016 11:04:23 -0800 (PST) Subject: [pypy-commit] pypy rffi-parser-2: Add .gettype() Message-ID: <5862bb37.e7b1c20a.678f4.7a28@mx.google.com> Author: Ronan Lamy Branch: rffi-parser-2 Changeset: r89251:1fc5cb8950e1 Date: 2016-12-27 20:03 +0100 http://bitbucket.org/pypy/pypy/changeset/1fc5cb8950e1/ Log: Add .gettype() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -658,27 +658,27 @@ """) -Py_ssize_t = object_h.definitions['Py_ssize_t'] -Py_ssize_tP = rffi.CArrayPtr(Py_ssize_t) +Py_ssize_t = object_h.gettype('Py_ssize_t') +Py_ssize_tP = object_h.gettype('Py_ssize_t *') size_t = rffi.ULONG ADDR = lltype.Signed # Note: as a special case, "PyObject" is the pointer type in RPython, # corresponding to "PyObject *" in C. We do that only for PyObject. # For example, "PyTypeObject" is the struct type even in RPython. 
-PyTypeObject = object_h.definitions['PyTypeObject'] -PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -PyObjectStruct = object_h.definitions['PyObject'] -PyObject = lltype.Ptr(PyObjectStruct) +PyTypeObject = object_h.gettype('PyTypeObject') +PyTypeObjectPtr = object_h.gettype('PyTypeObject *') +PyObjectStruct = object_h.gettype('PyObject') +PyObject = object_h.gettype('PyObject *') PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_pypy_link", lltype.Signed), ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) -PyVarObjectStruct = object_h.definitions['PyVarObject'] -PyVarObject = lltype.Ptr(PyVarObjectStruct) +PyVarObjectStruct = object_h.gettype('PyVarObject') +PyVarObject = object_h.gettype('PyVarObject *') -Py_buffer = object_h.definitions['Py_buffer'] -Py_bufferP = lltype.Ptr(Py_buffer) +Py_buffer = object_h.gettype('Py_buffer') +Py_bufferP = object_h.gettype('Py_buffer *') @specialize.memo() diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -284,8 +284,7 @@ return self.parse_type_and_quals(cdecl)[0] def parse_type_and_quals(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] - assert not macros + ast, _, _ = self._parse('void __dummy(\n%s\n);' % cdecl) exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) @@ -777,6 +776,13 @@ else: raise NotImplementedError + def gettype(self, cdecl): + obj = self.ctx.parse_type(cdecl) + result = self.convert_type(obj) + if isinstance(result, DelayedStruct): + result = result.TYPE + return result + def parse_source(source, includes=None, eci=None, configure_now=False): ctx = Parser() diff --git a/pypy/module/cpyext/test/test_cparser.py b/pypy/module/cpyext/test/test_cparser.py --- a/pypy/module/cpyext/test/test_cparser.py +++ 
b/pypy/module/cpyext/test/test_cparser.py @@ -99,7 +99,7 @@ include_dirs=[str(tmpdir)], includes=['sys/types.h', 'foo.h']) foo_h = parse_source(cdef, eci=eci) - Object = foo_h.definitions['Object'] + Object = foo_h.gettype('Object') assert isinstance(Object, lltype.ForwardReference) def test_recursive(tmpdir): @@ -142,3 +142,24 @@ includes=['sys/types.h', 'foo.h']) hdr = parse_source(cdef, eci=eci, configure_now=True) assert hdr.definitions['bar'].c_foo == rffi.CONST_CCHARP != rffi.CCHARP + +def test_gettype(tmpdir): + decl = """ + typedef ssize_t Py_ssize_t; + + #define PyObject_HEAD \ + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ + + typedef struct { + PyObject_HEAD + double ob_fval; + } TestFloatObject; + """ + hdr = tmpdir / 'header.h' + hdr.write(decl) + eci = ExternalCompilationInfo( + include_dirs=[str(tmpdir)], includes=['sys/types.h', 'header.h']) + res = parse_source(decl, eci=eci, configure_now=True) + assert res.gettype('Py_ssize_t') == rffi.SSIZE_T + assert res.gettype('TestFloatObject *').TO.c_ob_refcnt == rffi.SSIZE_T From pypy.commits at gmail.com Tue Dec 27 15:43:18 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 27 Dec 2016 12:43:18 -0800 (PST) Subject: [pypy-commit] pypy default: merge issue2444 into default Message-ID: <5862d266.8675c20a.dfef3.a6a8@mx.google.com> Author: Matti Picus Branch: Changeset: r89252:dd0eb4e98059 Date: 2016-12-27 19:46 +0200 http://bitbucket.org/pypy/pypy/changeset/dd0eb4e98059/ Log: merge issue2444 into default diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -637,18 +637,18 @@ ('len', Py_ssize_t), ('itemsize', Py_ssize_t), - ('readonly', lltype.Signed), - ('ndim', lltype.Signed), + ('readonly', rffi.INT_real), + ('ndim', rffi.INT_real), ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('suboffsets', Py_ssize_tP), ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), ('_shape', 
rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - ('internal', rffi.VOIDP) - )) + ('internal', rffi.VOIDP), +)) Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( PyObjectFields, CANNOT_FAIL, - cpython_api, bootstrap_function, cpython_struct, build_type_checkers) + cpython_api, bootstrap_function, build_type_checkers) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref, Py_DecRef, make_typedescr from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.__builtin__.interp_classobj import W_ClassObject, W_InstanceObject diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,6 +5,14 @@ extern "C" { #endif +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! 
*/ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; + diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,14 +1,59 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, make_ref, incref, from_ref + Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, PyObjectFields, cpython_struct, + bootstrap_function, Py_bufferP) +from pypy.module.cpyext.pyobject import (PyObject, make_ref, as_pyobj, incref, + decref, from_ref, make_typedescr) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen from pypy.objspace.std.memoryobject import W_MemoryView +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import -PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") +PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") - at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + +PyMemoryViewObjectStruct = lltype.ForwardReference() +PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) +PyMemoryViewObjectFields = PyObjectFields + \ + (("view", Py_buffer),) +cpython_struct( + "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, + level=2) + + at bootstrap_function +def init_memoryobject(space): + "Type description of PyDictObject" + make_typedescr(W_MemoryView.typedef, + basestruct=PyMemoryViewObject.TO, + attach=memory_attach, + dealloc=memory_dealloc, + #realize=memory_realize, + ) + +def memory_attach(space, py_obj, w_obj, w_userdata=None): + """ + Fills a newly allocated PyMemoryViewObject with the given W_MemoryView object. 
+ """ + py_obj = rffi.cast(PyMemoryViewObject, py_obj) + py_obj.c_view.c_obj = rffi.cast(PyObject, 0) + +def memory_realize(space, py_obj): + """ + Creates the memory object in the interpreter + """ + raise oefmt(space.w_NotImplementedError, "cannot call this yet") + + at cpython_api([PyObject], lltype.Void, header=None) +def memory_dealloc(space, py_obj): + mem_obj = rffi.cast(PyMemoryViewObject, py_obj) + if mem_obj.c_view.c_obj: + decref(space, mem_obj.c_view.c_obj) + mem_obj.c_view.c_obj = rffi.cast(PyObject, 0) + _dealloc(space, py_obj) + + + at cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, error=-1) def PyObject_GetBuffer(space, w_obj, view, flags): """Export obj into a Py_buffer, view. These arguments must @@ -33,11 +78,40 @@ try: view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) except ValueError: - raise BufferError("could not create buffer from object") + if not space.isinstance_w(w_obj, space.w_str): + # XXX Python 3? + raise BufferError("could not create buffer from object") + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_obj), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) ret = fill_Py_buffer(space, buf, view) view.c_obj = make_ref(space, w_obj) return ret + at cpython_api([PyObject], Py_bufferP, error=CANNOT_FAIL) +def PyMemoryView_GET_BUFFER(space, w_obj): + """Return a pointer to the buffer-info structure wrapped by the given + object. The object must be a memoryview instance; this macro doesn't + check its type, you must do it yourself or you will risk crashes.""" + if not isinstance(w_obj, W_MemoryView): + return lltype.nullptr(Py_buffer) + py_memobj = rffi.cast(PyMemoryViewObject, as_pyobj(space, w_obj)) # no inc_ref + view = py_memobj.c_view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? 
+ return view + fill_Py_buffer(space, w_obj.buf, view) + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + #view.c_obj = make_ref(space, w_obj) # NO - this creates a ref cycle! + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) + return view + def fill_Py_buffer(space, buf, view): # c_buf, c_obj have been filled in ndim = buf.getndim() @@ -111,7 +185,7 @@ sd *= dim return 1 - at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([Py_bufferP, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fort is 'C') or Fortran-style (fort is 'F') contiguous or either one @@ -132,7 +206,7 @@ def PyMemoryView_FromObject(space, w_obj): return space.call_method(space.builtin, "memoryview", w_obj) - at cpython_api([lltype.Ptr(Py_buffer)], PyObject) + at cpython_api([Py_bufferP], PyObject) def PyMemoryView_FromBuffer(space, view): """Create a memoryview object wrapping the given buffer-info structure view. The memoryview object then owns the buffer, which means you shouldn't @@ -149,29 +223,3 @@ # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BASE') - at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) -def PyMemoryView_GET_BUFFER(space, w_obj): - """Return a pointer to the buffer-info structure wrapped by the given - object. The object must be a memoryview instance; this macro doesn't - check its type, you must do it yourself or you will risk crashes.""" - view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - if not isinstance(w_obj, W_MemoryView): - return view - ndim = w_obj.buf.getndim() - if ndim >= Py_MAX_NDIMS: - # XXX warn? 
- return view - fill_Py_buffer(space, w_obj.buf, view) - try: - view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) - view.c_obj = make_ref(space, w_obj) - rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) - isstr = False - except ValueError: - w_s = w_obj.descr_tobytes(space) - view.c_obj = make_ref(space, w_s) - view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) - rffi.setintfield(view, 'c_readonly', 1) - isstr = True - return view - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen +from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl, Py_buffer, Py_bufferP) @@ -12,8 +13,8 @@ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, getbufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, Py_DecRef + readbufferproc, getbufferproc, releasebufferproc, ssizessizeobjargproc) +from pypy.module.cpyext.pyobject import make_ref, decref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State @@ -98,7 +99,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): @@ -109,7 +110,7 @@ if (not 
ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_ternaryfunc(space, w_self, w_args, func): @@ -131,7 +132,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) arg3 = space.w_None if len(args_w) > 1: arg3 = args_w[1] @@ -321,11 +322,14 @@ # Similar to Py_buffer _immutable_ = True - def __init__(self, ptr, size, w_obj, format='B', shape=None, - strides=None, ndim=1, itemsize=1, readonly=True): + def __init__(self, space, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True, + releasebuffer=None): + self.space = space self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.pyobj = make_ref(space, w_obj) self.format = format if not shape: self.shape = [size] @@ -338,6 +342,27 @@ self.ndim = ndim self.itemsize = itemsize self.readonly = readonly + self.releasebufferproc = releasebuffer + + def releasebuffer(self): + if self.pyobj: + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return + if self.releasebufferproc: + func_target = rffi.cast(releasebufferproc, self.releasebufferproc) + with lltype.scoped_alloc(Py_buffer) as pybuf: + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = rffi.cast(rffi.INT_real, self.ndim) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + pybuf.c_format = rffi.str2charp(self.format) + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + self.releasebufferproc = rffi.cast(rffi.VOIDP, 0) def getlength(self): return self.size @@ -367,26 +392,61 @@ # absolutely no safety checks, what could go 
wrong? self.ptr[index] = char +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + decref(space, py_obj) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) + buf = CPyBuffer(space, ptr[0], size, w_self, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getwritebuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + decref(space, py_obj) + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(ptr[0], size, w_self, readonly=False)) + buf = CPyBuffer(space, ptr[0], size, w_self, readonly=False, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + 
releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + decref(space, py_obj) with lltype.scoped_alloc(Py_buffer) as pybuf: _flags = 0 if space.len_w(w_args) > 0: @@ -407,10 +467,13 @@ format = rffi.charp2str(pybuf.c_format) else: format = 'B' - return space.newbuffer(CPyBuffer(ptr, size, w_self, format=format, + buf = CPyBuffer(space, ptr, size, w_self, format=format, ndim=ndim, shape=shape, strides=strides, itemsize=pybuf.c_itemsize, - readonly=widen(pybuf.c_readonly))) + readonly=widen(pybuf.c_readonly), + releasebuffer = releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -10,11 +10,7 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP -PyDateTime_Date = rffi.VOIDP -PyDateTime_DateTime = rffi.VOIDP -PyDateTime_Time = rffi.VOIDP wrapperbase = rffi.VOIDP FILE = rffi.VOIDP PyFileObject = rffi.VOIDP diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -185,19 +185,6 @@ (initproc)PyMyArray_init, /* tp_init */ }; -static PyObject* -test_buffer(PyObject* self, PyObject* args) -{ - Py_buffer* view = NULL; - PyObject* obj = PyTuple_GetItem(args, 0); - PyObject* memoryview = PyMemoryView_FromObject(obj); - if (memoryview == NULL) - return PyInt_FromLong(-1); - view = PyMemoryView_GET_BUFFER(memoryview); - Py_DECREF(memoryview); - return PyInt_FromLong(view->len); -} - /* Copied from numpy tests */ /* * Create python string from a FLAG and or the corresponding PyBuf flag @@ -308,7 +295,6 @@ static PyMethodDef buffer_functions[] = { - {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, {"get_buffer_info", 
(PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -64,3 +64,58 @@ b = buffer(a) assert module.roundtrip(b) == 'text' + def test_releasebuffer(self): + module = self.import_extension('foo', [ + ("create_test", "METH_NOARGS", + """ + PyObject *obj; + obj = PyObject_New(PyObject, (PyTypeObject*)type); + return obj; + """), + ("get_cnt", "METH_NOARGS", + 'return PyLong_FromLong(cnt);')], prologue=""" + static float test_data = 42.f; + static int cnt=0; + static PyHeapTypeObject * type=NULL; + + int getbuffer(PyObject *obj, Py_buffer *view, int flags) { + + cnt ++; + memset(view, 0, sizeof(Py_buffer)); + view->obj = obj; + view->ndim = 0; + view->buf = (void *) &test_data; + view->itemsize = sizeof(float); + view->len = 1; + view->strides = NULL; + view->shape = NULL; + view->format = "f"; + return 0; + } + + void releasebuffer(PyObject *obj, Py_buffer *view) { + cnt --; + } + """, more_init=""" + type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + + type->ht_type.tp_name = "Test"; + type->ht_type.tp_basicsize = sizeof(PyObject); + type->ht_name = PyString_FromString("Test"); + type->ht_type.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HEAPTYPE | Py_TPFLAGS_HAVE_NEWBUFFER; + type->ht_type.tp_flags &= ~Py_TPFLAGS_HAVE_GC; + + type->ht_type.tp_as_buffer = &type->as_buffer; + type->as_buffer.bf_getbuffer = getbuffer; + type->as_buffer.bf_releasebuffer = releasebuffer; + + if (PyType_Ready(&type->ht_type) < 0) INITERROR; + """, ) + import gc + assert module.get_cnt() == 0 + a = memoryview(module.create_test()) + assert module.get_cnt() == 1 + del a + gc.collect(); gc.collect(); gc.collect() + assert module.get_cnt() == 0 diff --git a/pypy/module/cpyext/test/test_memoryobject.py 
b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -20,16 +20,16 @@ def test_frombuffer(self, space, api): w_buf = space.newbuffer(StringBuffer("hello")) w_memoryview = api.PyMemoryView_FromObject(w_buf) - w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) - assert w_view.c_ndim == 1 - f = rffi.charp2str(w_view.c_format) + view = api.PyMemoryView_GET_BUFFER(w_memoryview) + assert view.c_ndim == 1 + f = rffi.charp2str(view.c_format) assert f == 'B' - assert w_view.c_shape[0] == 5 - assert w_view.c_strides[0] == 1 - assert w_view.c_len == 5 - o = rffi.charp2str(w_view.c_buf) + assert view.c_shape[0] == 5 + assert view.c_strides[0] == 1 + assert view.c_len == 5 + o = rffi.charp2str(view.c_buf) assert o == 'hello' - w_mv = api.PyMemoryView_FromBuffer(w_view) + w_mv = api.PyMemoryView_FromBuffer(view) for f in ('format', 'itemsize', 'ndim', 'readonly', 'shape', 'strides', 'suboffsets'): w_f = space.wrap(f) @@ -37,7 +37,7 @@ space.getattr(w_memoryview, w_f)) class AppTestBufferProtocol(AppTestCpythonExtensionBase): - def test_buffer_protocol(self): + def test_buffer_protocol_app(self): import struct module = self.import_module(name='buffer_test') arr = module.PyMyArray(10) @@ -48,8 +48,41 @@ s = y[3] assert len(s) == struct.calcsize('i') assert s == struct.pack('i', 3) - viewlen = module.test_buffer(arr) - assert viewlen == y.itemsize * len(y) + + def test_buffer_protocol_capi(self): + foo = self.import_extension('foo', [ + ("get_len", "METH_VARARGS", + """ + Py_buffer view; + PyObject* obj = PyTuple_GetItem(args, 0); + long ret, vlen; + memset(&view, 0, sizeof(Py_buffer)); + ret = PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO); + if (ret != 0) + return NULL; + vlen = view.len / view.itemsize; + PyBuffer_Release(&view); + return PyInt_FromLong(vlen); + """), + ("test_buffer", "METH_VARARGS", + """ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + 
PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len / view->itemsize); + """)]) + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + ten = foo.get_len(arr) + assert ten == 10 + ten = foo.get_len('1234567890') + assert ten == 10 + ten = foo.test_buffer(arr) + assert ten == 10 @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_buffer_info(self): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -60,7 +60,7 @@ segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) -releasebufferproc = rffi.VOIDP +releasebufferproc = P(FT([PyO, Py_bufferP], Void)) PyGetSetDef = cpython_struct("PyGetSetDef", ( diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -17,6 +17,9 @@ assert isinstance(buf, Buffer) self.buf = buf + def _finalize_(self): + return self.buf.releasebuffer() + def buffer_w(self, space, flags): space.check_buf_flags(flags, self.buf.readonly) return self.buf diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -321,8 +321,9 @@ def newseqiter(self, w_obj): return W_SeqIterObject(w_obj) - def newbuffer(self, w_obj): - return W_Buffer(w_obj) + def newbuffer(self, obj): + ret = W_Buffer(obj) + return ret def newbytes(self, s): return W_BytesObject(s) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -6,7 +6,7 @@ class Buffer(object): 
"""Abstract base class for buffers.""" - __slots__ = ['readonly'] + _attrs_ = ['readonly'] _immutable_ = True def getlength(self): @@ -75,8 +75,11 @@ def getstrides(self): return [1] + def releasebuffer(self): + pass + class StringBuffer(Buffer): - __slots__ = ['value'] + _attrs_ = ['readonly', 'value'] _immutable_ = True def __init__(self, value): @@ -107,7 +110,7 @@ class SubBuffer(Buffer): - __slots__ = ['buffer', 'offset', 'size'] + _attrs_ = ['buffer', 'offset', 'size', 'readonly'] _immutable_ = True def __init__(self, buffer, offset, size): From pypy.commits at gmail.com Tue Dec 27 15:43:20 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 27 Dec 2016 12:43:20 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <5862d268.876ec20a.e50e2.9759@mx.google.com> Author: Matti Picus Branch: Changeset: r89253:c39632394d10 Date: 2016-12-27 19:51 +0200 http://bitbucket.org/pypy/pypy/changeset/c39632394d10/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,3 +68,11 @@ .. branch: stdlib-2.7.13 Updated the implementation to match CPython 2.7.13 instead of 2.7.13. + +.. branch: issue2444 + +Fix ``PyObject_GetBuffer`` and ``PyMemoryView_GET_BUFFER``, which leaked +memory and held references. Add a finalizer to CPyBuffer, add a +PyMemoryViewObject with a PyBuffer attached so that the call to +``PyMemoryView_GET_BUFFER`` does not leak a PyBuffer-sized piece of memory. +Properly call ``bf_releasebuffer`` when not ``NULL``. 
From pypy.commits at gmail.com Tue Dec 27 16:33:50 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 27 Dec 2016 13:33:50 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: fix 1b0451031b2e, more tests needed Message-ID: <5862de3e.d32f1c0a.9e8ad.26a3@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89255:fce2d925e21f Date: 2016-12-27 23:08 +0200 http://bitbucket.org/pypy/pypy/changeset/fce2d925e21f/ Log: fix 1b0451031b2e, more tests needed diff --git a/pypy/module/cpyext/userslot.py b/pypy/module/cpyext/userslot.py --- a/pypy/module/cpyext/userslot.py +++ b/pypy/module/cpyext/userslot.py @@ -67,15 +67,15 @@ @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_subtract(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.sub(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_multiply(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.mul(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_divide(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.div(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_inplace_add(space, w_obj1, w_obj2): @@ -83,15 +83,15 @@ @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_inplace_subtract(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.sub(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_inplace_multiply(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.mul(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_nb_inplace_divide(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.div(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_sq_concat(space, w_obj1, w_obj2): @@ -103,7 +103,7 @@ @cpython_api([PyObject, PyObject], PyObject, 
header=None) def slot_mp_subscript(space, w_obj1, w_obj2): - return space.add(w_obj1, w_obj2) + return space.getitem(w_obj1, w_obj2) @cpython_api([PyObject, PyObject], PyObject, header=None) def slot_tp_getattr(space, w_obj1, w_obj2): From pypy.commits at gmail.com Tue Dec 27 16:33:52 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 27 Dec 2016 13:33:52 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: merge default into branch Message-ID: <5862de40.0777c20a.c29ab.c934@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89256:d8febc18447e Date: 2016-12-27 23:09 +0200 http://bitbucket.org/pypy/pypy/changeset/d8febc18447e/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,3 +68,11 @@ .. branch: stdlib-2.7.13 Updated the implementation to match CPython 2.7.13 instead of 2.7.13. + +.. branch: issue2444 + +Fix ``PyObject_GetBuffer`` and ``PyMemoryView_GET_BUFFER``, which leaked +memory and held references. Add a finalizer to CPyBuffer, add a +PyMemoryViewObject with a PyBuffer attached so that the call to +``PyMemoryView_GET_BUFFER`` does not leak a PyBuffer-sized piece of memory. +Properly call ``bf_releasebuffer`` when not ``NULL``. 
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -637,18 +637,18 @@ ('len', Py_ssize_t), ('itemsize', Py_ssize_t), - ('readonly', lltype.Signed), - ('ndim', lltype.Signed), + ('readonly', rffi.INT_real), + ('ndim', rffi.INT_real), ('format', rffi.CCHARP), ('shape', Py_ssize_tP), ('strides', Py_ssize_tP), + ('suboffsets', Py_ssize_tP), ('_format', rffi.CFixedArray(rffi.UCHAR, Py_MAX_FMT)), ('_shape', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), ('_strides', rffi.CFixedArray(Py_ssize_t, Py_MAX_NDIMS)), - ('suboffsets', Py_ssize_tP), #('smalltable', rffi.CFixedArray(Py_ssize_t, 2)), - ('internal', rffi.VOIDP) - )) + ('internal', rffi.VOIDP), +)) Py_bufferP = lltype.Ptr(Py_buffer) @specialize.memo() diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( PyObjectFields, CANNOT_FAIL, - cpython_api, bootstrap_function, cpython_struct, build_type_checkers) + cpython_api, bootstrap_function, build_type_checkers) from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref, Py_DecRef, make_typedescr from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.__builtin__.interp_classobj import W_ClassObject, W_InstanceObject diff --git a/pypy/module/cpyext/include/memoryobject.h b/pypy/module/cpyext/include/memoryobject.h --- a/pypy/module/cpyext/include/memoryobject.h +++ b/pypy/module/cpyext/include/memoryobject.h @@ -5,6 +5,14 @@ extern "C" { #endif +/* The struct is declared here but it shouldn't + be considered public. Don't access those fields directly, + use the functions instead! 
*/ +typedef struct { + PyObject_HEAD + Py_buffer view; +} PyMemoryViewObject; + diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,14 +1,59 @@ from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, - Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, Py_ssize_tP) -from pypy.module.cpyext.pyobject import PyObject, make_ref, incref, from_ref + Py_MAX_FMT, Py_MAX_NDIMS, build_type_checkers, + Py_ssize_tP, PyObjectFields, cpython_struct, + bootstrap_function, Py_bufferP) +from pypy.module.cpyext.pyobject import (PyObject, make_ref, as_pyobj, incref, + decref, from_ref, make_typedescr) from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import widen from pypy.objspace.std.memoryobject import W_MemoryView +from pypy.module.cpyext.object import _dealloc from pypy.module.cpyext.import_ import PyImport_Import -PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView", "w_memoryview") +PyMemoryView_Check, PyMemoryView_CheckExact = build_type_checkers("MemoryView") - at cpython_api([PyObject, lltype.Ptr(Py_buffer), rffi.INT_real], + +PyMemoryViewObjectStruct = lltype.ForwardReference() +PyMemoryViewObject = lltype.Ptr(PyMemoryViewObjectStruct) +PyMemoryViewObjectFields = PyObjectFields + \ + (("view", Py_buffer),) +cpython_struct( + "PyMemoryViewObject", PyMemoryViewObjectFields, PyMemoryViewObjectStruct, + level=2) + + at bootstrap_function +def init_memoryobject(space): + "Type description of PyDictObject" + make_typedescr(W_MemoryView.typedef, + basestruct=PyMemoryViewObject.TO, + attach=memory_attach, + dealloc=memory_dealloc, + #realize=memory_realize, + ) + +def memory_attach(space, py_obj, w_obj, w_userdata=None): + """ + Fills a newly allocated PyMemoryViewObject with the given W_MemoryView object. 
+ """ + py_obj = rffi.cast(PyMemoryViewObject, py_obj) + py_obj.c_view.c_obj = rffi.cast(PyObject, 0) + +def memory_realize(space, py_obj): + """ + Creates the memory object in the interpreter + """ + raise oefmt(space.w_NotImplementedError, "cannot call this yet") + + at cpython_api([PyObject], lltype.Void, header=None) +def memory_dealloc(space, py_obj): + mem_obj = rffi.cast(PyMemoryViewObject, py_obj) + if mem_obj.c_view.c_obj: + decref(space, mem_obj.c_view.c_obj) + mem_obj.c_view.c_obj = rffi.cast(PyObject, 0) + _dealloc(space, py_obj) + + + at cpython_api([PyObject, Py_bufferP, rffi.INT_real], rffi.INT_real, error=-1) def PyObject_GetBuffer(space, w_obj, view, flags): """Export obj into a Py_buffer, view. These arguments must @@ -33,11 +78,40 @@ try: view.c_buf = rffi.cast(rffi.VOIDP, buf.get_raw_address()) except ValueError: - raise BufferError("could not create buffer from object") + if not space.isinstance_w(w_obj, space.w_str): + # XXX Python 3? + raise BufferError("could not create buffer from object") + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_obj), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) ret = fill_Py_buffer(space, buf, view) view.c_obj = make_ref(space, w_obj) return ret + at cpython_api([PyObject], Py_bufferP, error=CANNOT_FAIL) +def PyMemoryView_GET_BUFFER(space, w_obj): + """Return a pointer to the buffer-info structure wrapped by the given + object. The object must be a memoryview instance; this macro doesn't + check its type, you must do it yourself or you will risk crashes.""" + if not isinstance(w_obj, W_MemoryView): + return lltype.nullptr(Py_buffer) + py_memobj = rffi.cast(PyMemoryViewObject, as_pyobj(space, w_obj)) # no inc_ref + view = py_memobj.c_view + ndim = w_obj.buf.getndim() + if ndim >= Py_MAX_NDIMS: + # XXX warn? 
+ return view + fill_Py_buffer(space, w_obj.buf, view) + try: + view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) + #view.c_obj = make_ref(space, w_obj) # NO - this creates a ref cycle! + rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) + except ValueError: + w_s = w_obj.descr_tobytes(space) + view.c_obj = make_ref(space, w_s) + view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) + rffi.setintfield(view, 'c_readonly', 1) + return view + def fill_Py_buffer(space, buf, view): # c_buf, c_obj have been filled in ndim = buf.getndim() @@ -111,7 +185,7 @@ sd *= dim return 1 - at cpython_api([lltype.Ptr(Py_buffer), lltype.Char], rffi.INT_real, error=CANNOT_FAIL) + at cpython_api([Py_bufferP, lltype.Char], rffi.INT_real, error=CANNOT_FAIL) def PyBuffer_IsContiguous(space, view, fort): """Return 1 if the memory defined by the view is C-style (fort is 'C') or Fortran-style (fort is 'F') contiguous or either one @@ -132,7 +206,7 @@ def PyMemoryView_FromObject(space, w_obj): return space.call_method(space.builtin, "memoryview", w_obj) - at cpython_api([lltype.Ptr(Py_buffer)], PyObject) + at cpython_api([Py_bufferP], PyObject) def PyMemoryView_FromBuffer(space, view): """Create a memoryview object wrapping the given buffer-info structure view. The memoryview object then owns the buffer, which means you shouldn't @@ -149,29 +223,3 @@ # XXX needed for numpy on py3k raise NotImplementedError('PyMemoryView_GET_BASE') - at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) -def PyMemoryView_GET_BUFFER(space, w_obj): - """Return a pointer to the buffer-info structure wrapped by the given - object. The object must be a memoryview instance; this macro doesn't - check its type, you must do it yourself or you will risk crashes.""" - view = lltype.malloc(Py_buffer, flavor='raw', zero=True) - if not isinstance(w_obj, W_MemoryView): - return view - ndim = w_obj.buf.getndim() - if ndim >= Py_MAX_NDIMS: - # XXX warn? 
- return view - fill_Py_buffer(space, w_obj.buf, view) - try: - view.c_buf = rffi.cast(rffi.VOIDP, w_obj.buf.get_raw_address()) - view.c_obj = make_ref(space, w_obj) - rffi.setintfield(view, 'c_readonly', w_obj.buf.readonly) - isstr = False - except ValueError: - w_s = w_obj.descr_tobytes(space) - view.c_obj = make_ref(space, w_s) - view.c_buf = rffi.cast(rffi.VOIDP, rffi.str2charp(space.str_w(w_s), track_allocation=False)) - rffi.setintfield(view, 'c_readonly', 1) - isstr = True - return view - diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.rarithmetic import widen +from rpython.rlib import rgc # Force registration of gc.collect from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl, Py_buffer, Py_bufferP) @@ -12,8 +13,8 @@ getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, - readbufferproc, getbufferproc, ssizessizeobjargproc) -from pypy.module.cpyext.pyobject import make_ref, Py_DecRef + readbufferproc, getbufferproc, releasebufferproc, ssizessizeobjargproc) +from pypy.module.cpyext.pyobject import make_ref, decref from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.memoryobject import fill_Py_buffer from pypy.module.cpyext.state import State @@ -99,7 +100,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): @@ -110,7 +111,7 @@ if (not 
ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_ternaryfunc(space, w_self, w_args, func): @@ -132,7 +133,7 @@ if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and not space.issubtype_w(space.type(args_w[0]), space.type(w_self))): return space.w_NotImplemented - Py_DecRef(space, ref) + decref(space, ref) arg3 = space.w_None if len(args_w) > 1: arg3 = args_w[1] @@ -322,11 +323,14 @@ # Similar to Py_buffer _immutable_ = True - def __init__(self, ptr, size, w_obj, format='B', shape=None, - strides=None, ndim=1, itemsize=1, readonly=True): + def __init__(self, space, ptr, size, w_obj, format='B', shape=None, + strides=None, ndim=1, itemsize=1, readonly=True, + releasebuffer=None): + self.space = space self.ptr = ptr self.size = size self.w_obj = w_obj # kept alive + self.pyobj = make_ref(space, w_obj) self.format = format if not shape: self.shape = [size] @@ -339,6 +343,27 @@ self.ndim = ndim self.itemsize = itemsize self.readonly = readonly + self.releasebufferproc = releasebuffer + + def releasebuffer(self): + if self.pyobj: + decref(self.space, self.pyobj) + self.pyobj = lltype.nullptr(PyObject.TO) + else: + #do not call twice + return + if self.releasebufferproc: + func_target = rffi.cast(releasebufferproc, self.releasebufferproc) + with lltype.scoped_alloc(Py_buffer) as pybuf: + pybuf.c_buf = self.ptr + pybuf.c_len = self.size + pybuf.c_ndim = rffi.cast(rffi.INT_real, self.ndim) + for i in range(self.ndim): + pybuf.c_shape[i] = self.shape[i] + pybuf.c_strides[i] = self.strides[i] + pybuf.c_format = rffi.str2charp(self.format) + generic_cpy_call(self.space, func_target, self.pyobj, pybuf) + self.releasebufferproc = rffi.cast(rffi.VOIDP, 0) def getlength(self): return self.size @@ -368,26 +393,61 @@ # absolutely no safety checks, what could go 
wrong? self.ptr[index] = char +class FQ(rgc.FinalizerQueue): + Class = CPyBuffer + def finalizer_trigger(self): + while 1: + buf = self.next_dead() + if not buf: + break + buf.releasebuffer() + +fq = FQ() + def wrap_getreadbuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + decref(space, py_obj) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(ptr[0], size, w_self)) + buf = CPyBuffer(space, ptr[0], size, w_self, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getwritebuffer(space, w_self, w_args, func): func_target = rffi.cast(readbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + decref(space, py_obj) + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr: index = rffi.cast(Py_ssize_t, 0) size = generic_cpy_call(space, func_target, w_self, index, ptr) if size < 0: space.fromcache(State).check_and_raise_exception(always=True) - return space.newbuffer(CPyBuffer(ptr[0], size, w_self, readonly=False)) + buf = CPyBuffer(space, ptr[0], size, w_self, readonly=False, + releasebuffer=releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def wrap_getbuffer(space, w_self, w_args, func): func_target = rffi.cast(getbufferproc, func) + py_obj = make_ref(space, w_self) + py_type = py_obj.c_ob_type + releasebuffer = rffi.cast(rffi.VOIDP, 0) + if py_type.c_tp_as_buffer: + 
releasebuffer = rffi.cast(rffi.VOIDP, py_type.c_tp_as_buffer.c_bf_releasebuffer) + decref(space, py_obj) with lltype.scoped_alloc(Py_buffer) as pybuf: _flags = 0 if space.len_w(w_args) > 0: @@ -408,10 +468,13 @@ format = rffi.charp2str(pybuf.c_format) else: format = 'B' - return space.newbuffer(CPyBuffer(ptr, size, w_self, format=format, + buf = CPyBuffer(space, ptr, size, w_self, format=format, ndim=ndim, shape=shape, strides=strides, itemsize=pybuf.c_itemsize, - readonly=widen(pybuf.c_readonly))) + readonly=widen(pybuf.c_readonly), + releasebuffer = releasebuffer) + fq.register_finalizer(buf) + return space.newbuffer(buf) def get_richcmp_func(OP_CONST): def inner(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py --- a/pypy/module/cpyext/stubs.py +++ b/pypy/module/cpyext/stubs.py @@ -10,11 +10,7 @@ PyMethodDef = rffi.VOIDP PyGetSetDef = rffi.VOIDP PyMemberDef = rffi.VOIDP -Py_buffer = rffi.VOIDP va_list = rffi.VOIDP -PyDateTime_Date = rffi.VOIDP -PyDateTime_DateTime = rffi.VOIDP -PyDateTime_Time = rffi.VOIDP wrapperbase = rffi.VOIDP FILE = rffi.VOIDP PyFileObject = rffi.VOIDP diff --git a/pypy/module/cpyext/test/buffer_test.c b/pypy/module/cpyext/test/buffer_test.c --- a/pypy/module/cpyext/test/buffer_test.c +++ b/pypy/module/cpyext/test/buffer_test.c @@ -185,19 +185,6 @@ (initproc)PyMyArray_init, /* tp_init */ }; -static PyObject* -test_buffer(PyObject* self, PyObject* args) -{ - Py_buffer* view = NULL; - PyObject* obj = PyTuple_GetItem(args, 0); - PyObject* memoryview = PyMemoryView_FromObject(obj); - if (memoryview == NULL) - return PyInt_FromLong(-1); - view = PyMemoryView_GET_BUFFER(memoryview); - Py_DECREF(memoryview); - return PyInt_FromLong(view->len); -} - /* Copied from numpy tests */ /* * Create python string from a FLAG and or the corresponding PyBuf flag @@ -308,7 +295,6 @@ static PyMethodDef buffer_functions[] = { - {"test_buffer", (PyCFunction)test_buffer, METH_VARARGS, NULL}, {"get_buffer_info", 
(PyCFunction)get_buffer_info, METH_VARARGS, NULL}, {NULL, NULL} /* Sentinel */ }; diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -64,3 +64,58 @@ b = buffer(a) assert module.roundtrip(b) == 'text' + def test_releasebuffer(self): + module = self.import_extension('foo', [ + ("create_test", "METH_NOARGS", + """ + PyObject *obj; + obj = PyObject_New(PyObject, (PyTypeObject*)type); + return obj; + """), + ("get_cnt", "METH_NOARGS", + 'return PyLong_FromLong(cnt);')], prologue=""" + static float test_data = 42.f; + static int cnt=0; + static PyHeapTypeObject * type=NULL; + + int getbuffer(PyObject *obj, Py_buffer *view, int flags) { + + cnt ++; + memset(view, 0, sizeof(Py_buffer)); + view->obj = obj; + view->ndim = 0; + view->buf = (void *) &test_data; + view->itemsize = sizeof(float); + view->len = 1; + view->strides = NULL; + view->shape = NULL; + view->format = "f"; + return 0; + } + + void releasebuffer(PyObject *obj, Py_buffer *view) { + cnt --; + } + """, more_init=""" + type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); + + type->ht_type.tp_name = "Test"; + type->ht_type.tp_basicsize = sizeof(PyObject); + type->ht_name = PyString_FromString("Test"); + type->ht_type.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HEAPTYPE | Py_TPFLAGS_HAVE_NEWBUFFER; + type->ht_type.tp_flags &= ~Py_TPFLAGS_HAVE_GC; + + type->ht_type.tp_as_buffer = &type->as_buffer; + type->as_buffer.bf_getbuffer = getbuffer; + type->as_buffer.bf_releasebuffer = releasebuffer; + + if (PyType_Ready(&type->ht_type) < 0) INITERROR; + """, ) + import gc + assert module.get_cnt() == 0 + a = memoryview(module.create_test()) + assert module.get_cnt() == 1 + del a + gc.collect(); gc.collect(); gc.collect() + assert module.get_cnt() == 0 diff --git a/pypy/module/cpyext/test/test_memoryobject.py 
b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -20,16 +20,16 @@ def test_frombuffer(self, space, api): w_buf = space.newbuffer(StringBuffer("hello")) w_memoryview = api.PyMemoryView_FromObject(w_buf) - w_view = api.PyMemoryView_GET_BUFFER(w_memoryview) - assert w_view.c_ndim == 1 - f = rffi.charp2str(w_view.c_format) + view = api.PyMemoryView_GET_BUFFER(w_memoryview) + assert view.c_ndim == 1 + f = rffi.charp2str(view.c_format) assert f == 'B' - assert w_view.c_shape[0] == 5 - assert w_view.c_strides[0] == 1 - assert w_view.c_len == 5 - o = rffi.charp2str(w_view.c_buf) + assert view.c_shape[0] == 5 + assert view.c_strides[0] == 1 + assert view.c_len == 5 + o = rffi.charp2str(view.c_buf) assert o == 'hello' - w_mv = api.PyMemoryView_FromBuffer(w_view) + w_mv = api.PyMemoryView_FromBuffer(view) for f in ('format', 'itemsize', 'ndim', 'readonly', 'shape', 'strides', 'suboffsets'): w_f = space.wrap(f) @@ -37,7 +37,7 @@ space.getattr(w_memoryview, w_f)) class AppTestBufferProtocol(AppTestCpythonExtensionBase): - def test_buffer_protocol(self): + def test_buffer_protocol_app(self): import struct module = self.import_module(name='buffer_test') arr = module.PyMyArray(10) @@ -48,8 +48,41 @@ s = y[3] assert len(s) == struct.calcsize('i') assert s == struct.pack('i', 3) - viewlen = module.test_buffer(arr) - assert viewlen == y.itemsize * len(y) + + def test_buffer_protocol_capi(self): + foo = self.import_extension('foo', [ + ("get_len", "METH_VARARGS", + """ + Py_buffer view; + PyObject* obj = PyTuple_GetItem(args, 0); + long ret, vlen; + memset(&view, 0, sizeof(Py_buffer)); + ret = PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO); + if (ret != 0) + return NULL; + vlen = view.len / view.itemsize; + PyBuffer_Release(&view); + return PyInt_FromLong(vlen); + """), + ("test_buffer", "METH_VARARGS", + """ + Py_buffer* view = NULL; + PyObject* obj = PyTuple_GetItem(args, 0); + 
PyObject* memoryview = PyMemoryView_FromObject(obj); + if (memoryview == NULL) + return PyInt_FromLong(-1); + view = PyMemoryView_GET_BUFFER(memoryview); + Py_DECREF(memoryview); + return PyInt_FromLong(view->len / view->itemsize); + """)]) + module = self.import_module(name='buffer_test') + arr = module.PyMyArray(10) + ten = foo.get_len(arr) + assert ten == 10 + ten = foo.get_len('1234567890') + assert ten == 10 + ten = foo.test_buffer(arr) + assert ten == 10 @pytest.mark.skipif(only_pypy, reason='pypy only test') def test_buffer_info(self): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -60,7 +60,7 @@ segcountproc = P(FT([PyO, Py_ssize_tP], Py_ssize_t)) charbufferproc = P(FT([PyO, Py_ssize_t, rffi.CCHARPP], Py_ssize_t)) getbufferproc = P(FT([PyO, Py_bufferP, rffi.INT_real], rffi.INT_real)) -releasebufferproc = rffi.VOIDP +releasebufferproc = P(FT([PyO, Py_bufferP], Void)) PyGetSetDef = cpython_struct("PyGetSetDef", ( diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -17,6 +17,9 @@ assert isinstance(buf, Buffer) self.buf = buf + def _finalize_(self): + return self.buf.releasebuffer() + def buffer_w(self, space, flags): space.check_buf_flags(flags, self.buf.readonly) return self.buf diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -321,8 +321,9 @@ def newseqiter(self, w_obj): return W_SeqIterObject(w_obj) - def newbuffer(self, w_obj): - return W_Buffer(w_obj) + def newbuffer(self, obj): + ret = W_Buffer(obj) + return ret def newbytes(self, s): return W_BytesObject(s) diff --git a/rpython/rlib/buffer.py b/rpython/rlib/buffer.py --- a/rpython/rlib/buffer.py +++ b/rpython/rlib/buffer.py @@ -6,7 +6,7 @@ class Buffer(object): 
"""Abstract base class for buffers.""" - __slots__ = ['readonly'] + _attrs_ = ['readonly'] _immutable_ = True def getlength(self): @@ -75,8 +75,11 @@ def getstrides(self): return [1] + def releasebuffer(self): + pass + class StringBuffer(Buffer): - __slots__ = ['value'] + _attrs_ = ['readonly', 'value'] _immutable_ = True def __init__(self, value): @@ -107,7 +110,7 @@ class SubBuffer(Buffer): - __slots__ = ['buffer', 'offset', 'size'] + _attrs_ = ['buffer', 'offset', 'size', 'readonly'] _immutable_ = True def __init__(self, buffer, offset, size): From pypy.commits at gmail.com Tue Dec 27 16:33:48 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 27 Dec 2016 13:33:48 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: merge default into branch Message-ID: <5862de3c.aaa3c20a.75e9a.bb44@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89254:6a94394133ae Date: 2016-12-27 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/6a94394133ae/ Log: merge default into branch diff too long, truncating to 2000 out of 46043 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -77,3 +77,5 @@ ^.hypothesis/ ^release/ ^rpython/_cache$ + +pypy/module/cppyy/.+/*\.pcm diff --git a/ctypes_configure/__init__.py b/ctypes_configure/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py deleted file mode 100644 --- a/ctypes_configure/cbuild.py +++ /dev/null @@ -1,456 +0,0 @@ - -import os, sys, inspect, re, imp, py -from ctypes_configure import stdoutcapture -import distutils - -debug = 0 - -configdir = py.path.local.make_numbered_dir(prefix='ctypes_configure-') - -class ExternalCompilationInfo(object): - - _ATTRIBUTES = ['pre_include_lines', 'includes', 'include_dirs', - 'post_include_lines', 'libraries', 'library_dirs', - 'separate_module_sources', 'separate_module_files'] - _AVOID_DUPLICATES = ['separate_module_files', 'libraries', 'includes', - 'include_dirs', 'library_dirs', 
'separate_module_sources'] - - def __init__(self, - pre_include_lines = [], - includes = [], - include_dirs = [], - post_include_lines = [], - libraries = [], - library_dirs = [], - separate_module_sources = [], - separate_module_files = []): - """ - pre_include_lines: list of lines that should be put at the top - of the generated .c files, before any #include. They shouldn't - contain an #include themselves. - - includes: list of .h file names to be #include'd from the - generated .c files. - - include_dirs: list of dir names that is passed to the C compiler - - post_include_lines: list of lines that should be put at the top - of the generated .c files, after the #includes. - - libraries: list of library names that is passed to the linker - - library_dirs: list of dir names that is passed to the linker - - separate_module_sources: list of multiline strings that are - each written to a .c file and compiled separately and linked - later on. (If function prototypes are needed for other .c files - to access this, they can be put in post_include_lines.) - - separate_module_files: list of .c file names that are compiled - separately and linked later on. (If an .h file is needed for - other .c files to access this, it can be put in includes.) 
- """ - for name in self._ATTRIBUTES: - value = locals()[name] - assert isinstance(value, (list, tuple)) - setattr(self, name, tuple(value)) - - def _value(self): - return tuple([getattr(self, x) for x in self._ATTRIBUTES]) - - def __hash__(self): - return hash(self._value()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and \ - self._value() == other._value() - - def __ne__(self, other): - return not self == other - - def __repr__(self): - info = [] - for attr in self._ATTRIBUTES: - val = getattr(self, attr) - info.append("%s=%s" % (attr, repr(val))) - return "" % ", ".join(info) - - def merge(self, *others): - others = list(others) - attrs = {} - for name in self._ATTRIBUTES: - if name not in self._AVOID_DUPLICATES: - s = [] - for i in [self] + others: - s += getattr(i, name) - attrs[name] = s - else: - s = set() - attr = [] - for one in [self] + others: - for elem in getattr(one, name): - if elem not in s: - s.add(elem) - attr.append(elem) - attrs[name] = attr - return ExternalCompilationInfo(**attrs) - - def write_c_header(self, fileobj): - for line in self.pre_include_lines: - print >> fileobj, line - for path in self.includes: - print >> fileobj, '#include <%s>' % (path,) - for line in self.post_include_lines: - print >> fileobj, line - - def _copy_attributes(self): - d = {} - for attr in self._ATTRIBUTES: - d[attr] = getattr(self, attr) - return d - - def convert_sources_to_files(self, cache_dir=None, being_main=False): - if not self.separate_module_sources: - return self - if cache_dir is None: - cache_dir = configdir.join('module_cache').ensure(dir=1) - num = 0 - files = [] - for source in self.separate_module_sources: - while 1: - filename = cache_dir.join('module_%d.c' % num) - num += 1 - if not filename.check(): - break - f = filename.open("w") - if being_main: - f.write("#define PYPY_NOT_MAIN_FILE\n") - self.write_c_header(f) - source = str(source) - f.write(source) - if not source.endswith('\n'): - f.write('\n') - f.close() - 
files.append(str(filename)) - d = self._copy_attributes() - d['separate_module_sources'] = () - d['separate_module_files'] += tuple(files) - return ExternalCompilationInfo(**d) - - def compile_shared_lib(self): - self = self.convert_sources_to_files() - if not self.separate_module_files: - return self - lib = compile_c_module([], 'externmod', self) - d = self._copy_attributes() - d['libraries'] += (lib,) - d['separate_module_files'] = () - d['separate_module_sources'] = () - return ExternalCompilationInfo(**d) - -if sys.platform == 'win32': - so_ext = '.dll' -else: - so_ext = '.so' - -def compiler_command(): - # e.g. for tcc, you might set this to - # "tcc -shared -o %s.so %s.c" - return os.getenv('PYPY_CC') - -def enable_fast_compilation(): - if sys.platform == 'win32': - dash = '/' - else: - dash = '-' - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt: - opt = re.sub('%sO\d+' % dash, '%sO0' % dash, opt) - else: - opt = '%sO0' % dash - gcv['OPT'] = opt - -def ensure_correct_math(): - if sys.platform != 'win32': - return # so far - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt and '/Op' not in opt: - opt += '/Op' - gcv['OPT'] = opt - - -def try_compile(c_files, eci): - try: - build_executable(c_files, eci) - result = True - except (distutils.errors.CompileError, - distutils.errors.LinkError): - result = False - return result - -def compile_c_module(cfiles, modbasename, eci, tmpdir=None): - #try: - # from distutils.log import set_threshold - # set_threshold(10000) - #except ImportError: - # print "ERROR IMPORTING" - # pass - cfiles = [py.path.local(f) for f in cfiles] - if tmpdir is None: - tmpdir = configdir.join("module_cache").ensure(dir=1) - num = 0 - cfiles += eci.separate_module_files - include_dirs = list(eci.include_dirs) - library_dirs = list(eci.library_dirs) - if (sys.platform == 'darwin' or # support Fink & 
Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in include_dirs and \ - os.path.exists(s + 'include'): - include_dirs.append(s + 'include') - if s + 'lib' not in library_dirs and \ - os.path.exists(s + 'lib'): - library_dirs.append(s + 'lib') - - num = 0 - modname = modbasename - while 1: - if not tmpdir.join(modname + so_ext).check(): - break - num += 1 - modname = '%s_%d' % (modbasename, num) - - lastdir = tmpdir.chdir() - libraries = eci.libraries - ensure_correct_math() - try: - if debug: print "modname", modname - c = stdoutcapture.Capture(mixed_out_err = True) - try: - try: - if compiler_command(): - # GCC-ish options only - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - cmd = compiler_command().replace('%s', - str(tmpdir.join(modname))) - for dir in [gcv['INCLUDEPY']] + list(include_dirs): - cmd += ' -I%s' % dir - for dir in library_dirs: - cmd += ' -L%s' % dir - os.system(cmd) - else: - from distutils.dist import Distribution - from distutils.extension import Extension - from distutils.ccompiler import get_default_compiler - saved_environ = os.environ.items() - try: - # distutils.core.setup() is really meant for end-user - # interactive usage, because it eats most exceptions and - # turn them into SystemExits. Instead, we directly - # instantiate a Distribution, which also allows us to - # ignore unwanted features like config files. 
- extra_compile_args = [] - # ensure correct math on windows - if sys.platform == 'win32': - extra_compile_args.append('/Op') # get extra precision - if get_default_compiler() == 'unix': - old_version = False - try: - g = os.popen('gcc --version', 'r') - verinfo = g.read() - g.close() - except (OSError, IOError): - pass - else: - old_version = verinfo.startswith('2') - if not old_version: - extra_compile_args.extend(["-Wno-unused-label", - "-Wno-unused-variable"]) - attrs = { - 'name': "testmodule", - 'ext_modules': [ - Extension(modname, [str(cfile) for cfile in cfiles], - include_dirs=include_dirs, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - libraries=list(libraries),) - ], - 'script_name': 'setup.py', - 'script_args': ['-q', 'build_ext', '--inplace', '--force'], - } - dist = Distribution(attrs) - if not dist.parse_command_line(): - raise ValueError, "distutils cmdline parse error" - dist.run_commands() - finally: - for key, value in saved_environ: - if os.environ.get(key) != value: - os.environ[key] = value - finally: - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = open("%s.errors" % modname, "w") - fdump.write(data) - fdump.close() - # XXX do we need to do some check on fout/ferr? 
- # XXX not a nice way to import a module - except: - print >>sys.stderr, data - raise - finally: - lastdir.chdir() - return str(tmpdir.join(modname) + so_ext) - -def make_module_from_c(cfile, eci): - cfile = py.path.local(cfile) - modname = cfile.purebasename - compile_c_module([cfile], modname, eci) - return import_module_from_directory(cfile.dirpath(), modname) - -def import_module_from_directory(dir, modname): - file, pathname, description = imp.find_module(modname, [str(dir)]) - try: - mod = imp.load_module(modname, file, pathname, description) - finally: - if file: - file.close() - return mod - - -def log_spawned_cmd(spawn): - def spawn_and_log(cmd, *args, **kwds): - if debug: - print ' '.join(cmd) - return spawn(cmd, *args, **kwds) - return spawn_and_log - - -class ProfOpt(object): - #XXX assuming gcc style flags for now - name = "profopt" - - def __init__(self, compiler): - self.compiler = compiler - - def first(self): - self.build('-fprofile-generate') - - def probe(self, exe, args): - # 'args' is a single string typically containing spaces - # and quotes, which represents several arguments. 
- os.system("'%s' %s" % (exe, args)) - - def after(self): - self.build('-fprofile-use') - - def build(self, option): - compiler = self.compiler - compiler.compile_extra.append(option) - compiler.link_extra.append(option) - try: - compiler._build() - finally: - compiler.compile_extra.pop() - compiler.link_extra.pop() - -class CCompiler: - - def __init__(self, cfilenames, eci, outputfilename=None, - compiler_exe=None, profbased=None): - self.cfilenames = cfilenames - ext = '' - self.compile_extra = [] - self.link_extra = [] - self.libraries = list(eci.libraries) - self.include_dirs = list(eci.include_dirs) - self.library_dirs = list(eci.library_dirs) - self.compiler_exe = compiler_exe - self.profbased = profbased - if not sys.platform in ('win32', 'darwin', 'cygwin'): # xxx - if 'm' not in self.libraries: - self.libraries.append('m') - if 'pthread' not in self.libraries: - self.libraries.append('pthread') - self.compile_extra += ['-O3', '-fomit-frame-pointer', '-pthread'] - self.link_extra += ['-pthread'] - if sys.platform == 'win32': - self.link_extra += ['/DEBUG'] # generate .pdb file - if (sys.platform == 'darwin' or # support Fink & Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in self.include_dirs and \ - os.path.exists(s + 'include'): - self.include_dirs.append(s + 'include') - if s + 'lib' not in self.library_dirs and \ - os.path.exists(s + 'lib'): - self.library_dirs.append(s + 'lib') - self.compile_extra += ['-O3', '-fomit-frame-pointer'] - - if outputfilename is None: - self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) - else: - self.outputfilename = py.path.local(outputfilename) - - def build(self, noerr=False): - basename = self.outputfilename.new(ext='') - data = '' - try: - saved_environ = os.environ.copy() - c = stdoutcapture.Capture(mixed_out_err = True) - try: - self._build() - finally: - # workaround for a distutils bugs where some env vars can - # become 
longer and longer every time it is used - for key, value in saved_environ.items(): - if os.environ.get(key) != value: - os.environ[key] = value - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = basename.new(ext='errors').open("w") - fdump.write(data) - fdump.close() - except: - if not noerr: - print >>sys.stderr, data - raise - - def _build(self): - from distutils.ccompiler import new_compiler - compiler = new_compiler(force=1) - if self.compiler_exe is not None: - for c in '''compiler compiler_so compiler_cxx - linker_exe linker_so'''.split(): - compiler.executables[c][0] = self.compiler_exe - compiler.spawn = log_spawned_cmd(compiler.spawn) - objects = [] - for cfile in self.cfilenames: - cfile = py.path.local(cfile) - old = cfile.dirpath().chdir() - try: - res = compiler.compile([cfile.basename], - include_dirs=self.include_dirs, - extra_preargs=self.compile_extra) - assert len(res) == 1 - cobjfile = py.path.local(res[0]) - assert cobjfile.check() - objects.append(str(cobjfile)) - finally: - old.chdir() - compiler.link_executable(objects, str(self.outputfilename), - libraries=self.libraries, - extra_preargs=self.link_extra, - library_dirs=self.library_dirs) - -def build_executable(*args, **kwds): - noerr = kwds.pop('noerr', False) - compiler = CCompiler(*args, **kwds) - compiler.build(noerr=noerr) - return str(compiler.outputfilename) diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py deleted file mode 100755 --- a/ctypes_configure/configure.py +++ /dev/null @@ -1,621 +0,0 @@ -#! 
/usr/bin/env python - -import os, py, sys -import ctypes -from ctypes_configure.cbuild import build_executable, configdir, try_compile -from ctypes_configure.cbuild import ExternalCompilationInfo -import distutils - -# ____________________________________________________________ -# -# Helpers for simple cases - -def eci_from_header(c_header_source): - return ExternalCompilationInfo( - pre_include_lines=c_header_source.split("\n") - ) - - -def getstruct(name, c_header_source, interesting_fields): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - STRUCT = Struct(name, interesting_fields) - return configure(CConfig)['STRUCT'] - -def getsimpletype(name, c_header_source, ctype_hint=ctypes.c_int): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - TYPE = SimpleType(name, ctype_hint) - return configure(CConfig)['TYPE'] - -def getconstantinteger(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - CONST = ConstantInteger(name) - return configure(CConfig)['CONST'] - -def getdefined(macro, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - DEFINED = Defined(macro) - return configure(CConfig)['DEFINED'] - -def has(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - HAS = Has(name) - return configure(CConfig)['HAS'] - -def check_eci(eci): - """Check if a given ExternalCompilationInfo compiles and links.""" - class CConfig: - _compilation_info_ = eci - WORKS = Works() - return configure(CConfig)['WORKS'] - -def sizeof(name, eci, **kwds): - class CConfig: - _compilation_info_ = eci - SIZE = SizeOf(name) - for k, v in kwds.items(): - setattr(CConfig, k, v) - return configure(CConfig)['SIZE'] - -def memory_alignment(): - """Return the alignment (in bytes) of memory allocations. 
- This is enough to make sure a structure with pointers and 'double' - fields is properly aligned.""" - global _memory_alignment - if _memory_alignment is None: - S = getstruct('struct memory_alignment_test', """ - struct memory_alignment_test { - double d; - void* p; - }; - """, []) - result = ctypes.alignment(S) - assert result & (result-1) == 0, "not a power of two??" - _memory_alignment = result - return _memory_alignment -_memory_alignment = None - -# ____________________________________________________________ -# -# General interface - -class ConfigResult: - def __init__(self, CConfig, info, entries): - self.CConfig = CConfig - self.result = {} - self.info = info - self.entries = entries - - def get_entry_result(self, entry): - try: - return self.result[entry] - except KeyError: - pass - name = self.entries[entry] - info = self.info[name] - self.result[entry] = entry.build_result(info, self) - - def get_result(self): - return dict([(name, self.result[entry]) - for entry, name in self.entries.iteritems()]) - - -class _CWriter(object): - """ A simple class which aggregates config parts - """ - def __init__(self, CConfig): - self.path = uniquefilepath() - self.f = self.path.open("w") - self.config = CConfig - - def write_header(self): - f = self.f - CConfig = self.config - CConfig._compilation_info_.write_c_header(f) - print >> f, C_HEADER - print >> f - - def write_entry(self, key, entry): - f = self.f - print >> f, 'void dump_section_%s(void) {' % (key,) - for line in entry.prepare_code(): - if line and line[0] != '#': - line = '\t' + line - print >> f, line - print >> f, '}' - print >> f - - def write_entry_main(self, key): - print >> self.f, '\tprintf("-+- %s\\n");' % (key,) - print >> self.f, '\tdump_section_%s();' % (key,) - print >> self.f, '\tprintf("---\\n");' - - def start_main(self): - print >> self.f, 'int main(int argc, char *argv[]) {' - - def close(self): - f = self.f - print >> f, '\treturn 0;' - print >> f, '}' - f.close() - - def ask_gcc(self, 
question): - self.start_main() - self.f.write(question + "\n") - self.close() - eci = self.config._compilation_info_ - return try_compile([self.path], eci) - - -def configure(CConfig, noerr=False): - """Examine the local system by running the C compiler. - The CConfig class contains CConfigEntry attribues that describe - what should be inspected; configure() returns a dict mapping - names to the results. - """ - for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_', - '_include_dirs_', '_header_']: - assert not hasattr(CConfig, attr), "Found legacy attribut %s on CConfig" % (attr,) - entries = [] - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigEntry): - entries.append((key, value)) - - if entries: # can be empty if there are only CConfigSingleEntries - writer = _CWriter(CConfig) - writer.write_header() - for key, entry in entries: - writer.write_entry(key, entry) - - f = writer.f - writer.start_main() - for key, entry in entries: - writer.write_entry_main(key) - writer.close() - - eci = CConfig._compilation_info_ - infolist = list(run_example_code(writer.path, eci, noerr=noerr)) - assert len(infolist) == len(entries) - - resultinfo = {} - resultentries = {} - for info, (key, entry) in zip(infolist, entries): - resultinfo[key] = info - resultentries[entry] = key - - result = ConfigResult(CConfig, resultinfo, resultentries) - for name, entry in entries: - result.get_entry_result(entry) - res = result.get_result() - else: - res = {} - - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigSingleEntry): - writer = _CWriter(CConfig) - writer.write_header() - res[key] = value.question(writer.ask_gcc) - return res - -# ____________________________________________________________ - - -class CConfigEntry(object): - "Abstract base class." - -class Struct(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined structure. 
- """ - def __init__(self, name, interesting_fields, ifdef=None): - self.name = name - self.interesting_fields = interesting_fields - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield 'typedef struct {' - yield ' char c;' - yield ' ctypesplatcheck_t s;' - yield '} ctypesplatcheck2_t;' - yield '' - yield 'ctypesplatcheck_t s;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("align", offsetof(ctypesplatcheck2_t, s));' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - for fieldname, fieldtype in self.interesting_fields: - yield 'dump("fldofs %s", offsetof(ctypesplatcheck_t, %s));'%( - fieldname, fieldname) - yield 'dump("fldsize %s", sizeof(s.%s));' % ( - fieldname, fieldname) - if fieldtype in integer_class: - yield 's.%s = 0; s.%s = ~s.%s;' % (fieldname, - fieldname, - fieldname) - yield 'dump("fldunsigned %s", s.%s > 0);' % (fieldname, - fieldname) - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None: - if not info['defined']: - return None - alignment = 1 - layout = [None] * info['size'] - for fieldname, fieldtype in self.interesting_fields: - if isinstance(fieldtype, Struct): - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - c_fieldtype = config_result.get_entry_result(fieldtype) - layout_addfield(layout, offset, c_fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(c_fieldtype)) - else: - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != size_and_sign(fieldtype): - fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) - layout_addfield(layout, offset, fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(fieldtype)) - - # try to enforce 
the same alignment as the one of the original - # structure - if alignment < info['align']: - choices = [ctype for ctype in alignment_types - if ctype_alignment(ctype) == info['align']] - assert choices, "unsupported alignment %d" % (info['align'],) - choices = [(ctypes.sizeof(ctype), i, ctype) - for i, ctype in enumerate(choices)] - csize, _, ctype = min(choices) - for i in range(0, info['size'] - csize + 1, info['align']): - if layout[i:i+csize] == [None] * csize: - layout_addfield(layout, i, ctype, '_alignment') - break - else: - raise AssertionError("unenforceable alignment %d" % ( - info['align'],)) - - n = 0 - for i, cell in enumerate(layout): - if cell is not None: - continue - layout_addfield(layout, i, ctypes.c_char, '_pad%d' % (n,)) - n += 1 - - # build the ctypes Structure - seen = {} - fields = [] - for cell in layout: - if cell in seen: - continue - fields.append((cell.name, cell.ctype)) - seen[cell] = True - - class S(ctypes.Structure): - _fields_ = fields - name = self.name - if name.startswith('struct '): - name = name[7:] - S.__name__ = name - return S - -class SimpleType(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined simple numeric type. 
- """ - def __init__(self, name, ctype_hint=ctypes.c_int, ifdef=None): - self.name = name - self.ctype_hint = ctype_hint - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield '' - yield 'ctypesplatcheck_t x;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - if self.ctype_hint in integer_class: - yield 'x = 0; x = ~x;' - yield 'dump("unsigned", x > 0);' - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None and not info['defined']: - return None - size = info['size'] - sign = info.get('unsigned', False) - ctype = self.ctype_hint - if (size, sign) != size_and_sign(ctype): - ctype = fixup_ctype(ctype, self.name, (size, sign)) - return ctype - -class ConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'if ((%s) < 0) {' % (self.name,) - yield ' long long x = (long long)(%s);' % (self.name,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.name,) - yield ' printf("value: %llu\\n", x);' - yield '}' - - def build_result(self, info, config_result): - return info['value'] - -class DefinedConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. If not #defined the value will be None. 
- """ - def __init__(self, macro): - self.name = self.macro = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'dump("defined", 1);' - yield 'if ((%s) < 0) {' % (self.macro,) - yield ' long long x = (long long)(%s);' % (self.macro,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.macro,) - yield ' printf("value: %llu\\n", x);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - return info['value'] - return None - - -class DefinedConstantString(CConfigEntry): - """ - """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'int i;' - yield 'char *p = %s;' % self.macro - yield 'dump("defined", 1);' - yield 'for (i = 0; p[i] != 0; i++ ) {' - yield ' printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - string = '' - d = 0 - while info.has_key('value_%d' % d): - string += chr(info['value_%d' % d]) - d += 1 - return string - return None - - -class Defined(CConfigEntry): - """A boolean, corresponding to an #ifdef. 
- """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % (self.macro,) - yield 'dump("defined", 1);' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - return bool(info['defined']) - -class CConfigSingleEntry(object): - """ An abstract class of type which requires - gcc succeeding/failing instead of only asking - """ - pass - -class Has(CConfigSingleEntry): - def __init__(self, name): - self.name = name - - def question(self, ask_gcc): - return ask_gcc(self.name + ';') - -class Works(CConfigSingleEntry): - def question(self, ask_gcc): - return ask_gcc("") - -class SizeOf(CConfigEntry): - """An entry in a CConfig class that stands for - some external opaque type - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'dump("size", sizeof(%s));' % self.name - - def build_result(self, info, config_result): - return info['size'] - -# ____________________________________________________________ -# -# internal helpers - -def ctype_alignment(c_type): - if issubclass(c_type, ctypes.Structure): - return max([ctype_alignment(fld_type) - for fld_name, fld_type in c_type._fields_]) - - return ctypes.alignment(c_type) - -def uniquefilepath(LAST=[0]): - i = LAST[0] - LAST[0] += 1 - return configdir.join('ctypesplatcheck_%d.c' % i) - -alignment_types = [ - ctypes.c_short, - ctypes.c_int, - ctypes.c_long, - ctypes.c_float, - ctypes.c_double, - ctypes.c_char_p, - ctypes.c_void_p, - ctypes.c_longlong, - ctypes.c_wchar, - ctypes.c_wchar_p, - ] - -integer_class = [ctypes.c_byte, ctypes.c_ubyte, - ctypes.c_short, ctypes.c_ushort, - ctypes.c_int, ctypes.c_uint, - ctypes.c_long, ctypes.c_ulong, - ctypes.c_longlong, ctypes.c_ulonglong, - ] -float_class = [ctypes.c_float, ctypes.c_double] - -class Field(object): - def __init__(self, name, ctype): - self.name = name - self.ctype = ctype - def __repr__(self): - return '' % 
(self.name, self.ctype) - -def layout_addfield(layout, offset, ctype, prefix): - size = ctypes.sizeof(ctype) - name = prefix - i = 0 - while name in layout: - i += 1 - name = '%s_%d' % (prefix, i) - field = Field(name, ctype) - for i in range(offset, offset+size): - assert layout[i] is None, "%s overlaps %r" % (fieldname, layout[i]) - layout[i] = field - return field - -def size_and_sign(ctype): - return (ctypes.sizeof(ctype), - ctype in integer_class and ctype(-1).value > 0) - -def fixup_ctype(fieldtype, fieldname, expected_size_and_sign): - for typeclass in [integer_class, float_class]: - if fieldtype in typeclass: - for ctype in typeclass: - if size_and_sign(ctype) == expected_size_and_sign: - return ctype - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_char): - # for now, assume it is an array of chars; otherwise we'd also - # have to check the exact integer type of the elements of the array - size, sign = expected_size_and_sign - return ctypes.c_char * size - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_ubyte): - # grumble, fields of type 'c_char array' have automatic cast-to- - # Python-string behavior in ctypes, which may not be what you - # want, so here is the same with c_ubytes instead... 
- size, sign = expected_size_and_sign - return ctypes.c_ubyte * size - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) - - -C_HEADER = """ -#include -#include /* for offsetof() */ -#ifndef _WIN32 -# include /* FreeBSD: for uint64_t */ -#endif - -void dump(char* key, int value) { - printf("%s: %d\\n", key, value); -} -""" - -def run_example_code(filepath, eci, noerr=False): - executable = build_executable([filepath], eci, noerr=noerr) - output = py.process.cmdexec(executable) - section = None - for line in output.splitlines(): - line = line.strip() - if line.startswith('-+- '): # start of a new section - section = {} - elif line == '---': # section end - assert section is not None - yield section - section = None - elif line: - assert section is not None - key, value = line.split(': ') - section[key] = int(value) - -# ____________________________________________________________ - -def get_python_include_dir(): - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - return gcv['INCLUDEPY'] - -if __name__ == '__main__': - doc = """Example: - - ctypes_platform.py -h sys/types.h -h netinet/in.h - 'struct sockaddr_in' - sin_port c_int - """ - import sys, getopt - opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:') - if not args: - print >> sys.stderr, doc - else: - assert len(args) % 2 == 1 - headers = [] - for opt, value in opts: - if opt == '-h': - headers.append('#include <%s>' % (value,)) - name = args[0] - fields = [] - for i in range(1, len(args), 2): - ctype = getattr(ctypes, args[i+1]) - fields.append((args[i], ctype)) - - S = getstruct(name, '\n'.join(headers), fields) - - for key, value in S._fields_: - print key, value diff --git a/ctypes_configure/doc/configure.html b/ctypes_configure/doc/configure.html deleted file mode 100644 --- a/ctypes_configure/doc/configure.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - -ctypes configure - - -
    -

    ctypes configure

    -
    -

    idea

    -

    One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py).

    -
    -
    -

    installation

    -

    easy_install ctypes_configure

    -
    -
    -

    usage

    -

    sample.py explains in details how to use it.

    -
    -
    - - diff --git a/ctypes_configure/doc/configure.txt b/ctypes_configure/doc/configure.txt deleted file mode 100644 --- a/ctypes_configure/doc/configure.txt +++ /dev/null @@ -1,22 +0,0 @@ -================= -ctypes configure -================= - -idea -==== - -One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size\_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py). - -installation -============ - -``easy_install ctypes_configure`` - -usage -===== - -:source:`sample.py ` explains in details how to use it. diff --git a/ctypes_configure/doc/sample.py b/ctypes_configure/doc/sample.py deleted file mode 100644 --- a/ctypes_configure/doc/sample.py +++ /dev/null @@ -1,72 +0,0 @@ - -from ctypes_configure import configure -import ctypes - -class CConfigure: - _compilation_info_ = configure.ExternalCompilationInfo( - - # all lines landing in C header before includes - pre_include_lines = [], - - # list of .h files to include - includes = ['time.h', 'sys/time.h', 'unistd.h'], - - # list of directories to search for include files - include_dirs = [], - - # all lines landing in C header after includes - post_include_lines = [], - - # libraries to link with - libraries = [], - - # library directories - library_dirs = [], - - # additional C sources to compile with (that go to - # created .c files) - separate_module_sources = [], - - # additional existing C source file names - separate_module_files = [], - ) - - # get real int type out of hint and name - size_t = configure.SimpleType('size_t', ctypes.c_int) - - # grab value of numerical #define - NULL = configure.ConstantInteger('NULL') - - # grab #define, whether it's defined or not - EXISTANT = configure.Defined('NULL') - NOT_EXISTANT = configure.Defined('XXXNOTNULL') - - # check for existance of C functions - 
has_write = configure.Has('write') - no_xxxwrite = configure.Has('xxxwrite') - - # check for size of type - sizeof_size_t = configure.SizeOf('size_t') - - # structure, with given hints for interesting fields, - # types does not need to be too specific. - # all interesting fields would end up with right offset - # size and order - struct_timeval = configure.Struct('struct timeval',[ - ('tv_sec', ctypes.c_int), - ('tv_usec', ctypes.c_int)]) - -info = configure.configure(CConfigure) - -assert info['has_write'] -assert not info['no_xxxwrite'] -assert info['NULL'] == 0 -size_t = info['size_t'] -print "size_t in ctypes is ", size_t -assert ctypes.sizeof(size_t) == info['sizeof_size_t'] -assert info['EXISTANT'] -assert not info['NOT_EXISTANT'] -print -print "fields of struct timeval are " -for name, value in info['struct_timeval']._fields_: - print " ", name, " ", value diff --git a/ctypes_configure/dumpcache.py b/ctypes_configure/dumpcache.py deleted file mode 100644 --- a/ctypes_configure/dumpcache.py +++ /dev/null @@ -1,46 +0,0 @@ -import os, sys -import ctypes - - -def dumpcache(referencefilename, filename, config): - dirname = os.path.dirname(referencefilename) - filename = os.path.join(dirname, filename) - f = open(filename, 'w') - print >> f, 'import ctypes' - print >> f - names = config.keys() - names.sort() - print >> f, '__all__ = %r' % (tuple(names),) - print >> f - for key in names: - val = config[key] - if isinstance(val, (int, long)): - f.write("%s = %d\n" % (key, val)) - elif val is None: - f.write("%s = None\n" % key) - elif isinstance(val, ctypes.Structure.__class__): - f.write("class %s(ctypes.Structure):\n" % key) - f.write(" _fields_ = [\n") - for k, v in val._fields_: - f.write(" ('%s', %s),\n" % (k, ctypes_repr(v))) - f.write(" ]\n") - elif isinstance(val, (tuple, list)): - for x in val: - assert isinstance(x, (int, long, str)), \ - "lists of integers or strings only" - f.write("%s = %r\n" % (key, val)) - else: - # a simple type, hopefully - 
f.write("%s = %s\n" % (key, ctypes_repr(val))) - f.close() - print 'Wrote %s.' % (filename,) - sys.stdout.flush() - -def ctypes_repr(cls): - # ctypes_configure does not support nested structs so far - # so let's ignore it - if isinstance(cls, ctypes._SimpleCData.__class__): - return "ctypes." + cls.__name__ - if hasattr(cls, '_length_') and hasattr(cls, '_type_'): # assume an array - return '%s*%d' % (ctypes_repr(cls._type_), cls._length_) - raise NotImplementedError("saving of object with type %r" % type(cls)) diff --git a/ctypes_configure/stdoutcapture.py b/ctypes_configure/stdoutcapture.py deleted file mode 100644 --- a/ctypes_configure/stdoutcapture.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -A quick hack to capture stdout/stderr. -""" - -import os, sys - - -class Capture: - - def __init__(self, mixed_out_err = False): - "Start capture of the Unix-level stdout and stderr." - if (not hasattr(os, 'tmpfile') or - not hasattr(os, 'dup') or - not hasattr(os, 'dup2') or - not hasattr(os, 'fdopen')): - self.dummy = 1 - else: - try: - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() - except OSError: # bah? on at least one Windows box - self.dummy = 1 - return - self.dummy = 0 - # make new stdout/stderr files if needed - self.localoutfd = os.dup(1) - self.localerrfd = os.dup(2) - if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1: - self.saved_stdout = sys.stdout - sys.stdout = os.fdopen(self.localoutfd, 'w', 1) - else: - self.saved_stdout = None - if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2: - self.saved_stderr = sys.stderr - sys.stderr = os.fdopen(self.localerrfd, 'w', 0) - else: - self.saved_stderr = None - os.dup2(self.tmpout.fileno(), 1) - os.dup2(self.tmperr.fileno(), 2) - - def done(self): - "End capture and return the captured text (stdoutfile, stderrfile)." 
- if self.dummy: - import cStringIO - return cStringIO.StringIO(), cStringIO.StringIO() - else: - os.dup2(self.localoutfd, 1) - os.dup2(self.localerrfd, 2) - if self.saved_stdout is not None: - f = sys.stdout - sys.stdout = self.saved_stdout - f.close() - else: - os.close(self.localoutfd) - if self.saved_stderr is not None: - f = sys.stderr - sys.stderr = self.saved_stderr - f.close() - else: - os.close(self.localerrfd) - self.tmpout.seek(0) - self.tmperr.seek(0) - return self.tmpout, self.tmperr - - -if __name__ == '__main__': - # test - c = Capture() - try: - os.system('echo hello') - finally: - fout, ferr = c.done() - print 'Output:', `fout.read()` - print 'Error:', `ferr.read()` diff --git a/ctypes_configure/test/__init__.py b/ctypes_configure/test/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/test/test_configure.py b/ctypes_configure/test/test_configure.py deleted file mode 100644 --- a/ctypes_configure/test/test_configure.py +++ /dev/null @@ -1,212 +0,0 @@ -import py, sys, struct -from ctypes_configure import configure -from ctypes_configure.cbuild import ExternalCompilationInfo -import ctypes - -def test_dirent(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - """, - [("d_reclen", ctypes.c_ushort)]) - assert issubclass(dirent, ctypes.Structure) - ssize = (ctypes.sizeof(ctypes.c_long) + - ctypes.sizeof(ctypes.c_int) + - ctypes.sizeof(ctypes.c_ushort) + - 32) - extra_padding = (-ssize) % ctypes.alignment(ctypes.c_long) - - assert dirent._fields_ == [('_alignment', ctypes.c_long), - ('_pad0', ctypes.c_char), - ('_pad1', ctypes.c_char), - ('_pad2', ctypes.c_char), - ('_pad3', ctypes.c_char), - ('d_reclen', ctypes.c_ushort), - ] + [ - ('_pad%d' % n, ctypes.c_char) - for n in range(4, 4+32+extra_padding)] - assert ctypes.sizeof(dirent) == ssize + extra_padding - assert 
ctypes.alignment(dirent) == ctypes.alignment(ctypes.c_long) - -def test_fit_type(): - S = configure.getstruct("struct S", - """ - struct S { - signed char c; - unsigned char uc; - short s; - unsigned short us; - int i; - unsigned int ui; - long l; - unsigned long ul; - long long ll; - unsigned long long ull; - float f; - double d; - }; - """, - [("c", ctypes.c_int), - ("uc", ctypes.c_int), - ("s", ctypes.c_uint), - ("us", ctypes.c_int), - ("i", ctypes.c_int), - ("ui", ctypes.c_int), - ("l", ctypes.c_int), - ("ul", ctypes.c_int), - ("ll", ctypes.c_int), - ("ull", ctypes.c_int), - ("f", ctypes.c_double), - ("d", ctypes.c_float)]) - assert issubclass(S, ctypes.Structure) - fields = dict(S._fields_) - assert fields["c"] == ctypes.c_byte - assert fields["uc"] == ctypes.c_ubyte - assert fields["s"] == ctypes.c_short - assert fields["us"] == ctypes.c_ushort - assert fields["i"] == ctypes.c_int - assert fields["ui"] == ctypes.c_uint - assert fields["l"] == ctypes.c_long - assert fields["ul"] == ctypes.c_ulong - assert fields["ll"] == ctypes.c_longlong - assert fields["ull"] == ctypes.c_ulonglong - assert fields["f"] == ctypes.c_float - assert fields["d"] == ctypes.c_double - -def test_simple_type(): - ctype = configure.getsimpletype('test_t', - 'typedef unsigned short test_t;', - ctypes.c_int) - assert ctype == ctypes.c_ushort - -def test_constant_integer(): - value = configure.getconstantinteger('BLAH', - '#define BLAH (6*7)') - assert value == 42 - value = configure.getconstantinteger('BLAH', - '#define BLAH (-2147483648LL)') - assert value == -2147483648 - value = configure.getconstantinteger('BLAH', - '#define BLAH (3333333333ULL)') - assert value == 3333333333 - -def test_defined(): - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', '') - assert not res - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', - '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') - assert res - -def test_configure(): - configdir = configure.configdir - test_h = 
configdir.join('test_ctypes_platform.h') - test_h.write('#define XYZZY 42\n') - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - - res = configure.configure(CConfig) - assert issubclass(res['FILE'], ctypes.Structure) - assert res == {'FILE': res['FILE'], - 'ushort': ctypes.c_ushort, - 'XYZZY': 42} - -def test_ifdef(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines = ['/* a C comment */', - '#define XYZZY 42', - 'typedef int foo;', - 'struct s {', - 'int i;', - 'double f;' - '};']) - - - s = configure.Struct('struct s', [('i', ctypes.c_int)], - ifdef='XYZZY') - z = configure.Struct('struct z', [('i', ctypes.c_int)], - ifdef='FOOBAR') - - foo = configure.SimpleType('foo', ifdef='XYZZY') - bar = configure.SimpleType('bar', ifdef='FOOBAR') - - res = configure.configure(CConfig) - assert res['s'] is not None - assert res['z'] is None - assert res['foo'] is not None - assert res['bar'] is None - -def test_nested_structs(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines=""" - struct x { - int foo; - unsigned long bar; - }; - struct y { - char c; - struct x x; - }; - """.split("\n")) - - x = configure.Struct("struct x", [("bar", ctypes.c_short)]) - y = configure.Struct("struct y", [("x", x)]) - - res = configure.configure(CConfig) - c_x = res["x"] - c_y = res["y"] - c_y_fields = dict(c_y._fields_) - assert issubclass(c_x , ctypes.Structure) - assert issubclass(c_y, ctypes.Structure) - assert c_y_fields["x"] is c_x - -def test_array(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - 
""", - [("d_name", ctypes.c_char * 0)]) - assert dirent.d_name.size == 32 - -def test_has(): - assert configure.has("x", "int x = 3;") - assert not configure.has("x", "") - # has() should also not crash if it is given an invalid #include - assert not configure.has("x", "#include ") - -def test_check_eci(): - eci = ExternalCompilationInfo() - assert configure.check_eci(eci) - eci = ExternalCompilationInfo(libraries=['some_name_that_doesnt_exist_']) - assert not configure.check_eci(eci) - -def test_sizeof(): - assert configure.sizeof("char", ExternalCompilationInfo()) == 1 - -def test_memory_alignment(): - a = configure.memory_alignment() - print a - assert a % struct.calcsize("P") == 0 diff --git a/ctypes_configure/test/test_dumpcache.py b/ctypes_configure/test/test_dumpcache.py deleted file mode 100644 --- a/ctypes_configure/test/test_dumpcache.py +++ /dev/null @@ -1,61 +0,0 @@ -import ctypes -from ctypes_configure import configure, dumpcache -from ctypes_configure.cbuild import ExternalCompilationInfo - - -def test_cache(): - configdir = configure.configdir - test_h = configdir.join('test_ctypes_platform2.h') - test_h.write('#define XYZZY 42\n' - "#define large 2147483648L\n") - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - XUZ = configure.Has('XUZ') - large = configure.DefinedConstantInteger('large') - undef = configure.Defined('really_undefined') - - res = configure.configure(CConfig) - - cachefile = configdir.join('cache') - dumpcache.dumpcache('', str(cachefile), res) - - d = {} - execfile(str(cachefile), d) - assert d['XYZZY'] == res['XYZZY'] - assert d['ushort'] == res['ushort'] - assert d['FILE']._fields_ == res['FILE']._fields_ - assert d['FILE'].__mro__[1:] == res['FILE'].__mro__[1:] - 
assert d['undef'] == res['undef'] - assert d['large'] == res['large'] - assert d['XUZ'] == res['XUZ'] - - -def test_cache_array(): - configdir = configure.configdir - res = {'foo': ctypes.c_short * 27} - cachefile = configdir.join('cache_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] - -def test_cache_array_array(): - configdir = configure.configdir - res = {'foo': (ctypes.c_int * 2) * 3} - cachefile = configdir.join('cache_array_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -188,7 +188,7 @@ are considered private and will not be called by SimpleXMLRPCServer. - If a registered function matches a XML-RPC request, then it + If a registered function matches an XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -274,7 +274,7 @@ Even though IOBase does not declare read, readinto, or write because their signatures will vary, implementations and clients should consider those methods part of the interface. Also, implementations - may raise a IOError when operations they do not support are called. + may raise an IOError when operations they do not support are called. The basic type used for binary data read from or written to a file is the bytes type. 
Method arguments may also be bytearray or memoryview of diff --git a/lib-python/2.7/calendar.py b/lib-python/2.7/calendar.py --- a/lib-python/2.7/calendar.py +++ b/lib-python/2.7/calendar.py @@ -174,22 +174,23 @@ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield (0, date.weekday()) - else: - yield (date.day, date.weekday()) + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ - for date in self.itermonthdates(year, month): - if date.month != month: - yield 0 - else: - yield date.day + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + for _ in range(days_before): + yield 0 + for d in range(1, ndays + 1): + yield d + days_after = (self.firstweekday - day1 - ndays) % 7 + for _ in range(days_after): + yield 0 def monthdatescalendar(self, year, month): """ diff --git a/lib-python/2.7/chunk.py b/lib-python/2.7/chunk.py --- a/lib-python/2.7/chunk.py +++ b/lib-python/2.7/chunk.py @@ -21,7 +21,7 @@ usage of the Chunk class defined here is to instantiate an instance at the start of each chunk and read from the instance until it reaches the end, after which a new instance can be instantiated. At the end -of the file, creating a new instance will fail with a EOFError +of the file, creating a new instance will fail with an EOFError exception. Usage: diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -252,7 +252,7 @@ """ def __init__(self, errors='strict'): """ - Creates a IncrementalDecoder instance. + Creates an IncrementalDecoder instance. 
The IncrementalDecoder may use different error handling schemes by providing the errors keyword argument. See the module docstring @@ -1012,7 +1012,7 @@ """ Encoding iterator. - Encodes the input strings from the iterator using a IncrementalEncoder. + Encodes the input strings from the iterator using an IncrementalEncoder. errors and kwargs are passed through to the IncrementalEncoder constructor. @@ -1030,7 +1030,7 @@ """ Decoding iterator. - Decodes the input strings from the iterator using a IncrementalDecoder. + Decodes the input strings from the iterator using an IncrementalDecoder. errors and kwargs are passed through to the IncrementalDecoder constructor. diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -113,7 +113,7 @@ """ if t is None: t = time.time() year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7] - return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % ( DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec) diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,3 +1,4 @@ +import functools import unittest from ctypes import * from ctypes.test import need_symbol @@ -248,6 +249,40 @@ self.assertEqual(result, callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5)) + def test_callback_large_struct(self): + class Check: pass + + class X(Structure): + _fields_ = [ + ('first', c_ulong), + ('second', c_ulong), + ('third', c_ulong), + ] + + def callback(check, s): + check.first = s.first + check.second = s.second + check.third = s.third + + check = Check() + s = X() + s.first = 0xdeadbeef + s.second = 0xcafebabe + s.third = 0x0bad1dea + + CALLBACK = CFUNCTYPE(None, X) + dll = CDLL(_ctypes_test.__file__) + func = dll._testfunc_cbk_large_struct + func.argtypes = (X, 
CALLBACK) + func.restype = None + # the function just calls the callback with the passed structure + func(s, CALLBACK(functools.partial(callback, check))) + self.assertEqual(check.first, s.first) + self.assertEqual(check.second, s.second) + self.assertEqual(check.third, s.third) + self.assertEqual(check.first, 0xdeadbeef) + self.assertEqual(check.second, 0xcafebabe) + self.assertEqual(check.third, 0x0bad1dea) ################################################################ diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,6 +1,7 @@ import unittest -import os +import os.path import sys +from test import test_support from ctypes import * from ctypes.util import find_library from ctypes.test import is_resource_enabled @@ -65,28 +66,10 @@ if self.gle: self.gle.gleGetJoinStyle -# On platforms where the default shared library suffix is '.so', -# at least some libraries can be loaded as attributes of the cdll -# object, since ctypes now tries loading the lib again -# with '.so' appended of the first try fails. -# -# Won't work for libc, unfortunately. OTOH, it isn't -# needed for libc since this is already mapped into the current -# process (?) -# -# On MAC OSX, it won't work either, because dlopen() needs a full path, -# and the default suffix is either none or '.dylib'. 
- at unittest.skip('test disabled') - at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", - 'test not suitable for this platform') -class LoadLibs(unittest.TestCase): - def test_libm(self): - import math - libm = cdll.libm - sqrt = libm.sqrt - sqrt.argtypes = (c_double,) - sqrt.restype = c_double - self.assertEqual(sqrt(2), math.sqrt(2)) + def test_shell_injection(self): + result = find_library('; echo Hello shell > ' + test_support.TESTFN) + self.assertFalse(os.path.lexists(test_support.TESTFN)) + self.assertIsNone(result) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -77,5 +77,13 @@ self.assertRaises(ValueError, (c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int)) + def test_abstract(self): + self.assertRaises(TypeError, Array.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Structure.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Union.from_buffer, bytearray(10)) + self.assertRaises(TypeError, Array.from_buffer_copy, b"123") + self.assertRaises(TypeError, Structure.from_buffer_copy, b"123") + self.assertRaises(TypeError, Union.from_buffer_copy, b"123") + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -77,7 +77,7 @@ self.assertEqual(t(v).value, truth(v)) def test_typeerror(self): - # Only numbers are allowed in the contructor, + # Only numbers are allowed in the constructor, # otherwise TypeError is raised for t in signed_types + unsigned_types + float_types: self.assertRaises(TypeError, t, "") diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py --- 
a/lib-python/2.7/ctypes/test/test_structures.py +++ b/lib-python/2.7/ctypes/test/test_structures.py @@ -106,7 +106,7 @@ self.assertEqual(alignment(XX), alignment(X)) self.assertEqual(sizeof(XX), calcsize("3s 3s 0s")) - def test_emtpy(self): + def test_empty(self): # I had problems with these # # Although these are pathological cases: Empty Structures! diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -1,4 +1,6 @@ -import sys, os +import os +import subprocess +import sys # find_library(name) returns the pathname of a library, or None. if os.name == "nt": @@ -87,25 +89,28 @@ def _findLib_gcc(name): import tempfile + # Run GCC's linker with the -t (aka --trace) option and examine the + # library name it prints out. The GCC command will fail because we + # haven't supplied a proper program with main(), but that does not + # matter. expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) - fdout, ccout = tempfile.mkstemp() - os.close(fdout) - cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ - 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name + cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit; fi;' \ + 'LANG=C LC_ALL=C $CC -Wl,-t -o "$2" 2>&1 -l"$1"' + + temp = tempfile.NamedTemporaryFile() try: - f = os.popen(cmd) - try: - trace = f.read() - finally: - rv = f.close() + proc = subprocess.Popen((cmd, '_findLib_gcc', name, temp.name), + shell=True, + stdout=subprocess.PIPE) + [trace, _] = proc.communicate() finally: try: - os.unlink(ccout) + temp.close() except OSError, e: + # ENOENT is raised if the file was already removed, which is + # the normal behaviour of GCC if linking fails if e.errno != errno.ENOENT: raise - if rv == 10: - raise OSError, 'gcc or cc command not found' res = re.search(expr, trace) if not res: return None @@ -117,13 +122,17 @@ 
def _get_soname(f): if not f: return None - cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f - f = os.popen(cmd) + + null = open(os.devnull, "wb") try: - data = f.read() - finally: - f.close() - res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) + with null: + proc = subprocess.Popen(("/usr/ccs/bin/dump", "-Lpv", f), + stdout=subprocess.PIPE, + stderr=null) + except OSError: # E.g. command not found + return None + [data, _] = proc.communicate() + res = re.search(br'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) @@ -132,16 +141,12 @@ # assuming GNU binutils / ELF if not f: return None - cmd = 'if ! type objdump >/dev/null 2>&1; then exit 10; fi;' \ - "objdump -p -j .dynamic 2>/dev/null " + f - f = os.popen(cmd) - try: - dump = f.read() - finally: - rv = f.close() - if rv == 10: - raise OSError, 'objdump command not found' - res = re.search(r'\sSONAME\s+([^\s]+)', dump) + cmd = 'if ! type objdump >/dev/null 2>&1; then exit; fi;' \ + 'objdump -p -j .dynamic 2>/dev/null "$1"' + proc = subprocess.Popen((cmd, '_get_soname', f), shell=True, + stdout=subprocess.PIPE) + [dump, _] = proc.communicate() From pypy.commits at gmail.com Wed Dec 28 03:45:50 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 28 Dec 2016 00:45:50 -0800 (PST) Subject: [pypy-commit] pypy default: Add so_prefixes to all posix platforms Message-ID: <58637bbe.4438c20a.b4b80.60ea@mx.google.com> Author: Armin Rigo Branch: Changeset: r89257:4b1508ade877 Date: 2016-12-28 09:45 +0100 http://bitbucket.org/pypy/pypy/changeset/4b1508ade877/ Log: Add so_prefixes to all posix platforms diff --git a/rpython/translator/platform/bsd.py b/rpython/translator/platform/bsd.py --- a/rpython/translator/platform/bsd.py +++ b/rpython/translator/platform/bsd.py @@ -6,7 +6,6 @@ DEFAULT_CC = 'clang' so_ext = 'so' - so_prefixes = ('lib', '') make_cmd = 'gmake' standalone_only = [] diff --git a/rpython/translator/platform/cygwin.py b/rpython/translator/platform/cygwin.py --- 
a/rpython/translator/platform/cygwin.py +++ b/rpython/translator/platform/cygwin.py @@ -26,7 +26,6 @@ shared_only = ('-fPIC',) so_ext = 'dll' exe_ext = 'exe' - so_prefixes = ('lib', '') def _args_for_shared(self, args): return ['-shared'] + args diff --git a/rpython/translator/platform/linux.py b/rpython/translator/platform/linux.py --- a/rpython/translator/platform/linux.py +++ b/rpython/translator/platform/linux.py @@ -19,7 +19,6 @@ standalone_only = () shared_only = ('-fPIC',) so_ext = 'so' - so_prefixes = ('lib', '') if platform.machine() == 's390x': from rpython.translator.platform.arch import s390x diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -10,6 +10,7 @@ class BasePosix(Platform): exe_ext = '' make_cmd = 'make' + so_prefixes = ('lib', '') relevant_environ = ('CPATH', 'LIBRARY_PATH', 'C_INCLUDE_PATH') From pypy.commits at gmail.com Wed Dec 28 10:28:12 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 28 Dec 2016 07:28:12 -0800 (PST) Subject: [pypy-commit] pypy default: Fix for a translation crash of 'rpython --sandbox -O2' Message-ID: <5863da0c.8a29c20a.c1a15.e107@mx.google.com> Author: Armin Rigo Branch: Changeset: r89258:0b536e14dfbc Date: 2016-12-28 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/0b536e14dfbc/ Log: Fix for a translation crash of 'rpython --sandbox -O2' diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -398,6 +398,8 @@ s_result = self.specialize(inputcells, op) if isinstance(s_result, FunctionGraph): s_result = s_result.getreturnvar().annotation + if s_result is None: + s_result = s_ImpossibleValue s_result = unionof(s_result, s_previous_result) return s_result From pypy.commits at gmail.com Wed Dec 28 12:01:07 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 28 Dec 2016 09:01:07 
-0800 (PST) Subject: [pypy-commit] pypy default: cleanup dead code (arigato) Message-ID: <5863efd3.212dc20a.22a2c.1139@mx.google.com> Author: Matti Picus Branch: Changeset: r89259:141b5d96fb22 Date: 2016-12-28 19:00 +0200 http://bitbucket.org/pypy/pypy/changeset/141b5d96fb22/ Log: cleanup dead code (arigato) diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -17,9 +17,6 @@ assert isinstance(buf, Buffer) self.buf = buf - def _finalize_(self): - return self.buf.releasebuffer() - def buffer_w(self, space, flags): space.check_buf_flags(flags, self.buf.readonly) return self.buf From pypy.commits at gmail.com Wed Dec 28 17:13:02 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 28 Dec 2016 14:13:02 -0800 (PST) Subject: [pypy-commit] pypy cppyy-skip: a proof-of-concept to skip everything, including pytest_configure Message-ID: <586438ee.ca57c20a.96102.7d42@mx.google.com> Author: Matti Picus Branch: cppyy-skip Changeset: r89260:c2f75063c7a2 Date: 2016-12-29 00:11 +0200 http://bitbucket.org/pypy/pypy/changeset/c2f75063c7a2/ Log: a proof-of-concept to skip everything, including pytest_configure diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,13 +1,20 @@ import py, sys +skip_reason = '' +if sys.platform == 'win32': + skip_reason= 'need to refactor for MSVC' +else: + # tests require minimally std=c++11 + cc_info = py.process.cmdexec('gcc -v --help') + if not '-std=c++11' in cc_info: + skip_reason = 'gcc does not support -std=c+11' + + +# This is supposed to work? How? 
+#py.test.mark = py.test.mark.skipif(skip_reason != '', reason=skip_reason) + @py.test.mark.tryfirst def pytest_runtest_setup(item): - if 'linux' in sys.platform: - # tests require minimally std=c++11 - cc_info = py.process.cmdexec('gcc -v --help') - if not '-std=c++11' in cc_info: - py.test.skip('skipping tests because gcc does not support C++11') - if py.path.local.sysfind('genreflex') is None: import pypy.module.cppyy.capi.loadable_capi as lcapi if 'dummy' in lcapi.reflection_library: @@ -29,41 +36,45 @@ def pytest_ignore_collect(path, config): if py.path.local.sysfind('genreflex') is None and config.option.runappdirect: return True # "can't run dummy tests in -A" + # This actually worked + if skip_reason: + return True + +if skip_reason == '': + def pytest_configure(config): + if py.path.local.sysfind('genreflex') is None: + import pypy.module.cppyy.capi.loadable_capi as lcapi + try: + import ctypes + ctypes.CDLL(lcapi.reflection_library) + except Exception as e: + if config.option.runappdirect: + return # "can't run dummy tests in -A" -def pytest_configure(config): - if py.path.local.sysfind('genreflex') is None: - import pypy.module.cppyy.capi.loadable_capi as lcapi - try: - import ctypes - ctypes.CDLL(lcapi.reflection_library) - except Exception as e: - if config.option.runappdirect: - return # "can't run dummy tests in -A" + # build dummy backend (which has reflex info and calls hard-wired) + import os + from rpython.translator.tool.cbuild import ExternalCompilationInfo + from rpython.translator.platform import platform + from rpython.translator import cdir - # build dummy backend (which has reflex info and calls hard-wired) - import os - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.translator.platform import platform - from rpython.translator import cdir + from rpython.rtyper.lltypesystem import rffi - from rpython.rtyper.lltypesystem import rffi + pkgpath = py.path.local(__file__).dirpath().join(os.pardir) + srcpath = 
pkgpath.join('src') + incpath = pkgpath.join('include') + tstpath = pkgpath.join('test') - pkgpath = py.path.local(__file__).dirpath().join(os.pardir) - srcpath = pkgpath.join('src') - incpath = pkgpath.join('include') - tstpath = pkgpath.join('test') + eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join('dummy_backend.cxx')], + include_dirs=[incpath, tstpath, cdir], + compile_extra=['-DRPY_EXTERN=RPY_EXPORTED', '-DCPPYY_DUMMY_BACKEND', + '-fno-strict-aliasing', '-std=c++11'], + use_cpp_linker=True, + ) - eci = ExternalCompilationInfo( - separate_module_files=[srcpath.join('dummy_backend.cxx')], - include_dirs=[incpath, tstpath, cdir], - compile_extra=['-DRPY_EXTERN=RPY_EXPORTED', '-DCPPYY_DUMMY_BACKEND', - '-fno-strict-aliasing', '-std=c++11'], - use_cpp_linker=True, - ) + soname = platform.compile( + [], eci, + outputfilename='libcppyy_dummy_backend', + standalone=False) - soname = platform.compile( - [], eci, - outputfilename='libcppyy_dummy_backend', - standalone=False) - - lcapi.reflection_library = str(soname) + lcapi.reflection_library = str(soname) From pypy.commits at gmail.com Thu Dec 29 07:05:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 29 Dec 2016 04:05:43 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: expose a function to check if a W_Buffer is readonly or not (ctypes from_buffer functions needs to check that) Message-ID: <5864fc17.4306c20a.d0b9e.3e82@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89261:530e24e0fbd0 Date: 2016-12-29 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/530e24e0fbd0/ Log: expose a function to check if a W_Buffer is readonly or not (ctypes from_buffer functions needs to check that) diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -85,8 +85,8 @@ def from_buffer(self, obj, offset=0): size = self._sizeofinstances() - buf = memoryview(obj)[offset:] - if 
buf.readonly: + buf = buffer(obj, offset, size) + if buf._pypy_is_readonly(): raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) if len(buf) < size: raise ValueError( diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -138,6 +138,11 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) return space.wrap(rffi.cast(lltype.Signed, ptr)) + def descr_is_readonly(self, space): + """ Needed in ctypes (from_buffer), CPython can check if a + buffer can be readonly (has a C Function/Macro for that) """ + return space.newbool(bool(self.buf.readonly)) + W_Buffer.typedef = TypeDef( "buffer", None, None, "read-write", __doc__ = """\ @@ -166,5 +171,6 @@ __repr__ = interp2app(W_Buffer.descr_repr), __buffer__ = interp2app(W_Buffer.descr_getbuffer), _pypy_raw_address = interp2app(W_Buffer.descr_pypy_raw_address), + _pypy_is_readonly = interp2app(W_Buffer.descr_is_readonly), ) W_Buffer.typedef.acceptable_as_base_class = False From pypy.commits at gmail.com Thu Dec 29 07:41:16 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 29 Dec 2016 04:41:16 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: fix link Message-ID: <5865046c.8ab81c0a.8808d.fa8c@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r839:03a87c8a2ff4 Date: 2016-12-29 13:41 +0100 http://bitbucket.org/pypy/pypy.org/changeset/03a87c8a2ff4/ Log: fix link diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -77,7 +77,7 @@

    We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:

    • the Python2.7 compatible release — PyPy2.7 v5.6.0 — (what's new in PyPy2.7?)
    • -
    • the Python3.3 compatible release — PyPy3.3 v5.5 — (what's new in PyPy3.3?).
    • +
    • the Python3.3 compatible release — PyPy3.3 v5.5 — (what's new in PyPy3.3?).
    • the Python2.7 Software Transactional Memory special release — PyPy-STM 2.5.1 (Linux x86-64 only)
      diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -21,7 +21,7 @@ * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only) .. _what's new in PyPy2.7?: http://doc.pypy.org/en/latest/release-pypy2.7-v5.6.0.html -.. _what's new in PyPy3.3?: http://doc.pypy.org/en/latest/release-pypy3.3-v5.5-alpha.html +.. _what's new in PyPy3.3?: http://doc.pypy.org/en/latest/release-pypy3.3-v5.5.0.html .. class:: download_menu From pypy.commits at gmail.com Thu Dec 29 08:51:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 29 Dec 2016 05:51:20 -0800 (PST) Subject: [pypy-commit] pypy py3.5: rStringIO assert that during tests the slicing is within bounds, extend a test to check readline returns empty string if the position is out of bounds Message-ID: <586514d8.8675c20a.dfef3.97c5@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89263:c7076180efda Date: 2016-12-29 14:50 +0100 http://bitbucket.org/pypy/pypy/changeset/c7076180efda/ Log: rStringIO assert that during tests the slicing is within bounds, extend a test to check readline returns empty string if the position is out of bounds diff --git a/rpython/rlib/rStringIO.py b/rpython/rlib/rStringIO.py --- a/rpython/rlib/rStringIO.py +++ b/rpython/rlib/rStringIO.py @@ -1,4 +1,5 @@ from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import we_are_translated AT_END = -1 @@ -154,8 +155,11 @@ assert p >= 0 self.__copy_into_bigbuffer() end = len(self.__bigbuffer) - if size >= 0 and size < end - p: + count = end - p + if size >= 0 and size < count: end = p + size + if count <= 0: + return '' i = p while i < end: finished = self.__bigbuffer[i] == '\n' @@ -163,6 +167,11 @@ if finished: break self.__pos = i + if not we_are_translated(): + # assert that we read within the bounds! 
+ bl = len(self.__bigbuffer) + assert p <= bl + assert i <= bl return ''.join(self.__bigbuffer[p:i]) def truncate(self, size): diff --git a/rpython/rlib/test/test_rStringIO.py b/rpython/rlib/test/test_rStringIO.py --- a/rpython/rlib/test/test_rStringIO.py +++ b/rpython/rlib/test/test_rStringIO.py @@ -91,6 +91,10 @@ assert f.readline() == 'baz' assert f.readline() == '' + f.seek(100000, 0) + assert f.tell() == 100000 + assert f.readline() == '' + def test_truncate(): f = RStringIO() f.truncate(20) From pypy.commits at gmail.com Thu Dec 29 08:51:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 29 Dec 2016 05:51:18 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: merge default Message-ID: <586514d6.820bc30a.5970b.9e54@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89262:8e166d469274 Date: 2016-12-29 13:09 +0100 http://bitbucket.org/pypy/pypy/changeset/8e166d469274/ Log: merge default diff too long, truncating to 2000 out of 3046 lines diff --git a/ctypes_configure/__init__.py b/ctypes_configure/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/cbuild.py b/ctypes_configure/cbuild.py deleted file mode 100644 --- a/ctypes_configure/cbuild.py +++ /dev/null @@ -1,456 +0,0 @@ - -import os, sys, inspect, re, imp, py -from ctypes_configure import stdoutcapture -import distutils - -debug = 0 - -configdir = py.path.local.make_numbered_dir(prefix='ctypes_configure-') - -class ExternalCompilationInfo(object): - - _ATTRIBUTES = ['pre_include_lines', 'includes', 'include_dirs', - 'post_include_lines', 'libraries', 'library_dirs', - 'separate_module_sources', 'separate_module_files'] - _AVOID_DUPLICATES = ['separate_module_files', 'libraries', 'includes', - 'include_dirs', 'library_dirs', 'separate_module_sources'] - - def __init__(self, - pre_include_lines = [], - includes = [], - include_dirs = [], - post_include_lines = [], - libraries = [], - library_dirs = [], - separate_module_sources = [], - 
separate_module_files = []): - """ - pre_include_lines: list of lines that should be put at the top - of the generated .c files, before any #include. They shouldn't - contain an #include themselves. - - includes: list of .h file names to be #include'd from the - generated .c files. - - include_dirs: list of dir names that is passed to the C compiler - - post_include_lines: list of lines that should be put at the top - of the generated .c files, after the #includes. - - libraries: list of library names that is passed to the linker - - library_dirs: list of dir names that is passed to the linker - - separate_module_sources: list of multiline strings that are - each written to a .c file and compiled separately and linked - later on. (If function prototypes are needed for other .c files - to access this, they can be put in post_include_lines.) - - separate_module_files: list of .c file names that are compiled - separately and linked later on. (If an .h file is needed for - other .c files to access this, it can be put in includes.) 
- """ - for name in self._ATTRIBUTES: - value = locals()[name] - assert isinstance(value, (list, tuple)) - setattr(self, name, tuple(value)) - - def _value(self): - return tuple([getattr(self, x) for x in self._ATTRIBUTES]) - - def __hash__(self): - return hash(self._value()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and \ - self._value() == other._value() - - def __ne__(self, other): - return not self == other - - def __repr__(self): - info = [] - for attr in self._ATTRIBUTES: - val = getattr(self, attr) - info.append("%s=%s" % (attr, repr(val))) - return "" % ", ".join(info) - - def merge(self, *others): - others = list(others) - attrs = {} - for name in self._ATTRIBUTES: - if name not in self._AVOID_DUPLICATES: - s = [] - for i in [self] + others: - s += getattr(i, name) - attrs[name] = s - else: - s = set() - attr = [] - for one in [self] + others: - for elem in getattr(one, name): - if elem not in s: - s.add(elem) - attr.append(elem) - attrs[name] = attr - return ExternalCompilationInfo(**attrs) - - def write_c_header(self, fileobj): - for line in self.pre_include_lines: - print >> fileobj, line - for path in self.includes: - print >> fileobj, '#include <%s>' % (path,) - for line in self.post_include_lines: - print >> fileobj, line - - def _copy_attributes(self): - d = {} - for attr in self._ATTRIBUTES: - d[attr] = getattr(self, attr) - return d - - def convert_sources_to_files(self, cache_dir=None, being_main=False): - if not self.separate_module_sources: - return self - if cache_dir is None: - cache_dir = configdir.join('module_cache').ensure(dir=1) - num = 0 - files = [] - for source in self.separate_module_sources: - while 1: - filename = cache_dir.join('module_%d.c' % num) - num += 1 - if not filename.check(): - break - f = filename.open("w") - if being_main: - f.write("#define PYPY_NOT_MAIN_FILE\n") - self.write_c_header(f) - source = str(source) - f.write(source) - if not source.endswith('\n'): - f.write('\n') - f.close() - 
files.append(str(filename)) - d = self._copy_attributes() - d['separate_module_sources'] = () - d['separate_module_files'] += tuple(files) - return ExternalCompilationInfo(**d) - - def compile_shared_lib(self): - self = self.convert_sources_to_files() - if not self.separate_module_files: - return self - lib = compile_c_module([], 'externmod', self) - d = self._copy_attributes() - d['libraries'] += (lib,) - d['separate_module_files'] = () - d['separate_module_sources'] = () - return ExternalCompilationInfo(**d) - -if sys.platform == 'win32': - so_ext = '.dll' -else: - so_ext = '.so' - -def compiler_command(): - # e.g. for tcc, you might set this to - # "tcc -shared -o %s.so %s.c" - return os.getenv('PYPY_CC') - -def enable_fast_compilation(): - if sys.platform == 'win32': - dash = '/' - else: - dash = '-' - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt: - opt = re.sub('%sO\d+' % dash, '%sO0' % dash, opt) - else: - opt = '%sO0' % dash - gcv['OPT'] = opt - -def ensure_correct_math(): - if sys.platform != 'win32': - return # so far - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - opt = gcv.get('OPT') # not always existent - if opt and '/Op' not in opt: - opt += '/Op' - gcv['OPT'] = opt - - -def try_compile(c_files, eci): - try: - build_executable(c_files, eci) - result = True - except (distutils.errors.CompileError, - distutils.errors.LinkError): - result = False - return result - -def compile_c_module(cfiles, modbasename, eci, tmpdir=None): - #try: - # from distutils.log import set_threshold - # set_threshold(10000) - #except ImportError: - # print "ERROR IMPORTING" - # pass - cfiles = [py.path.local(f) for f in cfiles] - if tmpdir is None: - tmpdir = configdir.join("module_cache").ensure(dir=1) - num = 0 - cfiles += eci.separate_module_files - include_dirs = list(eci.include_dirs) - library_dirs = list(eci.library_dirs) - if (sys.platform == 'darwin' or # support Fink & 
Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in include_dirs and \ - os.path.exists(s + 'include'): - include_dirs.append(s + 'include') - if s + 'lib' not in library_dirs and \ - os.path.exists(s + 'lib'): - library_dirs.append(s + 'lib') - - num = 0 - modname = modbasename - while 1: - if not tmpdir.join(modname + so_ext).check(): - break - num += 1 - modname = '%s_%d' % (modbasename, num) - - lastdir = tmpdir.chdir() - libraries = eci.libraries - ensure_correct_math() - try: - if debug: print "modname", modname - c = stdoutcapture.Capture(mixed_out_err = True) - try: - try: - if compiler_command(): - # GCC-ish options only - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - cmd = compiler_command().replace('%s', - str(tmpdir.join(modname))) - for dir in [gcv['INCLUDEPY']] + list(include_dirs): - cmd += ' -I%s' % dir - for dir in library_dirs: - cmd += ' -L%s' % dir - os.system(cmd) - else: - from distutils.dist import Distribution - from distutils.extension import Extension - from distutils.ccompiler import get_default_compiler - saved_environ = os.environ.items() - try: - # distutils.core.setup() is really meant for end-user - # interactive usage, because it eats most exceptions and - # turn them into SystemExits. Instead, we directly - # instantiate a Distribution, which also allows us to - # ignore unwanted features like config files. 
- extra_compile_args = [] - # ensure correct math on windows - if sys.platform == 'win32': - extra_compile_args.append('/Op') # get extra precision - if get_default_compiler() == 'unix': - old_version = False - try: - g = os.popen('gcc --version', 'r') - verinfo = g.read() - g.close() - except (OSError, IOError): - pass - else: - old_version = verinfo.startswith('2') - if not old_version: - extra_compile_args.extend(["-Wno-unused-label", - "-Wno-unused-variable"]) - attrs = { - 'name': "testmodule", - 'ext_modules': [ - Extension(modname, [str(cfile) for cfile in cfiles], - include_dirs=include_dirs, - library_dirs=library_dirs, - extra_compile_args=extra_compile_args, - libraries=list(libraries),) - ], - 'script_name': 'setup.py', - 'script_args': ['-q', 'build_ext', '--inplace', '--force'], - } - dist = Distribution(attrs) - if not dist.parse_command_line(): - raise ValueError, "distutils cmdline parse error" - dist.run_commands() - finally: - for key, value in saved_environ: - if os.environ.get(key) != value: - os.environ[key] = value - finally: - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = open("%s.errors" % modname, "w") - fdump.write(data) - fdump.close() - # XXX do we need to do some check on fout/ferr? 
- # XXX not a nice way to import a module - except: - print >>sys.stderr, data - raise - finally: - lastdir.chdir() - return str(tmpdir.join(modname) + so_ext) - -def make_module_from_c(cfile, eci): - cfile = py.path.local(cfile) - modname = cfile.purebasename - compile_c_module([cfile], modname, eci) - return import_module_from_directory(cfile.dirpath(), modname) - -def import_module_from_directory(dir, modname): - file, pathname, description = imp.find_module(modname, [str(dir)]) - try: - mod = imp.load_module(modname, file, pathname, description) - finally: - if file: - file.close() - return mod - - -def log_spawned_cmd(spawn): - def spawn_and_log(cmd, *args, **kwds): - if debug: - print ' '.join(cmd) - return spawn(cmd, *args, **kwds) - return spawn_and_log - - -class ProfOpt(object): - #XXX assuming gcc style flags for now - name = "profopt" - - def __init__(self, compiler): - self.compiler = compiler - - def first(self): - self.build('-fprofile-generate') - - def probe(self, exe, args): - # 'args' is a single string typically containing spaces - # and quotes, which represents several arguments. 
- os.system("'%s' %s" % (exe, args)) - - def after(self): - self.build('-fprofile-use') - - def build(self, option): - compiler = self.compiler - compiler.compile_extra.append(option) - compiler.link_extra.append(option) - try: - compiler._build() - finally: - compiler.compile_extra.pop() - compiler.link_extra.pop() - -class CCompiler: - - def __init__(self, cfilenames, eci, outputfilename=None, - compiler_exe=None, profbased=None): - self.cfilenames = cfilenames - ext = '' - self.compile_extra = [] - self.link_extra = [] - self.libraries = list(eci.libraries) - self.include_dirs = list(eci.include_dirs) - self.library_dirs = list(eci.library_dirs) - self.compiler_exe = compiler_exe - self.profbased = profbased - if not sys.platform in ('win32', 'darwin', 'cygwin'): # xxx - if 'm' not in self.libraries: - self.libraries.append('m') - if 'pthread' not in self.libraries: - self.libraries.append('pthread') - self.compile_extra += ['-O3', '-fomit-frame-pointer', '-pthread'] - self.link_extra += ['-pthread'] - if sys.platform == 'win32': - self.link_extra += ['/DEBUG'] # generate .pdb file - if (sys.platform == 'darwin' or # support Fink & Darwinports - sys.platform.startswith('freebsd')): - for s in ('/sw/', '/opt/local/', '/usr/local/'): - if s + 'include' not in self.include_dirs and \ - os.path.exists(s + 'include'): - self.include_dirs.append(s + 'include') - if s + 'lib' not in self.library_dirs and \ - os.path.exists(s + 'lib'): - self.library_dirs.append(s + 'lib') - self.compile_extra += ['-O3', '-fomit-frame-pointer'] - - if outputfilename is None: - self.outputfilename = py.path.local(cfilenames[0]).new(ext=ext) - else: - self.outputfilename = py.path.local(outputfilename) - - def build(self, noerr=False): - basename = self.outputfilename.new(ext='') - data = '' - try: - saved_environ = os.environ.copy() - c = stdoutcapture.Capture(mixed_out_err = True) - try: - self._build() - finally: - # workaround for a distutils bugs where some env vars can - # become 
longer and longer every time it is used - for key, value in saved_environ.items(): - if os.environ.get(key) != value: - os.environ[key] = value - foutput, foutput = c.done() - data = foutput.read() - if data: - fdump = basename.new(ext='errors').open("w") - fdump.write(data) - fdump.close() - except: - if not noerr: - print >>sys.stderr, data - raise - - def _build(self): - from distutils.ccompiler import new_compiler - compiler = new_compiler(force=1) - if self.compiler_exe is not None: - for c in '''compiler compiler_so compiler_cxx - linker_exe linker_so'''.split(): - compiler.executables[c][0] = self.compiler_exe - compiler.spawn = log_spawned_cmd(compiler.spawn) - objects = [] - for cfile in self.cfilenames: - cfile = py.path.local(cfile) - old = cfile.dirpath().chdir() - try: - res = compiler.compile([cfile.basename], - include_dirs=self.include_dirs, - extra_preargs=self.compile_extra) - assert len(res) == 1 - cobjfile = py.path.local(res[0]) - assert cobjfile.check() - objects.append(str(cobjfile)) - finally: - old.chdir() - compiler.link_executable(objects, str(self.outputfilename), - libraries=self.libraries, - extra_preargs=self.link_extra, - library_dirs=self.library_dirs) - -def build_executable(*args, **kwds): - noerr = kwds.pop('noerr', False) - compiler = CCompiler(*args, **kwds) - compiler.build(noerr=noerr) - return str(compiler.outputfilename) diff --git a/ctypes_configure/configure.py b/ctypes_configure/configure.py deleted file mode 100755 --- a/ctypes_configure/configure.py +++ /dev/null @@ -1,621 +0,0 @@ -#! 
/usr/bin/env python - -import os, py, sys -import ctypes -from ctypes_configure.cbuild import build_executable, configdir, try_compile -from ctypes_configure.cbuild import ExternalCompilationInfo -import distutils - -# ____________________________________________________________ -# -# Helpers for simple cases - -def eci_from_header(c_header_source): - return ExternalCompilationInfo( - pre_include_lines=c_header_source.split("\n") - ) - - -def getstruct(name, c_header_source, interesting_fields): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - STRUCT = Struct(name, interesting_fields) - return configure(CConfig)['STRUCT'] - -def getsimpletype(name, c_header_source, ctype_hint=ctypes.c_int): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - TYPE = SimpleType(name, ctype_hint) - return configure(CConfig)['TYPE'] - -def getconstantinteger(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - CONST = ConstantInteger(name) - return configure(CConfig)['CONST'] - -def getdefined(macro, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - DEFINED = Defined(macro) - return configure(CConfig)['DEFINED'] - -def has(name, c_header_source): - class CConfig: - _compilation_info_ = eci_from_header(c_header_source) - HAS = Has(name) - return configure(CConfig)['HAS'] - -def check_eci(eci): - """Check if a given ExternalCompilationInfo compiles and links.""" - class CConfig: - _compilation_info_ = eci - WORKS = Works() - return configure(CConfig)['WORKS'] - -def sizeof(name, eci, **kwds): - class CConfig: - _compilation_info_ = eci - SIZE = SizeOf(name) - for k, v in kwds.items(): - setattr(CConfig, k, v) - return configure(CConfig)['SIZE'] - -def memory_alignment(): - """Return the alignment (in bytes) of memory allocations. 
- This is enough to make sure a structure with pointers and 'double' - fields is properly aligned.""" - global _memory_alignment - if _memory_alignment is None: - S = getstruct('struct memory_alignment_test', """ - struct memory_alignment_test { - double d; - void* p; - }; - """, []) - result = ctypes.alignment(S) - assert result & (result-1) == 0, "not a power of two??" - _memory_alignment = result - return _memory_alignment -_memory_alignment = None - -# ____________________________________________________________ -# -# General interface - -class ConfigResult: - def __init__(self, CConfig, info, entries): - self.CConfig = CConfig - self.result = {} - self.info = info - self.entries = entries - - def get_entry_result(self, entry): - try: - return self.result[entry] - except KeyError: - pass - name = self.entries[entry] - info = self.info[name] - self.result[entry] = entry.build_result(info, self) - - def get_result(self): - return dict([(name, self.result[entry]) - for entry, name in self.entries.iteritems()]) - - -class _CWriter(object): - """ A simple class which aggregates config parts - """ - def __init__(self, CConfig): - self.path = uniquefilepath() - self.f = self.path.open("w") - self.config = CConfig - - def write_header(self): - f = self.f - CConfig = self.config - CConfig._compilation_info_.write_c_header(f) - print >> f, C_HEADER - print >> f - - def write_entry(self, key, entry): - f = self.f - print >> f, 'void dump_section_%s(void) {' % (key,) - for line in entry.prepare_code(): - if line and line[0] != '#': - line = '\t' + line - print >> f, line - print >> f, '}' - print >> f - - def write_entry_main(self, key): - print >> self.f, '\tprintf("-+- %s\\n");' % (key,) - print >> self.f, '\tdump_section_%s();' % (key,) - print >> self.f, '\tprintf("---\\n");' - - def start_main(self): - print >> self.f, 'int main(int argc, char *argv[]) {' - - def close(self): - f = self.f - print >> f, '\treturn 0;' - print >> f, '}' - f.close() - - def ask_gcc(self, 
question): - self.start_main() - self.f.write(question + "\n") - self.close() - eci = self.config._compilation_info_ - return try_compile([self.path], eci) - - -def configure(CConfig, noerr=False): - """Examine the local system by running the C compiler. - The CConfig class contains CConfigEntry attribues that describe - what should be inspected; configure() returns a dict mapping - names to the results. - """ - for attr in ['_includes_', '_libraries_', '_sources_', '_library_dirs_', - '_include_dirs_', '_header_']: - assert not hasattr(CConfig, attr), "Found legacy attribut %s on CConfig" % (attr,) - entries = [] - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigEntry): - entries.append((key, value)) - - if entries: # can be empty if there are only CConfigSingleEntries - writer = _CWriter(CConfig) - writer.write_header() - for key, entry in entries: - writer.write_entry(key, entry) - - f = writer.f - writer.start_main() - for key, entry in entries: - writer.write_entry_main(key) - writer.close() - - eci = CConfig._compilation_info_ - infolist = list(run_example_code(writer.path, eci, noerr=noerr)) - assert len(infolist) == len(entries) - - resultinfo = {} - resultentries = {} - for info, (key, entry) in zip(infolist, entries): - resultinfo[key] = info - resultentries[entry] = key - - result = ConfigResult(CConfig, resultinfo, resultentries) - for name, entry in entries: - result.get_entry_result(entry) - res = result.get_result() - else: - res = {} - - for key in dir(CConfig): - value = getattr(CConfig, key) - if isinstance(value, CConfigSingleEntry): - writer = _CWriter(CConfig) - writer.write_header() - res[key] = value.question(writer.ask_gcc) - return res - -# ____________________________________________________________ - - -class CConfigEntry(object): - "Abstract base class." - -class Struct(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined structure. 
- """ - def __init__(self, name, interesting_fields, ifdef=None): - self.name = name - self.interesting_fields = interesting_fields - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield 'typedef struct {' - yield ' char c;' - yield ' ctypesplatcheck_t s;' - yield '} ctypesplatcheck2_t;' - yield '' - yield 'ctypesplatcheck_t s;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("align", offsetof(ctypesplatcheck2_t, s));' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - for fieldname, fieldtype in self.interesting_fields: - yield 'dump("fldofs %s", offsetof(ctypesplatcheck_t, %s));'%( - fieldname, fieldname) - yield 'dump("fldsize %s", sizeof(s.%s));' % ( - fieldname, fieldname) - if fieldtype in integer_class: - yield 's.%s = 0; s.%s = ~s.%s;' % (fieldname, - fieldname, - fieldname) - yield 'dump("fldunsigned %s", s.%s > 0);' % (fieldname, - fieldname) - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None: - if not info['defined']: - return None - alignment = 1 - layout = [None] * info['size'] - for fieldname, fieldtype in self.interesting_fields: - if isinstance(fieldtype, Struct): - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - c_fieldtype = config_result.get_entry_result(fieldtype) - layout_addfield(layout, offset, c_fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(c_fieldtype)) - else: - offset = info['fldofs ' + fieldname] - size = info['fldsize ' + fieldname] - sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != size_and_sign(fieldtype): - fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) - layout_addfield(layout, offset, fieldtype, fieldname) - alignment = max(alignment, ctype_alignment(fieldtype)) - - # try to enforce 
the same alignment as the one of the original - # structure - if alignment < info['align']: - choices = [ctype for ctype in alignment_types - if ctype_alignment(ctype) == info['align']] - assert choices, "unsupported alignment %d" % (info['align'],) - choices = [(ctypes.sizeof(ctype), i, ctype) - for i, ctype in enumerate(choices)] - csize, _, ctype = min(choices) - for i in range(0, info['size'] - csize + 1, info['align']): - if layout[i:i+csize] == [None] * csize: - layout_addfield(layout, i, ctype, '_alignment') - break - else: - raise AssertionError("unenforceable alignment %d" % ( - info['align'],)) - - n = 0 - for i, cell in enumerate(layout): - if cell is not None: - continue - layout_addfield(layout, i, ctypes.c_char, '_pad%d' % (n,)) - n += 1 - - # build the ctypes Structure - seen = {} - fields = [] - for cell in layout: - if cell in seen: - continue - fields.append((cell.name, cell.ctype)) - seen[cell] = True - - class S(ctypes.Structure): - _fields_ = fields - name = self.name - if name.startswith('struct '): - name = name[7:] - S.__name__ = name - return S - -class SimpleType(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined simple numeric type. 
- """ - def __init__(self, name, ctype_hint=ctypes.c_int, ifdef=None): - self.name = name - self.ctype_hint = ctype_hint - self.ifdef = ifdef - - def prepare_code(self): - if self.ifdef is not None: - yield '#ifdef %s' % (self.ifdef,) - yield 'typedef %s ctypesplatcheck_t;' % (self.name,) - yield '' - yield 'ctypesplatcheck_t x;' - if self.ifdef is not None: - yield 'dump("defined", 1);' - yield 'dump("size", sizeof(ctypesplatcheck_t));' - if self.ctype_hint in integer_class: - yield 'x = 0; x = ~x;' - yield 'dump("unsigned", x > 0);' - if self.ifdef is not None: - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if self.ifdef is not None and not info['defined']: - return None - size = info['size'] - sign = info.get('unsigned', False) - ctype = self.ctype_hint - if (size, sign) != size_and_sign(ctype): - ctype = fixup_ctype(ctype, self.name, (size, sign)) - return ctype - -class ConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'if ((%s) < 0) {' % (self.name,) - yield ' long long x = (long long)(%s);' % (self.name,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.name,) - yield ' printf("value: %llu\\n", x);' - yield '}' - - def build_result(self, info, config_result): - return info['value'] - -class DefinedConstantInteger(CConfigEntry): - """An entry in a CConfig class that stands for an externally - defined integer constant. If not #defined the value will be None. 
- """ - def __init__(self, macro): - self.name = self.macro = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'dump("defined", 1);' - yield 'if ((%s) < 0) {' % (self.macro,) - yield ' long long x = (long long)(%s);' % (self.macro,) - yield ' printf("value: %lld\\n", x);' - yield '} else {' - yield ' unsigned long long x = (unsigned long long)(%s);' % ( - self.macro,) - yield ' printf("value: %llu\\n", x);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - return info['value'] - return None - - -class DefinedConstantString(CConfigEntry): - """ - """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % self.macro - yield 'int i;' - yield 'char *p = %s;' % self.macro - yield 'dump("defined", 1);' - yield 'for (i = 0; p[i] != 0; i++ ) {' - yield ' printf("value_%d: %d\\n", i, (int)(unsigned char)p[i]);' - yield '}' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - if info["defined"]: - string = '' - d = 0 - while info.has_key('value_%d' % d): - string += chr(info['value_%d' % d]) - d += 1 - return string - return None - - -class Defined(CConfigEntry): - """A boolean, corresponding to an #ifdef. 
- """ - def __init__(self, macro): - self.macro = macro - self.name = macro - - def prepare_code(self): - yield '#ifdef %s' % (self.macro,) - yield 'dump("defined", 1);' - yield '#else' - yield 'dump("defined", 0);' - yield '#endif' - - def build_result(self, info, config_result): - return bool(info['defined']) - -class CConfigSingleEntry(object): - """ An abstract class of type which requires - gcc succeeding/failing instead of only asking - """ - pass - -class Has(CConfigSingleEntry): - def __init__(self, name): - self.name = name - - def question(self, ask_gcc): - return ask_gcc(self.name + ';') - -class Works(CConfigSingleEntry): - def question(self, ask_gcc): - return ask_gcc("") - -class SizeOf(CConfigEntry): - """An entry in a CConfig class that stands for - some external opaque type - """ - def __init__(self, name): - self.name = name - - def prepare_code(self): - yield 'dump("size", sizeof(%s));' % self.name - - def build_result(self, info, config_result): - return info['size'] - -# ____________________________________________________________ -# -# internal helpers - -def ctype_alignment(c_type): - if issubclass(c_type, ctypes.Structure): - return max([ctype_alignment(fld_type) - for fld_name, fld_type in c_type._fields_]) - - return ctypes.alignment(c_type) - -def uniquefilepath(LAST=[0]): - i = LAST[0] - LAST[0] += 1 - return configdir.join('ctypesplatcheck_%d.c' % i) - -alignment_types = [ - ctypes.c_short, - ctypes.c_int, - ctypes.c_long, - ctypes.c_float, - ctypes.c_double, - ctypes.c_char_p, - ctypes.c_void_p, - ctypes.c_longlong, - ctypes.c_wchar, - ctypes.c_wchar_p, - ] - -integer_class = [ctypes.c_byte, ctypes.c_ubyte, - ctypes.c_short, ctypes.c_ushort, - ctypes.c_int, ctypes.c_uint, - ctypes.c_long, ctypes.c_ulong, - ctypes.c_longlong, ctypes.c_ulonglong, - ] -float_class = [ctypes.c_float, ctypes.c_double] - -class Field(object): - def __init__(self, name, ctype): - self.name = name - self.ctype = ctype - def __repr__(self): - return '' % 
(self.name, self.ctype) - -def layout_addfield(layout, offset, ctype, prefix): - size = ctypes.sizeof(ctype) - name = prefix - i = 0 - while name in layout: - i += 1 - name = '%s_%d' % (prefix, i) - field = Field(name, ctype) - for i in range(offset, offset+size): - assert layout[i] is None, "%s overlaps %r" % (fieldname, layout[i]) - layout[i] = field - return field - -def size_and_sign(ctype): - return (ctypes.sizeof(ctype), - ctype in integer_class and ctype(-1).value > 0) - -def fixup_ctype(fieldtype, fieldname, expected_size_and_sign): - for typeclass in [integer_class, float_class]: - if fieldtype in typeclass: - for ctype in typeclass: - if size_and_sign(ctype) == expected_size_and_sign: - return ctype - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_char): - # for now, assume it is an array of chars; otherwise we'd also - # have to check the exact integer type of the elements of the array - size, sign = expected_size_and_sign - return ctypes.c_char * size - if (hasattr(fieldtype, '_length_') - and getattr(fieldtype, '_type_', None) == ctypes.c_ubyte): - # grumble, fields of type 'c_char array' have automatic cast-to- - # Python-string behavior in ctypes, which may not be what you - # want, so here is the same with c_ubytes instead... 
- size, sign = expected_size_and_sign - return ctypes.c_ubyte * size - raise TypeError("conflicting field type %r for %r" % (fieldtype, - fieldname)) - - -C_HEADER = """ -#include -#include /* for offsetof() */ -#ifndef _WIN32 -# include /* FreeBSD: for uint64_t */ -#endif - -void dump(char* key, int value) { - printf("%s: %d\\n", key, value); -} -""" - -def run_example_code(filepath, eci, noerr=False): - executable = build_executable([filepath], eci, noerr=noerr) - output = py.process.cmdexec(executable) - section = None - for line in output.splitlines(): - line = line.strip() - if line.startswith('-+- '): # start of a new section - section = {} - elif line == '---': # section end - assert section is not None - yield section - section = None - elif line: - assert section is not None - key, value = line.split(': ') - section[key] = int(value) - -# ____________________________________________________________ - -def get_python_include_dir(): - from distutils import sysconfig - gcv = sysconfig.get_config_vars() - return gcv['INCLUDEPY'] - -if __name__ == '__main__': - doc = """Example: - - ctypes_platform.py -h sys/types.h -h netinet/in.h - 'struct sockaddr_in' - sin_port c_int - """ - import sys, getopt - opts, args = getopt.gnu_getopt(sys.argv[1:], 'h:') - if not args: - print >> sys.stderr, doc - else: - assert len(args) % 2 == 1 - headers = [] - for opt, value in opts: - if opt == '-h': - headers.append('#include <%s>' % (value,)) - name = args[0] - fields = [] - for i in range(1, len(args), 2): - ctype = getattr(ctypes, args[i+1]) - fields.append((args[i], ctype)) - - S = getstruct(name, '\n'.join(headers), fields) - - for key, value in S._fields_: - print key, value diff --git a/ctypes_configure/doc/configure.html b/ctypes_configure/doc/configure.html deleted file mode 100644 --- a/ctypes_configure/doc/configure.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - -ctypes configure - - -
      -

      ctypes configure

      -
      -

      idea

      -

      One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py).

      -
      -
      -

      installation

      -

      easy_install ctypes_configure

      -
      -
      -

      usage

      -

      sample.py explains in details how to use it.

      -
      -
      - - diff --git a/ctypes_configure/doc/configure.txt b/ctypes_configure/doc/configure.txt deleted file mode 100644 --- a/ctypes_configure/doc/configure.txt +++ /dev/null @@ -1,22 +0,0 @@ -================= -ctypes configure -================= - -idea -==== - -One of ctypes problems is that ctypes programs are usually not very -platform-independent. We created ctypes_configure, which invokes gcc -for various platform-dependent details like -exact sizes of types (for example size\_t), #defines, exact outline -of structures etc. It replaces in this regard code generator (h2py). - -installation -============ - -``easy_install ctypes_configure`` - -usage -===== - -:source:`sample.py ` explains in details how to use it. diff --git a/ctypes_configure/doc/sample.py b/ctypes_configure/doc/sample.py deleted file mode 100644 --- a/ctypes_configure/doc/sample.py +++ /dev/null @@ -1,72 +0,0 @@ - -from ctypes_configure import configure -import ctypes - -class CConfigure: - _compilation_info_ = configure.ExternalCompilationInfo( - - # all lines landing in C header before includes - pre_include_lines = [], - - # list of .h files to include - includes = ['time.h', 'sys/time.h', 'unistd.h'], - - # list of directories to search for include files - include_dirs = [], - - # all lines landing in C header after includes - post_include_lines = [], - - # libraries to link with - libraries = [], - - # library directories - library_dirs = [], - - # additional C sources to compile with (that go to - # created .c files) - separate_module_sources = [], - - # additional existing C source file names - separate_module_files = [], - ) - - # get real int type out of hint and name - size_t = configure.SimpleType('size_t', ctypes.c_int) - - # grab value of numerical #define - NULL = configure.ConstantInteger('NULL') - - # grab #define, whether it's defined or not - EXISTANT = configure.Defined('NULL') - NOT_EXISTANT = configure.Defined('XXXNOTNULL') - - # check for existance of C functions - 
has_write = configure.Has('write') - no_xxxwrite = configure.Has('xxxwrite') - - # check for size of type - sizeof_size_t = configure.SizeOf('size_t') - - # structure, with given hints for interesting fields, - # types does not need to be too specific. - # all interesting fields would end up with right offset - # size and order - struct_timeval = configure.Struct('struct timeval',[ - ('tv_sec', ctypes.c_int), - ('tv_usec', ctypes.c_int)]) - -info = configure.configure(CConfigure) - -assert info['has_write'] -assert not info['no_xxxwrite'] -assert info['NULL'] == 0 -size_t = info['size_t'] -print "size_t in ctypes is ", size_t -assert ctypes.sizeof(size_t) == info['sizeof_size_t'] -assert info['EXISTANT'] -assert not info['NOT_EXISTANT'] -print -print "fields of struct timeval are " -for name, value in info['struct_timeval']._fields_: - print " ", name, " ", value diff --git a/ctypes_configure/dumpcache.py b/ctypes_configure/dumpcache.py deleted file mode 100644 --- a/ctypes_configure/dumpcache.py +++ /dev/null @@ -1,46 +0,0 @@ -import os, sys -import ctypes - - -def dumpcache(referencefilename, filename, config): - dirname = os.path.dirname(referencefilename) - filename = os.path.join(dirname, filename) - f = open(filename, 'w') - print >> f, 'import ctypes' - print >> f - names = config.keys() - names.sort() - print >> f, '__all__ = %r' % (tuple(names),) - print >> f - for key in names: - val = config[key] - if isinstance(val, (int, long)): - f.write("%s = %d\n" % (key, val)) - elif val is None: - f.write("%s = None\n" % key) - elif isinstance(val, ctypes.Structure.__class__): - f.write("class %s(ctypes.Structure):\n" % key) - f.write(" _fields_ = [\n") - for k, v in val._fields_: - f.write(" ('%s', %s),\n" % (k, ctypes_repr(v))) - f.write(" ]\n") - elif isinstance(val, (tuple, list)): - for x in val: - assert isinstance(x, (int, long, str)), \ - "lists of integers or strings only" - f.write("%s = %r\n" % (key, val)) - else: - # a simple type, hopefully - 
f.write("%s = %s\n" % (key, ctypes_repr(val))) - f.close() - print 'Wrote %s.' % (filename,) - sys.stdout.flush() - -def ctypes_repr(cls): - # ctypes_configure does not support nested structs so far - # so let's ignore it - if isinstance(cls, ctypes._SimpleCData.__class__): - return "ctypes." + cls.__name__ - if hasattr(cls, '_length_') and hasattr(cls, '_type_'): # assume an array - return '%s*%d' % (ctypes_repr(cls._type_), cls._length_) - raise NotImplementedError("saving of object with type %r" % type(cls)) diff --git a/ctypes_configure/stdoutcapture.py b/ctypes_configure/stdoutcapture.py deleted file mode 100644 --- a/ctypes_configure/stdoutcapture.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -A quick hack to capture stdout/stderr. -""" - -import os, sys - - -class Capture: - - def __init__(self, mixed_out_err = False): - "Start capture of the Unix-level stdout and stderr." - if (not hasattr(os, 'tmpfile') or - not hasattr(os, 'dup') or - not hasattr(os, 'dup2') or - not hasattr(os, 'fdopen')): - self.dummy = 1 - else: - try: - self.tmpout = os.tmpfile() - if mixed_out_err: - self.tmperr = self.tmpout - else: - self.tmperr = os.tmpfile() - except OSError: # bah? on at least one Windows box - self.dummy = 1 - return - self.dummy = 0 - # make new stdout/stderr files if needed - self.localoutfd = os.dup(1) - self.localerrfd = os.dup(2) - if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1: - self.saved_stdout = sys.stdout - sys.stdout = os.fdopen(self.localoutfd, 'w', 1) - else: - self.saved_stdout = None - if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2: - self.saved_stderr = sys.stderr - sys.stderr = os.fdopen(self.localerrfd, 'w', 0) - else: - self.saved_stderr = None - os.dup2(self.tmpout.fileno(), 1) - os.dup2(self.tmperr.fileno(), 2) - - def done(self): - "End capture and return the captured text (stdoutfile, stderrfile)." 
- if self.dummy: - import cStringIO - return cStringIO.StringIO(), cStringIO.StringIO() - else: - os.dup2(self.localoutfd, 1) - os.dup2(self.localerrfd, 2) - if self.saved_stdout is not None: - f = sys.stdout - sys.stdout = self.saved_stdout - f.close() - else: - os.close(self.localoutfd) - if self.saved_stderr is not None: - f = sys.stderr - sys.stderr = self.saved_stderr - f.close() - else: - os.close(self.localerrfd) - self.tmpout.seek(0) - self.tmperr.seek(0) - return self.tmpout, self.tmperr - - -if __name__ == '__main__': - # test - c = Capture() - try: - os.system('echo hello') - finally: - fout, ferr = c.done() - print 'Output:', `fout.read()` - print 'Error:', `ferr.read()` diff --git a/ctypes_configure/test/__init__.py b/ctypes_configure/test/__init__.py deleted file mode 100644 diff --git a/ctypes_configure/test/test_configure.py b/ctypes_configure/test/test_configure.py deleted file mode 100644 --- a/ctypes_configure/test/test_configure.py +++ /dev/null @@ -1,212 +0,0 @@ -import py, sys, struct -from ctypes_configure import configure -from ctypes_configure.cbuild import ExternalCompilationInfo -import ctypes - -def test_dirent(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - """, - [("d_reclen", ctypes.c_ushort)]) - assert issubclass(dirent, ctypes.Structure) - ssize = (ctypes.sizeof(ctypes.c_long) + - ctypes.sizeof(ctypes.c_int) + - ctypes.sizeof(ctypes.c_ushort) + - 32) - extra_padding = (-ssize) % ctypes.alignment(ctypes.c_long) - - assert dirent._fields_ == [('_alignment', ctypes.c_long), - ('_pad0', ctypes.c_char), - ('_pad1', ctypes.c_char), - ('_pad2', ctypes.c_char), - ('_pad3', ctypes.c_char), - ('d_reclen', ctypes.c_ushort), - ] + [ - ('_pad%d' % n, ctypes.c_char) - for n in range(4, 4+32+extra_padding)] - assert ctypes.sizeof(dirent) == ssize + extra_padding - assert 
ctypes.alignment(dirent) == ctypes.alignment(ctypes.c_long) - -def test_fit_type(): - S = configure.getstruct("struct S", - """ - struct S { - signed char c; - unsigned char uc; - short s; - unsigned short us; - int i; - unsigned int ui; - long l; - unsigned long ul; - long long ll; - unsigned long long ull; - float f; - double d; - }; - """, - [("c", ctypes.c_int), - ("uc", ctypes.c_int), - ("s", ctypes.c_uint), - ("us", ctypes.c_int), - ("i", ctypes.c_int), - ("ui", ctypes.c_int), - ("l", ctypes.c_int), - ("ul", ctypes.c_int), - ("ll", ctypes.c_int), - ("ull", ctypes.c_int), - ("f", ctypes.c_double), - ("d", ctypes.c_float)]) - assert issubclass(S, ctypes.Structure) - fields = dict(S._fields_) - assert fields["c"] == ctypes.c_byte - assert fields["uc"] == ctypes.c_ubyte - assert fields["s"] == ctypes.c_short - assert fields["us"] == ctypes.c_ushort - assert fields["i"] == ctypes.c_int - assert fields["ui"] == ctypes.c_uint - assert fields["l"] == ctypes.c_long - assert fields["ul"] == ctypes.c_ulong - assert fields["ll"] == ctypes.c_longlong - assert fields["ull"] == ctypes.c_ulonglong - assert fields["f"] == ctypes.c_float - assert fields["d"] == ctypes.c_double - -def test_simple_type(): - ctype = configure.getsimpletype('test_t', - 'typedef unsigned short test_t;', - ctypes.c_int) - assert ctype == ctypes.c_ushort - -def test_constant_integer(): - value = configure.getconstantinteger('BLAH', - '#define BLAH (6*7)') - assert value == 42 - value = configure.getconstantinteger('BLAH', - '#define BLAH (-2147483648LL)') - assert value == -2147483648 - value = configure.getconstantinteger('BLAH', - '#define BLAH (3333333333ULL)') - assert value == 3333333333 - -def test_defined(): - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', '') - assert not res - res = configure.getdefined('ALFKJLKJFLKJFKLEJDLKEWMECEE', - '#define ALFKJLKJFLKJFKLEJDLKEWMECEE') - assert res - -def test_configure(): - configdir = configure.configdir - test_h = 
configdir.join('test_ctypes_platform.h') - test_h.write('#define XYZZY 42\n') - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - - res = configure.configure(CConfig) - assert issubclass(res['FILE'], ctypes.Structure) - assert res == {'FILE': res['FILE'], - 'ushort': ctypes.c_ushort, - 'XYZZY': 42} - -def test_ifdef(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines = ['/* a C comment */', - '#define XYZZY 42', - 'typedef int foo;', - 'struct s {', - 'int i;', - 'double f;' - '};']) - - - s = configure.Struct('struct s', [('i', ctypes.c_int)], - ifdef='XYZZY') - z = configure.Struct('struct z', [('i', ctypes.c_int)], - ifdef='FOOBAR') - - foo = configure.SimpleType('foo', ifdef='XYZZY') - bar = configure.SimpleType('bar', ifdef='FOOBAR') - - res = configure.configure(CConfig) - assert res['s'] is not None - assert res['z'] is None - assert res['foo'] is not None - assert res['bar'] is None - -def test_nested_structs(): - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - post_include_lines=""" - struct x { - int foo; - unsigned long bar; - }; - struct y { - char c; - struct x x; - }; - """.split("\n")) - - x = configure.Struct("struct x", [("bar", ctypes.c_short)]) - y = configure.Struct("struct y", [("x", x)]) - - res = configure.configure(CConfig) - c_x = res["x"] - c_y = res["y"] - c_y_fields = dict(c_y._fields_) - assert issubclass(c_x , ctypes.Structure) - assert issubclass(c_y, ctypes.Structure) - assert c_y_fields["x"] is c_x - -def test_array(): - dirent = configure.getstruct("struct dirent", - """ - struct dirent /* for this example only, not the exact dirent */ - { - long d_ino; - int d_off; - unsigned short d_reclen; - char d_name[32]; - }; - 
""", - [("d_name", ctypes.c_char * 0)]) - assert dirent.d_name.size == 32 - -def test_has(): - assert configure.has("x", "int x = 3;") - assert not configure.has("x", "") - # has() should also not crash if it is given an invalid #include - assert not configure.has("x", "#include ") - -def test_check_eci(): - eci = ExternalCompilationInfo() - assert configure.check_eci(eci) - eci = ExternalCompilationInfo(libraries=['some_name_that_doesnt_exist_']) - assert not configure.check_eci(eci) - -def test_sizeof(): - assert configure.sizeof("char", ExternalCompilationInfo()) == 1 - -def test_memory_alignment(): - a = configure.memory_alignment() - print a - assert a % struct.calcsize("P") == 0 diff --git a/ctypes_configure/test/test_dumpcache.py b/ctypes_configure/test/test_dumpcache.py deleted file mode 100644 --- a/ctypes_configure/test/test_dumpcache.py +++ /dev/null @@ -1,61 +0,0 @@ -import ctypes -from ctypes_configure import configure, dumpcache -from ctypes_configure.cbuild import ExternalCompilationInfo - - -def test_cache(): - configdir = configure.configdir - test_h = configdir.join('test_ctypes_platform2.h') - test_h.write('#define XYZZY 42\n' - "#define large 2147483648L\n") - - class CConfig: - _compilation_info_ = ExternalCompilationInfo( - pre_include_lines = ["/* a C comment */", - "#include ", - "#include "], - include_dirs = [str(configdir)] - ) - - FILE = configure.Struct('FILE', []) - ushort = configure.SimpleType('unsigned short') - XYZZY = configure.ConstantInteger('XYZZY') - XUZ = configure.Has('XUZ') - large = configure.DefinedConstantInteger('large') - undef = configure.Defined('really_undefined') - - res = configure.configure(CConfig) - - cachefile = configdir.join('cache') - dumpcache.dumpcache('', str(cachefile), res) - - d = {} - execfile(str(cachefile), d) - assert d['XYZZY'] == res['XYZZY'] - assert d['ushort'] == res['ushort'] - assert d['FILE']._fields_ == res['FILE']._fields_ - assert d['FILE'].__mro__[1:] == res['FILE'].__mro__[1:] - 
assert d['undef'] == res['undef'] - assert d['large'] == res['large'] - assert d['XUZ'] == res['XUZ'] - - -def test_cache_array(): - configdir = configure.configdir - res = {'foo': ctypes.c_short * 27} - cachefile = configdir.join('cache_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] - -def test_cache_array_array(): - configdir = configure.configdir - res = {'foo': (ctypes.c_int * 2) * 3} - cachefile = configdir.join('cache_array_array') - dumpcache.dumpcache('', str(cachefile), res) - # - d = {} - execfile(str(cachefile), d) - assert d['foo'] == res['foo'] diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.9.1 +Version: 1.9.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.9.1" -__version_info__ = (1, 9, 1) +__version__ = "1.9.2" +__version_info__ = (1, 9, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.9.1" + "\ncompiled with cffi version: 1.9.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,3 +68,11 @@ .. branch: stdlib-2.7.13 Updated the implementation to match CPython 2.7.13 instead of 2.7.13. + +.. branch: issue2444 + +Fix ``PyObject_GetBuffer`` and ``PyMemoryView_GET_BUFFER``, which leaked +memory and held references. Add a finalizer to CPyBuffer, add a +PyMemoryViewObject with a PyBuffer attached so that the call to +``PyMemoryView_GET_BUFFER`` does not leak a PyBuffer-sized piece of memory. +Properly call ``bf_releasebuffer`` when not ``NULL``. 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi from rpython.rtyper.lltypesystem import rffi -VERSION = "1.9.1" +VERSION = "1.9.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -17,7 +17,7 @@ from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer from pypy.module._cffi_backend.ctypevoid import W_CTypeVoid -from pypy.module._cffi_backend.ctypestruct import W_CTypeStruct +from pypy.module._cffi_backend.ctypestruct import W_CTypeStruct, W_CTypeUnion from pypy.module._cffi_backend.ctypeprim import (W_CTypePrimitiveSigned, W_CTypePrimitiveUnsigned, W_CTypePrimitiveCharOrUniChar, W_CTypePrimitiveFloat, W_CTypePrimitiveLongDouble) @@ -231,6 +231,11 @@ return cifbuilder.fb_struct_ffi_type(self, is_result_type) return _missing_ffi_type(self, cifbuilder, is_result_type) +def _union_ffi_type(self, cifbuilder, is_result_type): + if self.size >= 0: # only for a better error message + return cifbuilder.fb_union_ffi_type(self, is_result_type) + return _missing_ffi_type(self, cifbuilder, is_result_type) + def _primsigned_ffi_type(self, cifbuilder, is_result_type): size = self.size if size == 1: return clibffi.ffi_type_sint8 @@ -266,6 +271,7 @@ W_CType._get_ffi_type = _missing_ffi_type W_CTypeStruct._get_ffi_type = _struct_ffi_type +W_CTypeUnion._get_ffi_type = _union_ffi_type W_CTypePrimitiveSigned._get_ffi_type = _primsigned_ffi_type W_CTypePrimitiveCharOrUniChar._get_ffi_type = _primunsigned_ffi_type W_CTypePrimitiveUnsigned._get_ffi_type = _primunsigned_ffi_type @@ -276,6 +282,12 @@ # ---------- +_SUPPORTED_IN_API_MODE = ( + " are 
only supported as %s if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder" + ".cdef()+ffibuilder.set_source() and not taking a final '...' " + "argument)") + class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) @@ -297,6 +309,20 @@ def fb_fill_type(self, ctype, is_result_type): return ctype._get_ffi_type(self, is_result_type) + def fb_unsupported(self, ctype, is_result_type, detail): + place = "return value" if is_result_type else "argument" + raise oefmt(self.space.w_NotImplementedError, + "ctype '%s' not supported as %s. %s. " + "Such structs" + _SUPPORTED_IN_API_MODE, + ctype.name, place, detail, place) + + def fb_union_ffi_type(self, ctype, is_result_type=False): + place = "return value" if is_result_type else "argument" + raise oefmt(self.space.w_NotImplementedError, + "ctype '%s' not supported as %s by libffi. " + "Unions" + _SUPPORTED_IN_API_MODE, + ctype.name, place, place) + def fb_struct_ffi_type(self, ctype, is_result_type=False): # We can't pass a struct that was completed by verify(). # Issue: assume verify() is given "struct { long b; ...; }". @@ -309,37 +335,40 @@ # Another reason for 'custom_field_pos' would be anonymous # nested structures: we lost the information about having it # here, so better safe (and forbid it) than sorry (and maybe - # crash). + # crash). Note: it seems we only get in this case with + # ffi.verify(). 
space = self.space ctype.force_lazy_struct() if ctype._custom_field_pos: # these NotImplementedErrors may be caught and ignored until # a real call is made to a function of this type - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s (it is a struct declared " - "with \"...;\", but the C calling convention may depend " - "on the missing fields)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct declared with \"...;\", but the C " + "calling convention may depend on the missing fields; " + "or, it contains anonymous struct/unions") + # Another reason: __attribute__((packed)) is not supported by libffi. + if ctype._with_packed_change: + raise self.fb_unsupported(ctype, is_result_type, + "It is a 'packed' structure, with a different layout than " + "expected by libffi") # walk the fields, expanding arrays into repetitions; first, # only count how many flattened fields there are nflat = 0 for i, cf in enumerate(ctype._fields_list): if cf.is_bitfield(): - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s" - " (it is a struct with bit fields)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct with bit fields, which libffi does not " + "support") flat = 1 ct = cf.ctype while isinstance(ct, ctypearray.W_CTypeArray): flat *= ct.length ct = ct.ctitem if flat <= 0: - place = "return value" if is_result_type else "argument" - raise oefmt(space.w_NotImplementedError, - "ctype '%s' not supported as %s (it is a struct" - " with a zero-length array)", ctype.name, place) + raise self.fb_unsupported(ctype, is_result_type, + "It is a struct with a zero-length array, which libffi " + "does not support") nflat += flat if USE_C_LIBFFI_MSVC and is_result_type: diff --git a/pypy/module/_cffi_backend/ctypestruct.py 
b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -34,6 +34,7 @@ _fields_dict = None _custom_field_pos = False _with_var_array = False + _with_packed_changed = False def __init__(self, space, name): W_CType.__init__(self, space, -1, name, len(name)) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -303,6 +303,7 @@ fields_dict = {} w_ctype._custom_field_pos = False with_var_array = False + with_packed_change = False for i in range(len(fields_w)): w_field = fields_w[i] @@ -333,7 +334,8 @@ # # update the total alignment requirement, but skip it if the # field is an anonymous bitfield or if SF_PACKED - falign = 1 if sflags & SF_PACKED else ftype.alignof() + falignorg = ftype.alignof() + falign = 1 if sflags & SF_PACKED else falignorg do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -359,7 +361,10 @@ bs_flag = ctypestruct.W_CField.BS_REGULAR # align this field to its own 'falign' by inserting padding + boffsetorg = (boffset + falignorg*8-1) & ~(falignorg*8-1) boffset = (boffset + falign*8-1) & ~(falign*8-1) + if boffsetorg != boffset: + with_packed_change = True if foffset >= 0: # a forced field position: ignore the offset just computed, @@ -372,6 +377,7 @@ if (fname == '' and isinstance(ftype, ctypestruct.W_CTypeStructOrUnion)): # a nested anonymous struct or union + # note: it seems we only get here with ffi.verify() srcfield2names = {} ftype.force_lazy_struct() for name, srcfld in ftype._fields_dict.items(): @@ -530,6 +536,7 @@ w_ctype._fields_dict = fields_dict #w_ctype._custom_field_pos = ...set above already w_ctype._with_var_array = with_var_array + w_ctype._with_packed_change = with_packed_change # ____________________________________________________________ diff --git 
a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.9.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.9.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -1084,9 +1084,13 @@ BFunc = new_function_type((BStruct,), BDouble) # internally not callable dummy_func = cast(BFunc, 42) e = py.test.raises(NotImplementedError, dummy_func, "?") - msg = ("ctype \'struct foo\' not supported as argument (it is a struct " - 'declared with "...;", but the C calling convention may depend on ' - 'the missing fields)') + msg = ("ctype 'struct foo' not supported as argument. It is a struct " + 'declared with "...;", but the C calling convention may depend ' + "on the missing fields; or, it contains anonymous struct/unions. " + "Such structs are only supported as argument if the function is " + "'API mode' and non-variadic (i.e. declared inside ffibuilder." + "cdef()+ffibuilder.set_source() and not taking a final '...' 
" + "argument)") assert str(e.value) == msg def test_new_charp(): diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -6,9 +6,9 @@ import pypy.module.cpyext.api # side-effect of pre-importing it - at unwrap_spec(cdef=str, module_name=str, source=str) + at unwrap_spec(cdef=str, module_name=str, source=str, packed=int) def prepare(space, cdef, module_name, source, w_includes=None, - w_extra_source=None, w_min_version=None): + w_extra_source=None, w_min_version=None, packed=False): try: import cffi from cffi import FFI # <== the system one, which @@ -47,7 +47,7 @@ ffi = FFI() for include_ffi_object in includes: ffi.include(include_ffi_object._test_recompiler_source_ffi) - ffi.cdef(cdef) + ffi.cdef(cdef, packed=packed) ffi.set_source(module_name, source) ffi.emit_c_code(c_file) @@ -1838,3 +1838,149 @@ raises(ffi.error, ffi.sizeof, "vmat_t") p = ffi.new("vmat_t", 4) assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") + + def test_call_with_custom_field_pos(self): + ffi, lib = self.prepare(""" + struct foo { int x; ...; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_custom_field_pos", """ + struct foo { int y, x; }; + struct foo f(void) { + struct foo s = { 40, 200 }; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().x == 200 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. 
declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + + def test_call_with_nested_anonymous_struct(self): + import sys + if sys.platform == 'win32': + py.test.skip("needs a GCC extension") + ffi, lib = self.prepare(""" + struct foo { int a; union { int b, c; }; }; + struct foo f(void); + struct foo g(int, ...); + """, "test_call_with_nested_anonymous_struct", """ + struct foo { int a; union { int b, c; }; }; + struct foo f(void) { + struct foo s = { 40 }; + s.b = 200; + return s; + } + struct foo g(int a, ...) { } + """) + assert lib.f().b == 200 + e = raises(NotImplementedError, lib.g, 0) + assert str(e.value) == ( + 'ctype \'struct foo\' not supported as return value. It is a ' + 'struct declared with "...;", but the C calling convention may ' + 'depend on the missing fields; or, it contains anonymous ' + 'struct/unions. Such structs are only supported ' + 'as return value if the function is \'API mode\' and non-variadic ' + '(i.e. 
declared inside ffibuilder.cdef()+ffibuilder.set_source() ' + 'and not taking a final \'...\' argument)') + From pypy.commits at gmail.com Thu Dec 29 09:03:06 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 29 Dec 2016 06:03:06 -0800 (PST) Subject: [pypy-commit] pypy py3.5: do not skip ssl tests, _ssl module is now in lib_pypy Message-ID: <5865179a.448e1c0a.328c8.2195@mx.google.com> Author: Richard Plangger Branch: py3.5 Changeset: r89264:fb49af41edfd Date: 2016-12-29 15:02 +0100 http://bitbucket.org/pypy/pypy/changeset/fb49af41edfd/ Log: do not skip ssl tests, _ssl module is now in lib_pypy diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -397,7 +397,7 @@ RegrTest('test_source_encoding.py'), RegrTest('test_spwd.py'), RegrTest('test_sqlite.py', usemodules="thread _rawffi zlib"), - RegrTest('test_ssl.py', usemodules='_ssl _socket select'), + RegrTest('test_ssl.py', usemodules='_socket select'), RegrTest('test_startfile.py'), RegrTest('test_stat.py'), RegrTest('test_statistics.py'), From pypy.commits at gmail.com Thu Dec 29 09:34:04 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 29 Dec 2016 06:34:04 -0800 (PST) Subject: [pypy-commit] pypy default: Remove unused parameter Message-ID: <58651edc.8b9a1c0a.e728f.5e7e@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89265:343e3390bd53 Date: 2016-12-29 15:33 +0100 http://bitbucket.org/pypy/pypy/changeset/343e3390bd53/ Log: Remove unused parameter diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1076,7 +1076,7 @@ '\n' + '\n'.join(functions)) - eci = build_eci(True, export_symbols, code, use_micronumpy) + eci = build_eci(True, code, use_micronumpy) eci = eci.compile_shared_lib( outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) @@ -1341,7 +1341,7 @@ source_dir / "pymem.c", ] -def 
build_eci(building_bridge, export_symbols, code, use_micronumpy=False): +def build_eci(building_bridge, code, use_micronumpy=False): "NOT_RPYTHON" # Build code and get pointer to the structure kwds = {} @@ -1434,7 +1434,7 @@ code += "#include /* api.py line 1290 */\n" code += "\n".join(functions) - eci = build_eci(False, export_symbols, code, use_micronumpy) + eci = build_eci(False, code, use_micronumpy) space.fromcache(State).install_dll(eci) From pypy.commits at gmail.com Thu Dec 29 09:47:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 29 Dec 2016 06:47:50 -0800 (PST) Subject: [pypy-commit] pypy default: Remove unused parameter Message-ID: <58652216.212dc20a.22a2c.acef@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89266:a98582230a9a Date: 2016-12-29 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/a98582230a9a/ Log: Remove unused parameter diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1047,8 +1047,7 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, export_symbols, - prefix='cpyexttest') + functions = generate_decls_and_callbacks(db, prefix='cpyexttest') global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): @@ -1097,7 +1096,6 @@ run_bootstrap_functions(space) # load the bridge, and init structure - import ctypes bridge = ctypes.CDLL(str(modulename), mode=ctypes.RTLD_GLOBAL) space.fromcache(State).install_dll(eci) @@ -1224,7 +1222,7 @@ renamed_symbols = [] for name in export_symbols: if '#' in name: - name,header = name.split('#') + name, header = name.split('#') else: header = pypy_decl newname = mangle_name(prefix, name) @@ -1254,7 +1252,7 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, export_symbols, api_struct=True, prefix=''): +def generate_decls_and_callbacks(db, 
api_struct=True, prefix=''): "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] @@ -1427,8 +1425,7 @@ generate_macros(export_symbols, prefix=prefix) - functions = generate_decls_and_callbacks(db, [], api_struct=False, - prefix=prefix) + functions = generate_decls_and_callbacks(db, api_struct=False, prefix=prefix) code = "#include \n" if use_micronumpy: code += "#include /* api.py line 1290 */\n" @@ -1586,7 +1583,7 @@ @specialize.memo() def make_generic_cpy_call(FT, expect_null): - from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef + from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj from pypy.module.cpyext.pyobject import get_w_obj_and_decref from pypy.module.cpyext.pyerrors import PyErr_Occurred From pypy.commits at gmail.com Thu Dec 29 10:35:06 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 29 Dec 2016 07:35:06 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: revert last commit, handle it different in _ctypes Message-ID: <58652d2a.820bc30a.5970b.c2c4@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89267:6381bf44e70c Date: 2016-12-29 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/6381bf44e70c/ Log: revert last commit, handle it different in _ctypes diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -84,10 +84,18 @@ return self.from_address(dll._handle.getaddressindll(name)) def from_buffer(self, obj, offset=0): + from array import array size = self._sizeofinstances() - buf = buffer(obj, offset, size) - if buf._pypy_is_readonly(): - raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) + if isinstance(obj, array): + # hack, buffer(array.array) will always return a readonly buffer. + # CPython calls PyObject_AsWriteBuffer(...) here! 
+ # array.array does not implement the buffer interface so we cannot + # use memoryview here (neither on CPython)! + buf = buffer(obj, offset, size) + else: + buf = memoryview(obj)[offset:] + if buf.readonly: + raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) if len(buf) < size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py --- a/pypy/objspace/std/bufferobject.py +++ b/pypy/objspace/std/bufferobject.py @@ -138,11 +138,6 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) return space.wrap(rffi.cast(lltype.Signed, ptr)) - def descr_is_readonly(self, space): - """ Needed in ctypes (from_buffer), CPython can check if a - buffer can be readonly (has a C Function/Macro for that) """ - return space.newbool(bool(self.buf.readonly)) - W_Buffer.typedef = TypeDef( "buffer", None, None, "read-write", __doc__ = """\ @@ -171,6 +166,5 @@ __repr__ = interp2app(W_Buffer.descr_repr), __buffer__ = interp2app(W_Buffer.descr_getbuffer), _pypy_raw_address = interp2app(W_Buffer.descr_pypy_raw_address), - _pypy_is_readonly = interp2app(W_Buffer.descr_is_readonly), ) W_Buffer.typedef.acceptable_as_base_class = False From pypy.commits at gmail.com Thu Dec 29 12:25:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 29 Dec 2016 09:25:44 -0800 (PST) Subject: [pypy-commit] pypy default: Simplify code Message-ID: <58654718.92ae1c0a.28a23.80af@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89268:7ea1e9aea7b0 Date: 2016-12-29 18:24 +0100 http://bitbucket.org/pypy/pypy/changeset/7ea1e9aea7b0/ Log: Simplify code diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1,6 +1,5 @@ import ctypes import sys, os -import atexit import py @@ -18,7 +17,6 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager 
from rpython.tool.udir import udir -from rpython.translator import platform from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.baseobjspace import W_Root @@ -1028,6 +1026,8 @@ generate_macros(export_symbols, prefix='cpyexttest') + functions = generate_decls_and_callbacks(db, prefix='cpyexttest') + # Structure declaration code members = [] structindex = {} @@ -1047,8 +1047,6 @@ RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) - functions = generate_decls_and_callbacks(db, prefix='cpyexttest') - global_objects = [] for name, (typ, expr) in GLOBALS.iteritems(): if '#' in name: @@ -1219,7 +1217,6 @@ def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] - renamed_symbols = [] for name in export_symbols: if '#' in name: name, header = name.split('#') @@ -1231,8 +1228,6 @@ pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) - renamed_symbols.append(newname) - export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ From pypy.commits at gmail.com Thu Dec 29 14:53:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 29 Dec 2016 11:53:10 -0800 (PST) Subject: [pypy-commit] pypy default: Refactor globals registration Message-ID: <586569a6.8d071c0a.32208.c948@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89269:1e93630b441a Date: 2016-12-29 20:52 +0100 http://bitbucket.org/pypy/pypy/changeset/1e93630b441a/ Log: Refactor globals registration diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -482,6 +482,12 @@ TYPES[configname] = forward return forward +GLOBALS = {} +def register_global(name, typ, expr, header=None): + if header is not None: + name = '%s#%s' % (name, header) + GLOBALS[name] = (typ, expr) + INTERPLEVEL_API = {} FUNCTIONS = {} 
FUNCTIONS_BY_HEADER = {} @@ -542,18 +548,23 @@ '_Py_QnewFlag', 'Py_Py3kWarningFlag', 'Py_HashRandomizationFlag', '_Py_PackageContext', ] TYPES = {} -GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur - '_Py_NoneStruct#%s' % pypy_decl: ('PyObject*', 'space.w_None'), - '_Py_TrueStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_True'), - '_Py_ZeroStruct#%s' % pypy_decl: ('PyIntObject*', 'space.w_False'), - '_Py_NotImplementedStruct#%s' % pypy_decl: ('PyObject*', 'space.w_NotImplemented'), - '_Py_EllipsisObject#%s' % pypy_decl: ('PyObject*', 'space.w_Ellipsis'), - 'PyDateTimeAPI': ('PyDateTime_CAPI*', 'None'), - } FORWARD_DECLS = [] INIT_FUNCTIONS = [] BOOTSTRAP_FUNCTIONS = [] +# this needs to include all prebuilt pto, otherwise segfaults occur +register_global('_Py_NoneStruct', + 'PyObject*', 'space.w_None', header=pypy_decl) +register_global('_Py_TrueStruct', + 'PyIntObject*', 'space.w_True', header=pypy_decl) +register_global('_Py_ZeroStruct', + 'PyIntObject*', 'space.w_False', header=pypy_decl) +register_global('_Py_NotImplementedStruct', + 'PyObject*', 'space.w_NotImplemented', header=pypy_decl) +register_global('_Py_EllipsisObject', + 'PyObject*', 'space.w_Ellipsis', header=pypy_decl) +register_global('PyDateTimeAPI', 'PyDateTime_CAPI*', 'None') + def build_exported_objects(): # Standard exceptions # PyExc_BaseException, PyExc_Exception, PyExc_ValueError, PyExc_KeyError, @@ -562,7 +573,7 @@ # PyExc_NameError, PyExc_MemoryError, PyExc_RuntimeError, # PyExc_UnicodeEncodeError, PyExc_UnicodeDecodeError, ... 
for exc_name in exceptions.Module.interpleveldefs.keys(): - GLOBALS['PyExc_' + exc_name] = ( + register_global('PyExc_' + exc_name, 'PyTypeObject*', 'space.gettypeobject(interp_exceptions.W_%s.typedef)'% (exc_name, )) @@ -597,7 +608,7 @@ 'PyCFunction_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCFunctionObject.typedef)', 'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)' }.items(): - GLOBALS['%s#%s' % (cpyname, pypy_decl)] = ('PyTypeObject*', pypyexpr) + register_global(cpyname, 'PyTypeObject*', pypyexpr, header=pypy_decl) for cpyname in '''PyMethodObject PyListObject PyLongObject PyClassObject'''.split(): @@ -1403,10 +1414,12 @@ return use_micronumpy # import registers api functions by side-effect, we also need HEADER from pypy.module.cpyext.ndarrayobject import HEADER - global GLOBALS, FUNCTIONS_BY_HEADER, separate_module_files + global FUNCTIONS_BY_HEADER, separate_module_files for func_name in ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS']: FUNCTIONS_BY_HEADER.setdefault(HEADER, {})[func_name] = None - GLOBALS["PyArray_Type#%s" % HEADER] = ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") + register_global("PyArray_Type", + 'PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)", + header=HEADER) separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy From pypy.commits at gmail.com Thu Dec 29 14:57:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 29 Dec 2016 11:57:43 -0800 (PST) Subject: [pypy-commit] pypy default: Delete obsolete and undocumented file Message-ID: <58656ab7.cf3fc20a.aa7e8.27e3@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89270:6e6b17b429d7 Date: 2016-12-29 20:57 +0100 http://bitbucket.org/pypy/pypy/changeset/6e6b17b429d7/ Log: Delete obsolete and undocumented file diff --git a/pypy/module/cpyext/stubgen.py b/pypy/module/cpyext/stubgen.py deleted file mode 100644 --- a/pypy/module/cpyext/stubgen.py +++ /dev/null @@ -1,100 +0,0 
@@ -# -*- coding: utf-8 -*- -from os import path - -from pypy.module.cpyext import api - -from sphinx import addnodes - - -TEMPLATE = """ - at cpython_api([%(paramtypes)s], %(rettype)s) -def %(functionname)s(%(params)s): -%(docstring)s raise NotImplementedError - %(borrows)s -""" - -C_TYPE_TO_PYPY_TYPE = { - "void": "lltype.Void", - "int": "rffi.INT_real", - "PyTypeObject*": "PyTypeObjectPtr", - "PyVarObject*": "PyObject", - "const char*": "rffi.CCHARP", - "double": "rffi.DOUBLE", - "PyObject*": "PyObject", - "PyObject**": "PyObjectP", - "char*": "rffi.CCHARP", - "PyMethodDef*": "PyMethodDef", - "Py_ssize_t": "Py_ssize_t", - "Py_ssize_t*": "Py_ssize_t", - "size_t": "rffi.SIZE_T", - "...": "...", - "char": "lltype.Char", - "long": "lltype.Signed", - "Py_buffer*": "Py_buffer", - "": "", - } - -C_TYPE_TO_PYPY_TYPE_ARGS = C_TYPE_TO_PYPY_TYPE.copy() -C_TYPE_TO_PYPY_TYPE_ARGS.update({ - "void": "rffi.VOIDP", - }) - - -def c_param_to_type_and_name(string, is_arg=True): - string = string.replace(" **", "** ").replace(" *", "* ") - try: - typ, name = string.rsplit(" ", 1) - except ValueError: - typ = string - name = "" - return [C_TYPE_TO_PYPY_TYPE, C_TYPE_TO_PYPY_TYPE_ARGS][is_arg]\ - .get(typ, "{" + typ + "}"), name - - -def process_doctree(app, doctree): - for node in doctree.traverse(addnodes.desc_content): - par = node.parent - if par['desctype'] != 'cfunction': - continue - if not par[0].has_key('names') or not par[0]['names']: - continue - functionname = par[0]['names'][0] - if (functionname in api.FUNCTIONS or - functionname in api.SYMBOLS_C): - print "Wow, you implemented already", functionname - continue - borrows = docstring = "" - crettype, _, cparameters = par[0] - crettype = crettype.astext() - cparameters = cparameters.astext() - rettype, _ = c_param_to_type_and_name(crettype, False) - params = ["space"] - paramtypes = [] - for param in cparameters.split(","): - typ, name = c_param_to_type_and_name(param.strip()) - params.append(name) - paramtypes.append(typ) 
- params = ", ".join(params) - paramtypes = ", ".join(paramtypes) - docstring = node.astext() - entry = app._refcounts.get(functionname) - if entry and entry.result_type in ("PyObject*", "PyVarObject*"): - if entry.result_refs is None: - docstring += "\nReturn value: always NULL." - else: - borrows = ("borrow_from()", "")[entry.result_refs] - docstring = "\n ".join(docstring.splitlines()) - if docstring: - docstring = ' """%s"""\n' % (docstring,) - code = TEMPLATE % locals() - app._stubgen_f.write(code) - - -def init_apidump(app): - fname = path.join(path.dirname(api.__file__), "stubs.py") - app._stubgen_f = file(fname, "w") - app.connect('doctree-read', process_doctree) - - -def setup(app): - app.connect('builder-inited', init_apidump) From pypy.commits at gmail.com Thu Dec 29 18:05:50 2016 From: pypy.commits at gmail.com (stefanor) Date: Thu, 29 Dec 2016 15:05:50 -0800 (PST) Subject: [pypy-commit] cffi default: We're interested in --with-pydebug not -d Message-ID: <586596ce.54b31c0a.2f251.e6d6@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2844:bd6789d993c6 Date: 2016-12-30 00:05 +0100 http://bitbucket.org/cffi/cffi/changeset/bd6789d993c6/ Log: We're interested in --with-pydebug not -d diff --git a/cffi/setuptools_ext.py b/cffi/setuptools_ext.py --- a/cffi/setuptools_ext.py +++ b/cffi/setuptools_ext.py @@ -79,9 +79,10 @@ CPython itself should ignore the flag in a debugging version (by not listing .abi3.so in the extensions it supports), but it doesn't so far, creating troubles. That's why we check - for "not sys.flags.debug". (http://bugs.python.org/issue28401) + for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent + of 'd' not in sys.abiflags). 
(http://bugs.python.org/issue28401) """ - if 'py_limited_api' not in kwds and not sys.flags.debug: + if 'py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount'): import setuptools try: setuptools_major_version = int(setuptools.__version__.partition('.')[0]) diff --git a/testing/cffi0/test_zintegration.py b/testing/cffi0/test_zintegration.py --- a/testing/cffi0/test_zintegration.py +++ b/testing/cffi0/test_zintegration.py @@ -156,20 +156,21 @@ except ImportError as e: py.test.skip(str(e)) orig_version = setuptools.__version__ + expecting_limited_api = not hasattr(sys, 'gettotalrefcount') try: setuptools.__version__ = '26.0.0' from setuptools import Extension kwds = _set_py_limited_api(Extension, {}) - assert kwds['py_limited_api'] == True + assert kwds.get('py_limited_api', False) == expecting_limited_api setuptools.__version__ = '25.0' kwds = _set_py_limited_api(Extension, {}) - assert not kwds + assert kwds.get('py_limited_api', False) == False setuptools.__version__ = 'development' kwds = _set_py_limited_api(Extension, {}) - assert kwds['py_limited_api'] == True + assert kwds.get('py_limited_api', False) == expecting_limited_api finally: setuptools.__version__ = orig_version From pypy.commits at gmail.com Fri Dec 30 03:58:50 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 30 Dec 2016 00:58:50 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: Update "Building from source" to remove the hack of bundles and mention Message-ID: <586621ca.c64bc20a.84589.10f7@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r840:647277f35266 Date: 2016-12-30 09:58 +0100 http://bitbucket.org/pypy/pypy.org/changeset/647277f35266/ Log: Update "Building from source" to remove the hack of bundles and mention instead that hg >= 3.7 will now be fast clone (great work, bitbucket!) diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -313,38 +313,20 @@ (see more build instructions_) -1. 
Get the source code. The following packages contain the source at +1. Get the source code. The preferred way is to checkout the current + trunk using Mercurial_. The trunk usually works and is of course + more up-to-date. The following command should run in about 7 minutes + nowadays if you have hg >= 3.7 (it is much slower with older versions):: + + hg clone https://bitbucket.org/pypy/pypy + + Alternatively, the following smaller package contains the source at the same revision as the above binaries: * `pypy2-v5.6.0-src.tar.bz2`__ (sources) .. __: https://bitbucket.org/pypy/pypy/downloads/pypy2-v5.6.0-src.tar.bz2 - Or you can checkout the current trunk using Mercurial_ (the trunk - usually works and is of course more up-to-date):: - - hg clone https://bitbucket.org/pypy/pypy - - The above command may take a long time to run and if it aborts, it - is not resumable. You may prefer this way:: - - hg clone -r null https://bitbucket.org/pypy/pypy - cd pypy - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-01.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-02.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-03.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-04.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-05.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-06.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-07.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-08.bz2 - hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-09.bz2 - hg pull - hg update - - If needed, you can also download the bz2 files by other means. - You can then replace the multiple ``unbundle`` commands above with - a single ``hg unbundle pypy-bundle-*.bz2``. 2. Make sure you **installed the dependencies.** See the list here__. 
From pypy.commits at gmail.com Fri Dec 30 03:59:04 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 30 Dec 2016 00:59:04 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: regenerate Message-ID: <586621d8.ce181c0a.bb41f.8090@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r841:0e7b448692ac Date: 2016-12-30 09:58 +0100 http://bitbucket.org/pypy/pypy.org/changeset/0e7b448692ac/ Log: regenerate diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -278,36 +278,18 @@

      Building from source

      (see more build instructions)

        -
      1. Get the source code. The following packages contain the source at +

      2. Get the source code. The preferred way is to checkout the current +trunk using Mercurial. The trunk usually works and is of course +more up-to-date. The following command should run in about 7 minutes +nowadays if you have hg >= 3.7 (it is much slower with older versions):

        +
        +hg clone https://bitbucket.org/pypy/pypy
        +
        +

        Alternatively, the following smaller package contains the source at the same revision as the above binaries:

        -

        Or you can checkout the current trunk using Mercurial (the trunk -usually works and is of course more up-to-date):

        -
        -hg clone https://bitbucket.org/pypy/pypy
        -
        -

        The above command may take a long time to run and if it aborts, it -is not resumable. You may prefer this way:

        -
        -hg clone -r null https://bitbucket.org/pypy/pypy
        -cd pypy
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-01.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-02.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-03.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-04.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-05.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-06.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-07.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-08.bz2
        -hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-09.bz2
        -hg pull
        -hg update
        -
        -

        If needed, you can also download the bz2 files by other means. -You can then replace the multiple unbundle commands above with -a single hg unbundle pypy-bundle-*.bz2.

      3. Make sure you installed the dependencies. See the list here.

      4. From pypy.commits at gmail.com Fri Dec 30 04:58:30 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:30 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: move tests around, some now fail Message-ID: <58662fc6.e626c20a.f471c.16e8@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89271:c0e51122aaf3 Date: 2016-12-28 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/c0e51122aaf3/ Log: move tests around, some now fail diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -664,30 +664,6 @@ assert module.tp_init(list, x, ("hi",)) is None assert x == ["h", "i"] - def test_tp_str(self): - module = self.import_extension('foo', [ - ("tp_str", "METH_VARARGS", - ''' - PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); - PyObject *obj = PyTuple_GET_ITEM(args, 1); - if (!type->tp_str) - { - PyErr_SetNone(PyExc_ValueError); - return NULL; - } - return type->tp_str(obj); - ''' - ) - ]) - class C: - def __str__(self): - return "text" - assert module.tp_str(type(C()), C()) == "text" - class D(int): - def __str__(self): - return "more text" - assert module.tp_str(int, D(42)) == "42" - def test_mp_ass_subscript(self): module = self.import_extension('foo', [ ("new_obj", "METH_NOARGS", diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ b/pypy/module/cpyext/test/test_userslots.py @@ -1,8 +1,9 @@ -from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi from pypy.module.cpyext.pyobject import make_ref, from_ref from pypy.module.cpyext.api import generic_cpy_call from pypy.module.cpyext.typeobject import PyTypeObjectPtr +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import 
AppTestCpythonExtensionBase class TestAppLevelObject(BaseApiTest): @@ -22,20 +23,6 @@ py_date, py_date) assert space.str_w(w_obj) == 'sum!' - def test_tp_hash_from_python(self, space, api): - w_c = space.appexec([], """(): - class C: - def __hash__(self): - return -23 - return C() - """) - w_ctype = space.type(w_c) - py_c = make_ref(space, w_c) - py_ctype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_ctype)) - assert py_ctype.c_tp_hash - val = generic_cpy_call(space, py_ctype.c_tp_hash, py_c) - assert val == -23 - def test_tp_new_from_python(self, space, api): w_date = space.appexec([], """(): class Date(object): @@ -60,4 +47,49 @@ w_year = space.getattr(w_obj, space.newbytes('year')) assert space.int_w(w_year) == 1 +class AppTestUserSlots(AppTestCpythonExtensionBase): + def test_tp_hash_from_python(self): + # to see that the functions are being used, + # run pytest with -s + module = self.import_extension('foo', [ + ("use_hash", "METH_O", + ''' + long hash = args->ob_type->tp_hash(args); + return PyLong_FromLong(hash); + ''')]) + class C(object): + def __hash__(self): + return -23 + c = C() + # uses the userslot slot_tp_hash + ret = module.use_hash(C()) + assert hash(c) == ret + # uses the slotdef renamed cpyext_tp_hash_int + ret = module.use_hash(3) + assert hash(3) == ret + def test_tp_str(self): + module = self.import_extension('foo', [ + ("tp_str", "METH_VARARGS", + ''' + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + if (!type->tp_str) + { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + return type->tp_str(obj); + ''' + ) + ]) + class C: + def __str__(self): + return "text" + assert module.tp_str(type(C()), C()) == "text" + class D(int): + def __str__(self): + return "more text" + assert module.tp_str(int, D(42)) == "42" + + From pypy.commits at gmail.com Fri Dec 30 04:58:32 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:32 -0800 (PST) Subject: [pypy-commit] 
pypy missing-tp_new: builtin->tp_str() resolves differently than applevel->tp_str. Add tp_hash Message-ID: <58662fc8.ce941c0a.d731b.9ee7@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89272:0b1631e69bc6 Date: 2016-12-28 20:58 +0200 http://bitbucket.org/pypy/pypy/changeset/0b1631e69bc6/ Log: builtin->tp_str() resolves differently than applevel->tp_str. Add tp_hash diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -556,6 +556,19 @@ return space.call_function(slot_fn, w_self) handled = True + for tp_name, attr in [('tp_hash', '__hash__'), + ]: + if name == tp_name: + slot_fn = w_type.getdictvalue(space, attr) + if slot_fn is None: + return + @cpython_api([PyObject], lltype.Signed, header=header, error=-1) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_func(space, w_obj): + return space.int_w(space.call_function(slot_fn, w_self)) + handled = True + + # binary functions for tp_name, attr in [('tp_as_number.c_nb_add', '__add__'), ('tp_as_number.c_nb_subtract', '__sub__'), diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -276,12 +276,11 @@ for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots: if search_dict_w is not None: # heap type: only look in this exact class - #if method_name in search_dict_w and method_name == '__new__': - # import pdb;pdb.set_trace() w_descr = search_dict_w.get(method_name, None) else: # built-in types: expose as many slots as possible, even # if it happens to come from some parent class + slot_apifunc = None # use get_slot_tp_function lookup mechanism w_descr = w_type.lookup(method_name) if w_descr is None: From pypy.commits at gmail.com Fri Dec 30 04:58:34 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:34 -0800 (PST) 
Subject: [pypy-commit] pypy missing-tp_new: show how not all PyTypeObject slots are filled Message-ID: <58662fca.8b9a1c0a.e728f.c54d@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89273:69d3b2eddefb Date: 2016-12-28 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/69d3b2eddefb/ Log: show how not all PyTypeObject slots are filled diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ b/pypy/module/cpyext/test/test_userslots.py @@ -76,7 +76,7 @@ PyObject *obj = PyTuple_GET_ITEM(args, 1); if (!type->tp_str) { - PyErr_SetNone(PyExc_ValueError); + PyErr_SetString(PyExc_ValueError, "no tp_str"); return NULL; } return type->tp_str(obj); @@ -91,5 +91,8 @@ def __str__(self): return "more text" assert module.tp_str(int, D(42)) == "42" + class A(object): + pass + s = module.tp_str(type(A()), A()) + assert 'A object' in s - From pypy.commits at gmail.com Fri Dec 30 04:58:36 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:36 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: prefer runtime lookup for app-level classes, import-time resolution for builtins Message-ID: <58662fcc.448e1c0a.328c8.936c@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89274:ba89162ba498 Date: 2016-12-28 23:27 +0200 http://bitbucket.org/pypy/pypy/changeset/ba89162ba498/ Log: prefer runtime lookup for app-level classes, import-time resolution for builtins diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -565,7 +565,7 @@ @cpython_api([PyObject], lltype.Signed, header=header, error=-1) @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) def slot_func(space, w_obj): - return space.int_w(space.call_function(slot_fn, w_self)) + return space.int_w(space.call_function(slot_fn, w_obj)) handled = True diff --git 
a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -37,7 +37,7 @@ from pypy.objspace.std.typeobject import W_TypeObject, find_best_base -WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False +#WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False PyType_Check, PyType_CheckExact = build_type_checkers("Type", "w_type") @@ -274,27 +274,21 @@ search_dict_w = None for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots: - if search_dict_w is not None: - # heap type: only look in this exact class - w_descr = search_dict_w.get(method_name, None) - else: + if search_dict_w is None: # built-in types: expose as many slots as possible, even # if it happens to come from some parent class - slot_apifunc = None # use get_slot_tp_function lookup mechanism - w_descr = w_type.lookup(method_name) - - if w_descr is None: - # XXX special case iternext - continue + slot_apifunc = None # use get_slot_tp_function + else: + # use the slot_apifunc (userslots) to lookup at runtime + pass if typedef is not None: if slot_apifunc is None: slot_apifunc = get_slot_tp_function(space, typedef, slot_name) if not slot_apifunc: - if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: - os.write(2, - "%s defined by %s but no slot function defined!\n" % ( - method_name, w_type.getname(space))) + if not we_are_translated(): + print "missing slot %r/%r for %r" % ( + method_name, slot_name, w_type.getname(space)) continue slot_func_helper = slot_apifunc.get_llhelper(space) @@ -771,9 +765,9 @@ if pto.c_tp_itemsize < pto.c_tp_base.c_tp_itemsize: pto.c_tp_itemsize = pto.c_tp_base.c_tp_itemsize - # will be filled later on with the correct value - # may not be 0 if space.is_w(w_type, space.w_object): + # will be filled later on with the correct value + # may not be 0 pto.c_tp_new = rffi.cast(newfunc, 1) update_all_slots(space, w_type, pto) if not pto.c_tp_new: From pypy.commits at gmail.com Fri Dec 30 04:58:37 2016 From: 
pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:37 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: a passing test of subtype_dealloc Message-ID: <58662fcd.0b561c0a.9ea67.9873@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89275:c9438dc074aa Date: 2016-12-30 00:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c9438dc074aa/ Log: a passing test of subtype_dealloc diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ b/pypy/module/cpyext/test/test_userslots.py @@ -96,3 +96,68 @@ s = module.tp_str(type(A()), A()) assert 'A object' in s + def test_tp_deallocate(self): + module = self.import_extension('foo', [ + ("get_cnt", "METH_NOARGS", + ''' + return PyLong_FromLong(foocnt); + '''), + ("get_timestamp", "METH_NOARGS", + ''' + PyObject * one = PyLong_FromLong(1); + PyObject * a = PyTuple_Pack(3, one, one, one); + PyObject * k = NULL; + obj = _Timestamp.tp_new(&_Timestamp, a, k); + Py_DECREF(one); + return obj; + '''), + ], prologue=''' + static int foocnt = 0; + static PyTypeObject* datetime_cls = NULL; + static PyObject * obj = NULL; + static PyObject* + timestamp_new(PyTypeObject* t, PyObject* a, PyObject* k) + { + foocnt ++; + return datetime_cls->tp_new(t, a, k); + } + static void + timestamp_dealloc(PyObject *op) + { + foocnt --; + datetime_cls->tp_dealloc(op); + } + + + static PyTypeObject _Timestamp = { + PyObject_HEAD_INIT(NULL) + 0, /* ob_size */ + "foo._Timestamp", /* tp_name*/ + 0, /* tp_basicsize*/ + 0, /* tp_itemsize */ + timestamp_dealloc /* tp_dealloc */ + }; + ''', more_init=''' + PyObject * mod = PyImport_ImportModule("datetime"); + if (mod == NULL) INITERROR; + PyObject * dt = PyString_FromString("datetime"); + datetime_cls = (PyTypeObject*)PyObject_GetAttr(mod, dt); + if (datetime_cls == NULL) INITERROR; + _Timestamp.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + _Timestamp.tp_base = datetime_cls; + 
_Timestamp.tp_new = timestamp_new; + Py_DECREF(mod); + Py_DECREF(dt); + if (PyType_Ready(&_Timestamp) < 0) INITERROR; + ''') + import gc, sys + cnt = module.get_cnt() + assert cnt == 0 + obj = module.get_timestamp() + cnt = module.get_cnt() + assert cnt == 1 + assert obj.year == 1 + del obj + self.debug_collect() + cnt = module.get_cnt() + assert cnt == 0 From pypy.commits at gmail.com Fri Dec 30 04:58:41 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:41 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: print only first time missing function is noticed Message-ID: <58662fd1.d32f1c0a.9e8ad.7761@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89277:453e1a275dae Date: 2016-12-30 11:43 +0200 http://bitbucket.org/pypy/pypy/changeset/453e1a275dae/ Log: print only first time missing function is noticed diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -259,6 +259,7 @@ dict_w[name] = w_descr i += 1 +missing_slots={} def update_all_slots(space, w_type, pto): # fill slots in pto # Not very sure about it, but according to @@ -287,8 +288,10 @@ slot_apifunc = get_slot_tp_function(space, typedef, slot_name) if not slot_apifunc: if not we_are_translated(): - print "missing slot %r/%r for %r" % ( - method_name, slot_name, w_type.getname(space)) + if slot_name not in missing_slots: + missing_slots[slot_name] = w_type.getname(space) + print "missing slot %r/%r, discovered on %r" % ( + method_name, slot_name, w_type.getname(space)) continue slot_func_helper = slot_apifunc.get_llhelper(space) From pypy.commits at gmail.com Fri Dec 30 04:58:43 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:43 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: reproduce the recursive calls to tp_dealloc in pandas Message-ID: <58662fd3.849c1c0a.7f283.8eb6@mx.google.com> Author: Matti Picus Branch: 
missing-tp_new Changeset: r89278:ca60170ed0cb Date: 2016-12-30 11:57 +0200 http://bitbucket.org/pypy/pypy/changeset/ca60170ed0cb/ Log: reproduce the recursive calls to tp_dealloc in pandas diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ b/pypy/module/cpyext/test/test_userslots.py @@ -102,6 +102,15 @@ ''' return PyLong_FromLong(foocnt); '''), + ("get__timestamp", "METH_NOARGS", + ''' + PyObject * one = PyLong_FromLong(1); + PyObject * a = PyTuple_Pack(3, one, one, one); + PyObject * k = NULL; + obj = _Timestamp.tp_new(&_Timestamp, a, k); + Py_DECREF(one); + return obj; + '''), ("get_timestamp", "METH_NOARGS", ''' PyObject * one = PyLong_FromLong(1); @@ -121,18 +130,13 @@ foocnt ++; return datetime_cls->tp_new(t, a, k); } + static PyObject* timestamp_new(PyTypeObject* t, PyObject* a, PyObject* k) { - foocnt ++; return datetime_cls->tp_new(t, a, k); } - static void - timestamp_dealloc(PyObject *op) - { - foocnt --; - datetime_cls->tp_dealloc(op); - } + static void _timestamp_dealloc(PyObject *op) { @@ -147,15 +151,14 @@ "foo._Timestamp", /* tp_name*/ 0, /* tp_basicsize*/ 0, /* tp_itemsize */ - _timestamp_dealloc /* tp_dealloc */ + _timestamp_dealloc /* tp_dealloc */ }; static PyTypeObject Timestamp = { PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "foo.Timestamp", /* tp_name*/ 0, /* tp_basicsize*/ - 0, /* tp_itemsize */ - timestamp_dealloc /* tp_dealloc */ + 0 /* tp_itemsize */ }; ''', more_init=''' PyObject * mod = PyImport_ImportModule("datetime"); @@ -169,15 +172,21 @@ Py_DECREF(mod); Py_DECREF(dt); if (PyType_Ready(&_Timestamp) < 0) INITERROR; + Timestamp.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; Timestamp.tp_base = &_Timestamp; Timestamp.tp_new = timestamp_new; + Timestamp.tp_dealloc = datetime_cls->tp_dealloc; if (PyType_Ready(&Timestamp) < 0) INITERROR; ''') + # _Timestamp has __new__, __del__ and + # inherits from datetime.datetime + # Timestamp has 
__new__, default __del__ (subtype_dealloc) and + # inherits from _Timestamp import gc, sys cnt = module.get_cnt() assert cnt == 0 - obj = module.get_timestamp() + obj = module.get__timestamp() #_Timestamp cnt = module.get_cnt() assert cnt == 1 assert obj.year == 1 @@ -185,3 +194,15 @@ self.debug_collect() cnt = module.get_cnt() assert cnt == 0 + + obj = module.get_timestamp() #Timestamp + cnt = module.get_cnt() + assert cnt == 0 + assert obj.year == 1 + # XXX calling Timestamp.tp_dealloc which is subtype_dealloc + # causes infinite recursion + del obj + self.debug_collect() + cnt = module.get_cnt() + assert cnt == 0 + From pypy.commits at gmail.com Fri Dec 30 04:58:39 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 30 Dec 2016 01:58:39 -0800 (PST) Subject: [pypy-commit] pypy missing-tp_new: add another c-level type to the inheritance scheme, test still passes Message-ID: <58662fcf.c4811c0a.16ab2.9553@mx.google.com> Author: Matti Picus Branch: missing-tp_new Changeset: r89276:4fd301bef4de Date: 2016-12-30 08:01 +0200 http://bitbucket.org/pypy/pypy/changeset/4fd301bef4de/ Log: add another c-level type to the inheritance scheme, test still passes diff --git a/pypy/module/cpyext/test/test_userslots.py b/pypy/module/cpyext/test/test_userslots.py --- a/pypy/module/cpyext/test/test_userslots.py +++ b/pypy/module/cpyext/test/test_userslots.py @@ -107,7 +107,7 @@ PyObject * one = PyLong_FromLong(1); PyObject * a = PyTuple_Pack(3, one, one, one); PyObject * k = NULL; - obj = _Timestamp.tp_new(&_Timestamp, a, k); + obj = Timestamp.tp_new(&Timestamp, a, k); Py_DECREF(one); return obj; '''), @@ -116,6 +116,12 @@ static PyTypeObject* datetime_cls = NULL; static PyObject * obj = NULL; static PyObject* + _timestamp_new(PyTypeObject* t, PyObject* a, PyObject* k) + { + foocnt ++; + return datetime_cls->tp_new(t, a, k); + } + static PyObject* timestamp_new(PyTypeObject* t, PyObject* a, PyObject* k) { foocnt ++; @@ -127,6 +133,12 @@ foocnt --; datetime_cls->tp_dealloc(op); } 
+ static void + _timestamp_dealloc(PyObject *op) + { + foocnt --; + datetime_cls->tp_dealloc(op); + } static PyTypeObject _Timestamp = { @@ -135,6 +147,14 @@ "foo._Timestamp", /* tp_name*/ 0, /* tp_basicsize*/ 0, /* tp_itemsize */ + _timestamp_dealloc /* tp_dealloc */ + }; + static PyTypeObject Timestamp = { + PyObject_HEAD_INIT(NULL) + 0, /* ob_size */ + "foo.Timestamp", /* tp_name*/ + 0, /* tp_basicsize*/ + 0, /* tp_itemsize */ timestamp_dealloc /* tp_dealloc */ }; ''', more_init=''' @@ -145,10 +165,14 @@ if (datetime_cls == NULL) INITERROR; _Timestamp.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; _Timestamp.tp_base = datetime_cls; - _Timestamp.tp_new = timestamp_new; + _Timestamp.tp_new = _timestamp_new; Py_DECREF(mod); Py_DECREF(dt); if (PyType_Ready(&_Timestamp) < 0) INITERROR; + Timestamp.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Timestamp.tp_base = &_Timestamp; + Timestamp.tp_new = timestamp_new; + if (PyType_Ready(&Timestamp) < 0) INITERROR; ''') import gc, sys cnt = module.get_cnt() From pypy.commits at gmail.com Fri Dec 30 11:15:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 30 Dec 2016 08:15:00 -0800 (PST) Subject: [pypy-commit] pypy strbuf-as-buffer: catch case if from_buffer is str/unicode Message-ID: <58668804.a351c20a.48d77.a121@mx.google.com> Author: Richard Plangger Branch: strbuf-as-buffer Changeset: r89279:d044ec26c066 Date: 2016-12-30 17:14 +0100 http://bitbucket.org/pypy/pypy/changeset/d044ec26c066/ Log: catch case if from_buffer is str/unicode diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -84,18 +84,15 @@ return self.from_address(dll._handle.getaddressindll(name)) def from_buffer(self, obj, offset=0): - from array import array size = self._sizeofinstances() - if isinstance(obj, array): - # hack, buffer(array.array) will always return a readonly buffer. 
+ if isinstance(obj, (str, unicode)): + # hack, buffer(str) will always return a readonly buffer. # CPython calls PyObject_AsWriteBuffer(...) here! - # array.array does not implement the buffer interface so we cannot - # use memoryview here (neither on CPython)! - buf = buffer(obj, offset, size) - else: - buf = memoryview(obj)[offset:] - if buf.readonly: - raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) + # str cannot be modified, thus rase a type error in this case + raise TypeError("Cannot use %s as modifiable buffer" % str(type(obj))) + + buf = buffer(obj, offset, size) + if len(buf) < size: raise ValueError( "Buffer size too small (%d instead of at least %d bytes)" From pypy.commits at gmail.com Fri Dec 30 11:16:41 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 30 Dec 2016 08:16:41 -0800 (PST) Subject: [pypy-commit] pypy py3.5-time: make struct_time carry the fields tm_gmtoff/tm_zone, currently those attributes are not available even though the system provides them Message-ID: <58668869.86cbc20a.960e5.e9e2@mx.google.com> Author: Richard Plangger Branch: py3.5-time Changeset: r89280:dc076a581a0e Date: 2016-12-30 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/dc076a581a0e/ Log: make struct_time carry the fields tm_gmtoff/tm_zone, currently those attributes are not available even though the system provides them diff --git a/pypy/module/time/app_time.py b/pypy/module/time/app_time.py --- a/pypy/module/time/app_time.py +++ b/pypy/module/time/app_time.py @@ -3,6 +3,7 @@ from _structseq import structseqtype, structseqfield from types import SimpleNamespace import time + class struct_time(metaclass=structseqtype): __module__ = 'time' name = 'time.struct_time' @@ -16,6 +17,8 @@ tm_wday = structseqfield(6) tm_yday = structseqfield(7) tm_isdst = structseqfield(8) + tm_gmtoff = structseqfield(9) + tm_zone = structseqfield(10) def strptime(string, format="%a %b %d %H:%M:%S %Y"): """strptime(string, format) -> struct_time diff --git 
a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -164,6 +164,8 @@ CLOCKS_PER_SEC = platform.ConstantInteger("CLOCKS_PER_SEC") has_gettimeofday = platform.Has('gettimeofday') +HAS_TM_ZONE = False + if _POSIX: calling_conv = 'c' CConfig.timeval = platform.Struct("struct timeval", @@ -180,6 +182,9 @@ ("tm_mon", rffi.INT), ("tm_year", rffi.INT), ("tm_wday", rffi.INT), ("tm_yday", rffi.INT), ("tm_isdst", rffi.INT), ("tm_gmtoff", rffi.LONG), ("tm_zone", rffi.CCHARP)]) + + HAS_TM_ZONE = True + elif _WIN: calling_conv = 'win' CConfig.tm = platform.Struct("struct tm", [("tm_sec", rffi.INT), @@ -539,7 +544,11 @@ w_struct_time = _get_module_object(space, 'struct_time') w_time_tuple = space.newtuple(time_tuple) - return space.call_function(w_struct_time, w_time_tuple) + w_obj = space.call_function(w_struct_time, w_time_tuple) + if HAS_TM_ZONE: + space.setattr(w_obj, space.wrap("tm_gmoff"), space.wrap(rffi.getintfield(t, 'c_tm_gmtoff'))) + space.setattr(w_obj, space.wrap("tm_zone"), space.wrap(rffi.getintfield(t, 'c_tm_zone'))) + return w_obj def _gettmarg(space, w_tup, allowNone=True): if space.is_none(w_tup): diff --git a/pypy/module/time/test/test_time.py b/pypy/module/time/test/test_time.py --- a/pypy/module/time/test/test_time.py +++ b/pypy/module/time/test/test_time.py @@ -254,6 +254,22 @@ del os.environ['TZ'] time.tzset() + def test_localtime_timezone(self): + import os, time + org_TZ = os.environ.get('TZ', None) + try: + os.environ['TZ'] = 'Europe/Kiev' + time.tzset() + localtm = time.localtime(0) + assert localtm.tm_zone == "MSK" + assert localtm.tm_gmtoff == 10800 + finally: + if org_TZ is not None: + os.environ['TZ'] = org_TZ + elif 'TZ' in os.environ: + del os.environ['TZ'] + time.tzset() + def test_strftime(self): import time import os, sys From pypy.commits at gmail.com Fri Dec 30 13:11:37 2016 From: pypy.commits at gmail.com (stefanor) Date: Fri, 30 Dec 2016 10:11:37 
-0800 (PST) Subject: [pypy-commit] cffi default: Explicitly flush the import machinery cache, in case the filesystem doesn't have enough mtime resolution to notice change Message-ID: <5866a359.2350c20a.e5711.c3c7@mx.google.com> Author: Stefano Rivera Branch: Changeset: r2845:ff406edc2609 Date: 2016-12-30 19:11 +0100 http://bitbucket.org/cffi/cffi/changeset/ff406edc2609/ Log: Explicitly flush the import machinery cache, in case the filesystem doesn't have enough mtime resolution to notice change diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1,5 +1,6 @@ import sys, os, py +import importlib from cffi import FFI, VerificationError, FFIError from cffi import recompiler from testing.udir import udir @@ -521,6 +522,8 @@ assert os.path.exists(str(package_dir.join('mymod.c'))) package_dir.join('__init__.py').write('') # + getattr(importlib, 'invalidate_caches', object)() + # sys.path.insert(0, str(udir)) import test_module_name_in_package.mymod assert test_module_name_in_package.mymod.lib.foo(10) == 42 From pypy.commits at gmail.com Fri Dec 30 13:27:36 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 30 Dec 2016 10:27:36 -0800 (PST) Subject: [pypy-commit] cffi default: Python 2.6 compat Message-ID: <5866a718.8c1f1c0a.f8a7f.6486@mx.google.com> Author: Armin Rigo Branch: Changeset: r2846:3cb58f9ed94d Date: 2016-12-30 19:27 +0100 http://bitbucket.org/cffi/cffi/changeset/3cb58f9ed94d/ Log: Python 2.6 compat diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1,12 +1,16 @@ import sys, os, py -import importlib from cffi import FFI, VerificationError, FFIError from cffi import recompiler from testing.udir import udir from testing.support import u, long from testing.support import FdWriteCapture, StdErrCapture +try: + import importlib +except 
ImportError: + importlib = None + def check_type_table(input, expected_output, included=None): ffi = FFI() From pypy.commits at gmail.com Fri Dec 30 16:45:47 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 30 Dec 2016 13:45:47 -0800 (PST) Subject: [pypy-commit] pypy default: Clean up; merge generate_macros() into generate_decls_and_callbacks() Message-ID: <5866d58b.4673c20a.fafa7.100a@mx.google.com> Author: Ronan Lamy Branch: Changeset: r89281:d02b1ad322c6 Date: 2016-12-30 10:36 +0100 http://bitbucket.org/pypy/pypy/changeset/d02b1ad322c6/ Log: Clean up; merge generate_macros() into generate_decls_and_callbacks() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1028,16 +1028,12 @@ def build_bridge(space): "NOT_RPYTHON" from pypy.module.cpyext.pyobject import make_ref + from rpython.translator.c.database import LowLevelDatabase + use_micronumpy = setup_micronumpy(space) + db = LowLevelDatabase() + prefix ='cpyexttest' - use_micronumpy = setup_micronumpy(space) - - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) - from rpython.translator.c.database import LowLevelDatabase - db = LowLevelDatabase() - - generate_macros(export_symbols, prefix='cpyexttest') - - functions = generate_decls_and_callbacks(db, prefix='cpyexttest') + functions = generate_decls_and_callbacks(db, prefix=prefix) # Structure declaration code members = [] @@ -1124,7 +1120,7 @@ INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'cpyexttest') + name = name.replace('Py', prefix) if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -1152,12 +1148,6 @@ pypyAPI = ctypes.POINTER(ctypes.c_void_p).in_dll(bridge, 'pypyAPI') # implement structure initialization code - #for name, func in FUNCTIONS.iteritems(): - # if name.startswith('cpyext_'): # XXX hack - # continue - # pypyAPI[structindex[name]] = ctypes.cast( - # ll2ctypes.lltype2ctypes(func.get_llhelper(space)), - # 
ctypes.c_void_p) for header, header_functions in FUNCTIONS_BY_HEADER.iteritems(): for name, func in header_functions.iteritems(): if name.startswith('cpyext_') or func is None: # XXX hack @@ -1225,9 +1215,10 @@ else: return None -def generate_macros(export_symbols, prefix): +def generate_decls_and_callbacks(db, api_struct=True, prefix=''): "NOT_RPYTHON" pypy_macros = [] + export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) for name in export_symbols: if '#' in name: name, header = name.split('#') @@ -1258,8 +1249,6 @@ pypy_macros_h = udir.join('pypy_macros.h') pypy_macros_h.write('\n'.join(pypy_macros)) -def generate_decls_and_callbacks(db, api_struct=True, prefix=''): - "NOT_RPYTHON" # implement function callbacks and generate function decls functions = [] decls = {} @@ -1425,14 +1414,11 @@ def setup_library(space): "NOT_RPYTHON" + from rpython.translator.c.database import LowLevelDatabase use_micronumpy = setup_micronumpy(space) - export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) - from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() prefix = 'PyPy' - generate_macros(export_symbols, prefix=prefix) - functions = generate_decls_and_callbacks(db, api_struct=False, prefix=prefix) code = "#include \n" if use_micronumpy: