From pypy.commits at gmail.com Sun Dec 1 01:27:47 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 30 Nov 2019 22:27:47 -0800 (PST) Subject: [pypy-commit] buildbot default: add machine_cfg.py Message-ID: <5de35d63.1c69fb81.73e6e.c4ac@mx.google.com> Author: Matti Picus Branch: Changeset: r1113:0a46bac7a05a Date: 2019-12-01 07:27 +0100 http://bitbucket.org/pypy/buildbot/changeset/0a46bac7a05a/ Log: add machine_cfg.py diff --git a/docker/Dockerfile b/docker/Dockerfile --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -75,6 +75,7 @@ # Define a user ARG BUILDSLAVE_UID=1001 RUN adduser buildslave --uid=$BUILDSLAVE_UID +RUN echo parallel=4 > /home/buildslave/machine_cfg.py CMD if [ -e /build_dir/buildbot.tac ]; then \ su buildslave -c "buildslave start --nodaemon /build_dir"; \ diff --git a/docker/Dockerfile32 b/docker/Dockerfile32 --- a/docker/Dockerfile32 +++ b/docker/Dockerfile32 @@ -81,6 +81,7 @@ # Define a user ARG BUILDSLAVE_UID=1001 RUN adduser buildslave --uid=$BUILDSLAVE_UID +RUN echo parallel=4 > /home/buildslave/machine_cfg.py CMD if [ -e /build_dir/buildbot.tac ]; then \ su buildslave -c "buildslave start --nodaemon /build_dir"; \ From pypy.commits at gmail.com Sun Dec 1 11:43:05 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 08:43:05 -0800 (PST) Subject: [pypy-commit] pypy default: test, fix for importing with unicode in sys.path (issue 3112) Message-ID: <5de3ed99.1c69fb81.2d220.b76f@mx.google.com> Author: Matti Picus Branch: Changeset: r98203:4454c23fcc36 Date: 2019-12-01 18:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4454c23fcc36/ Log: test, fix for importing with unicode in sys.path (issue 3112) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -448,14 +448,19 @@ return w_loader def _getimporter(space, w_pathitem): - # the function 'imp._getimporter' is a pypy-only extension + # 'imp._getimporter' is somewhat like CPython's 
get_path_importer w_path_importer_cache = space.sys.get("path_importer_cache") w_importer = space.finditem(w_path_importer_cache, w_pathitem) if w_importer is None: space.setitem(w_path_importer_cache, w_pathitem, space.w_None) for w_hook in space.unpackiterable(space.sys.get("path_hooks")): + w_pathbytes = w_pathitem + if space.isinstance_w(w_pathitem, space.w_unicode): + from pypy.module.sys.interp_encoding import getfilesystemencoding + w_pathbytes = space.call_method(space.w_unicode, 'encode', + w_pathitem, getfilesystemencoding(space)) try: - w_importer = space.call_function(w_hook, w_pathitem) + w_importer = space.call_function(w_hook, w_pathbytes) except OperationError as e: if not e.match(space, space.w_ImportError): raise diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -4,7 +4,8 @@ class AppTestImpModule: spaceconfig = { - 'usemodules': ['binascii', 'imp', 'itertools', 'time', 'struct'], + 'usemodules': ['binascii', 'imp', 'itertools', 'time', 'struct', + 'zipimport'], } def setup_class(cls): @@ -246,3 +247,14 @@ assert marshal.loads == 42 marshal.loads = old + + def test_unicode_in_sys_path(self): + # issue 3112: when _getimporter calls + # for x in sys.path: for h in sys.path_hooks: h(x) + # make sure x is properly encoded + import sys + import zipimport # installs a sys.path_hook + if sys.getfilesystemencoding().lower() == 'utf-8': + sys.path.insert(0, u'\xef') + with raises(ImportError): + import impossible_module From pypy.commits at gmail.com Sun Dec 1 14:58:31 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 11:58:31 -0800 (PST) Subject: [pypy-commit] pypy py3.6: add rposix getgrouplist (part of issue 2375) Message-ID: <5de41b67.1c69fb81.bb0b4.b2d3@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98204:7a5929c7cc5e Date: 2019-12-01 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/7a5929c7cc5e/ Log: add 
rposix getgrouplist (part of issue 2375) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1931,8 +1931,7 @@ rffi.INT, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_sched_get_priority_min = external('sched_get_priority_min', [rffi.INT], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) - if not _WIN32: - c_sched_yield = external('sched_yield', [], rffi.INT) + c_sched_yield = external('sched_yield', [], rffi.INT) @enforceargs(int) def sched_get_priority_max(policy): @@ -1945,6 +1944,36 @@ def sched_yield(): return handle_posix_error('sched_yield', c_sched_yield()) + c_getgroupslist = external('getgrouplist', [rffi.CCHARP, GID_T, + GID_GROUPS_T, rffi.INTP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def getgrouplist(user, group): + groups_p = lltype.malloc(GID_GROUPS_T.TO, 64, flavor='raw') + ngroups_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + ngroups_p[0] = rffi.cast(rffi.INT, 64) + try: + n = handle_posix_error('getgrouplist', c_getgroupslist(user, group, + groups_p, ngroups_p)) + if n == -1: + if widen(ngroups_p[0]) > 64: + # reallocate. 
Should never happen + lltype.free(groups_p, flavor='raw') + groups_p = lltype.nullptr(GID_GROUPS_T.TO) + groups_p = lltype.malloc(GID_GROUPS_T.TO, widen(ngroups_p[0]), + flavor='raw') + + n = handle_posix_error('getgrouplist', c_getgroupslist(user, + group, groups_p, ngroups_p)) + ngroups = widen(ngroups_p[0]) + groups = [0] * ngroups + for i in range(ngroups): + groups[i] = groups_p[i] + return groups + finally: + lltype.free(ngroups_p, flavor='raw') + if groups_p: + lltype.free(groups_p, flavor='raw') #___________________________________________________________________ c_chroot = external('chroot', [rffi.CCHARP], rffi.INT, From pypy.commits at gmail.com Sun Dec 1 14:58:33 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 11:58:33 -0800 (PST) Subject: [pypy-commit] pypy py3.6: test, add posix.getgrouplist (part of issue 2375) Message-ID: <5de41b69.1c69fb81.4ca29.01c9@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98205:95e1a6902283 Date: 2019-12-01 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/95e1a6902283/ Log: test, add posix.getgrouplist (part of issue 2375) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1813,6 +1813,23 @@ except OSError as e: raise wrap_oserror(space, e, eintr_retry=False) + at unwrap_spec(username='text', gid=c_gid_t) +def getgrouplist(space, username, gid): + """ + getgrouplist(user, group) -> list of groups to which a user belongs + + Returns a list of groups to which a user belongs. 
+ + user: username to lookup + group: base group id of the user + """ + try: + groups = rposix.getgrouplist(username, gid) + return space.newlist([space.newint(g) for g in groups]) + except OSError as e: + raise wrap_oserror(space, e) + + def getpgrp(space): """ getpgrp() -> pgrp diff --git a/pypy/module/posix/moduledef.py b/pypy/module/posix/moduledef.py --- a/pypy/module/posix/moduledef.py +++ b/pypy/module/posix/moduledef.py @@ -207,6 +207,7 @@ interpleveldefs['sync'] = 'interp_posix.sync' interpleveldefs['get_blocking'] = 'interp_posix.get_blocking' interpleveldefs['set_blocking'] = 'interp_posix.set_blocking' + interpleveldefs['getgrouplist'] = 'interp_posix.getgrouplist' if hasattr(rposix, 'getpriority'): interpleveldefs['getpriority'] = 'interp_posix.getpriority' diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1369,7 +1369,6 @@ raises(OSError, posix.get_blocking, 1234567) raises(OSError, posix.set_blocking, 1234567, True) - if sys.platform != 'win32': def test_sendfile(self): import _socket, posix s1, s2 = _socket.socketpair() @@ -1393,6 +1392,13 @@ fd = posix.open(memoryview(pdir), posix.O_RDONLY) posix.close(fd) + def test_getgrouplist(self): + import posix, getpass + gid = posix.getgid() + user = getpass.getuser() + groups = posix.getgrouplist(user, gid) + assert gid in groups + if sys.platform.startswith('linux'): def test_sendfile_no_offset(self): import _socket, posix From pypy.commits at gmail.com Sun Dec 1 14:58:35 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 11:58:35 -0800 (PST) Subject: [pypy-commit] pypy default: add rposix getgrouplist (part of issue 2375) Message-ID: <5de41b6b.1c69fb81.f740e.eccd@mx.google.com> Author: Matti Picus Branch: Changeset: r98206:a8a24325ea83 Date: 2019-12-01 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/a8a24325ea83/ Log: add rposix getgrouplist (part of 
issue 2375) (grafted from 7a5929c7cc5ed9b4c81ea0158bd035193cc08abb) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1931,8 +1931,7 @@ rffi.INT, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_sched_get_priority_min = external('sched_get_priority_min', [rffi.INT], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) - if not _WIN32: - c_sched_yield = external('sched_yield', [], rffi.INT) + c_sched_yield = external('sched_yield', [], rffi.INT) @enforceargs(int) def sched_get_priority_max(policy): @@ -1945,6 +1944,36 @@ def sched_yield(): return handle_posix_error('sched_yield', c_sched_yield()) + c_getgroupslist = external('getgrouplist', [rffi.CCHARP, GID_T, + GID_GROUPS_T, rffi.INTP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def getgrouplist(user, group): + groups_p = lltype.malloc(GID_GROUPS_T.TO, 64, flavor='raw') + ngroups_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + ngroups_p[0] = rffi.cast(rffi.INT, 64) + try: + n = handle_posix_error('getgrouplist', c_getgroupslist(user, group, + groups_p, ngroups_p)) + if n == -1: + if widen(ngroups_p[0]) > 64: + # reallocate. 
Should never happen + lltype.free(groups_p, flavor='raw') + groups_p = lltype.nullptr(GID_GROUPS_T.TO) + groups_p = lltype.malloc(GID_GROUPS_T.TO, widen(ngroups_p[0]), + flavor='raw') + + n = handle_posix_error('getgrouplist', c_getgroupslist(user, + group, groups_p, ngroups_p)) + ngroups = widen(ngroups_p[0]) + groups = [0] * ngroups + for i in range(ngroups): + groups[i] = groups_p[i] + return groups + finally: + lltype.free(ngroups_p, flavor='raw') + if groups_p: + lltype.free(groups_p, flavor='raw') #___________________________________________________________________ c_chroot = external('chroot', [rffi.CCHARP], rffi.INT, From pypy.commits at gmail.com Sun Dec 1 14:58:37 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 11:58:37 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into branch Message-ID: <5de41b6d.1c69fb81.71f6b.bed5@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98207:5c84cb16971d Date: 2019-12-01 21:57 +0200 http://bitbucket.org/pypy/pypy/changeset/5c84cb16971d/ Log: merge default into branch diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -7,8 +7,8 @@ class AppTestImpModule: # cpyext is required for _imp.create_dynamic() spaceconfig = { - 'usemodules': [ - 'binascii', 'imp', 'itertools', 'time', 'struct', 'cpyext'], + 'usemodules': ['binascii', 'imp', 'itertools', 'time', 'struct', + 'zipimport', cpyext], } def setup_class(cls): @@ -316,3 +316,14 @@ if not hasattr(sys, 'pypy_version_info'): skip('This test is PyPy-only') assert imp.get_tag() == 'pypy%d%d' % (sys.version_info[:2]) + + def test_unicode_in_sys_path(self): + # issue 3112: when _getimporter calls + # for x in sys.path: for h in sys.path_hooks: h(x) + # make sure x is properly encoded + import sys + import zipimport # installs a sys.path_hook + if sys.getfilesystemencoding().lower() == 'utf-8': + sys.path.insert(0, u'\xef') + 
with raises(ImportError): + import impossible_module From pypy.commits at gmail.com Sun Dec 1 21:55:55 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 01 Dec 2019 18:55:55 -0800 (PST) Subject: [pypy-commit] buildbot default: refactor TMPDIR property creation, add to compress step Message-ID: <5de47d3b.1c69fb81.5b1bc.118b@mx.google.com> Author: Matti Picus Branch: Changeset: r1114:a77ac0fc2f3d Date: 2019-12-02 04:55 +0200 http://bitbucket.org/pypy/buildbot/changeset/a77ac0fc2f3d/ Log: refactor TMPDIR property creation, add to compress step diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -391,12 +391,21 @@ def setup_steps(platform, factory, workdir=None, repourl='https://bitbucket.org/pypy/pypy/', force_branch=None): - # XXX: this assumes that 'hg' is in the path - import getpass - if getpass.getuser() == 'antocuni': - # for debugging - repourl = '/home/antocuni/pypy/default' - # + factory.addStep(shell.SetPropertyFromCommand( + command=['python', '-c', "import tempfile, os ;print" + " tempfile.gettempdir() + os.path.sep"], + property="target_tmpdir")) + # If target_tmpdir is empty, crash. 
+ factory.tmp_or_crazy = '%(prop:target_tmpdir:-crazy/name/so/mkdir/fails/)s' + factory.pytest = "pytest" + factory.addStep(ShellCmd( + description="mkdir for tests", + command=['python', '-c', Interpolate("import os; os.mkdir(r'" + \ + factory.tmp_or_crazy + factory.pytest + "') if not os.path.exists(r'" + \ + factory.tmp_or_crazy + factory.pytest + "') else True")], + haltOnFailure=True, + )) + factory.addStep(ParseRevision(hideStepIf=ParseRevision.hideStepIf, doStepIf=ParseRevision.doStepIf)) # @@ -447,27 +456,12 @@ return ".tar.bz2" def add_translated_tests(factory, prefix, platform, app_tests, lib_python, pypyjit): - factory.addStep(shell.SetPropertyFromCommand( - command=['python', '-c', "import tempfile, os ;print" - " tempfile.gettempdir() + os.path.sep"], - property="target_tmpdir")) - # If target_tmpdir is empty, crash. - tmp_or_crazy = '%(prop:target_tmpdir:-crazy/name/so/mkdir/fails/)s' - pytest = "pytest" - factory.addStep(ShellCmd( - description="mkdir for tests", - command=['python', '-c', Interpolate("import os; os.mkdir(r'" + \ - tmp_or_crazy + pytest + "') if not os.path.exists(r'" + \ - tmp_or_crazy + pytest + "') else True")], - haltOnFailure=True, - )) - nDays = '3' #str, not int if platform == 'win32': - command = ['FORFILES', '/P', Interpolate(tmp_or_crazy + pytest), + command = ['FORFILES', '/P', Interpolate(factory.tmp_or_crazy + factory.pytest), '/D', '-' + nDays, '/c', "cmd /c rmdir /q /s @path"] else: - command = ['find', Interpolate(tmp_or_crazy + pytest), '-mtime', + command = ['find', Interpolate(factory.tmp_or_crazy + factory.pytest), '-mtime', '+' + nDays, '-exec', 'rm', '-r', '{}', ';'] factory.addStep(SuccessAlways( description="cleanout old test files", @@ -485,7 +479,7 @@ ] + ["--config=%s" % cfg for cfg in app_tests], logfiles={'pytestLog': 'pytest-A.log'}, timeout=4000, - env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + pytest), + env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + factory.pytest), })) if platform == 
'win32': virt_pypy = r'pypy-venv\Scripts\python.exe' @@ -543,7 +537,7 @@ command=prefix + ["python", "testrunner/lib_python_tests.py"], timeout=4000, logfiles={'pytestLog': 'cpython.log'}, - env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + pytest), + env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + factory.pytest), })) if pypyjit: @@ -552,7 +546,7 @@ command=prefix + ["python", "testrunner/pypyjit_tests.py"], timeout=4000, logfiles={'pytestLog': 'pypyjit_new.log'}, - env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + pytest), + env={"TMPDIR": Interpolate('%(prop:target_tmpdir)s' + factory.pytest), })) @@ -565,29 +559,14 @@ setup_steps(platform, self) - self.timeout=kwargs.get('timeout', 4000) - - self.addStep(shell.SetPropertyFromCommand( - command=['python', '-c', "import tempfile, os ;print" - " tempfile.gettempdir() + os.path.sep"], - property="target_tmpdir")) - # If target_tmpdir is empty, crash. - tmp_or_crazy = '%(prop:target_tmpdir:-crazy/name/so/mkdir/fails/)s' - self.pytest = "pytest" - self.addStep(ShellCmd( - description="mkdir for tests", - command=['python', '-c', Interpolate("import os; os.mkdir(r'" + \ - tmp_or_crazy + self.pytest + "') if not os.path.exists(r'" + \ - tmp_or_crazy + self.pytest + "') else True")], - haltOnFailure=True, - )) + self.timeout=kwargs.get('timeout', 1000) nDays = '3' #str, not int if platform == 'win32': - command = ['FORFILES', '/P', Interpolate(tmp_or_crazy + self.pytest), + command = ['FORFILES', '/P', Interpolate(self.tmp_or_crazy + self.pytest), '/D', '-' + nDays, '/c', "cmd /c rmdir /q /s @path"] else: - command = ['find', Interpolate(tmp_or_crazy + self.pytest), '-mtime', + command = ['find', Interpolate(self.tmp_or_crazy + self.pytest), '-mtime', '+' + nDays, '-exec', 'rm', '-r', '{}', ';'] self.addStep(SuccessAlways( description="cleanout old test files", @@ -690,7 +669,11 @@ command=prefix + ["python", "pypy/tool/release/package.py", "--targetdir=.", "--archive-name", WithProperties(name)], - 
workdir='build')) + workdir='build', + env={ + "TMPDIR": Interpolate('%(prop:target_tmpdir)s' + self.pytest), + }, + )) nightly = '~/nightly/' extension = get_extension(platform) pypy_c_rel = "build/" + name + extension From pypy.commits at gmail.com Mon Dec 2 06:24:57 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:24:57 -0800 (PST) Subject: [pypy-commit] pypy hpy: some progress in fixing test_ztranslation, which includes improving the fake objspace Message-ID: <5de4f489.1c69fb81.192cd.b65f@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98208:abdeda0fd3c6 Date: 2019-11-29 21:37 +0100 http://bitbucket.org/pypy/pypy/changeset/abdeda0fd3c6/ Log: some progress in fixing test_ztranslation, which includes improving the fake objspace diff --git a/pypy/module/hpy_universal/interp_unicode.py b/pypy/module/hpy_universal/interp_unicode.py --- a/pypy/module/hpy_universal/interp_unicode.py +++ b/pypy/module/hpy_universal/interp_unicode.py @@ -7,7 +7,7 @@ def _maybe_utf8_to_w(space, utf8): # should this be a method of space? - s = rffi.charp2str(utf8) + s = rffi.constcharp2str(utf8) try: length = rutf8.check_utf8(s, allow_surrogates=False) except rutf8.CheckError: diff --git a/pypy/module/hpy_universal/state.py b/pypy/module/hpy_universal/state.py --- a/pypy/module/hpy_universal/state.py +++ b/pypy/module/hpy_universal/state.py @@ -18,7 +18,7 @@ def missing_function(): print ("oops! 
calling the slot '%s', " "which is not implemented" % (name,)) - raise OperationError(space.w_NotImplementedError, space.wrap(name)) + raise OperationError(space.w_NotImplementedError, space.newtext(name)) return missing_function diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -77,7 +77,7 @@ return NonConstant(42) def _len(self): return self._length - + class W_MyType(W_MyObject): name = "foobar" @@ -153,7 +153,13 @@ # In Python2, this is triggered by W_InstanceObject.__getslice__. def build_slice(): self.newslice(self.w_None, self.w_None, self.w_None) + def attach_list_strategy(): + from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy + w_obj = w_some_obj() + if isinstance(w_obj, W_ListObject): + w_obj.strategy = EmptyListStrategy(self) self._seen_extras.append(build_slice) + self._seen_extras.append(attach_list_strategy) def _freeze_(self): return True @@ -208,6 +214,10 @@ def newlong(self, x): return w_some_obj() + @specialize.argtype(1) + def newlong_from_rarith_int(self, x): + return w_some_obj() + def newfloat(self, x): return w_some_obj() From pypy.commits at gmail.com Mon Dec 2 06:24:59 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:24:59 -0800 (PST) Subject: [pypy-commit] pypy hpy: update_vendored to e481b58; this change was made in the hpy-ctypespace branch but needed to be ported to pyhandle/hpy and properly vendored Message-ID: <5de4f48b.1c69fb81.e2805.7748@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98209:b1e1f0479f6b Date: 2019-12-01 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/b1e1f0479f6b/ Log: update_vendored to e481b58; this change was made in the hpy- ctypespace branch but needed to be ported to pyhandle/hpy and properly vendored diff --git a/pypy/module/hpy_universal/_vendored/include/universal/hpy.h b/pypy/module/hpy_universal/_vendored/include/universal/hpy.h --- 
a/pypy/module/hpy_universal/_vendored/include/universal/hpy.h +++ b/pypy/module/hpy_universal/_vendored/include/universal/hpy.h @@ -6,7 +6,9 @@ #include typedef intptr_t HPy_ssize_t; -typedef struct { HPy_ssize_t _i; } HPy; + +struct _HPy_s { HPy_ssize_t _i; }; +typedef struct _HPy_s HPy; typedef struct _HPyContext_s *HPyContext; struct _object; /* that's PyObject inside CPython */ From pypy.commits at gmail.com Mon Dec 2 06:25:01 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:01 -0800 (PST) Subject: [pypy-commit] pypy hpy: translation fix Message-ID: <5de4f48d.1c69fb81.b754.c41b@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98210:e716d29bf67d Date: 2019-12-01 11:54 +0100 http://bitbucket.org/pypy/pypy/changeset/e716d29bf67d/ Log: translation fix diff --git a/pypy/module/hpy_universal/interp_module.py b/pypy/module/hpy_universal/interp_module.py --- a/pypy/module/hpy_universal/interp_module.py +++ b/pypy/module/hpy_universal/interp_module.py @@ -1,4 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import widen from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.module import Module from pypy.module.hpy_universal.apiset import API @@ -17,7 +18,7 @@ p = hpydef.c_m_methods i = 0 while p[i].c_ml_name: - if not p[i].c_ml_flags & llapi._HPy_METH: + if not widen(p[i].c_ml_flags) & llapi._HPy_METH: # we need to add support for legacy methods through cpyext raise oefmt(space.w_NotImplementedError, "non-hpy method: %s", rffi.constcharp2str(p[i].c_ml_name)) From pypy.commits at gmail.com Mon Dec 2 06:25:02 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:02 -0800 (PST) Subject: [pypy-commit] pypy hpy: give a better name to this helper, it makes it much easier to debug translation errors Message-ID: <5de4f48e.1c69fb81.62233.cded@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98211:987c3b5d033e Date: 2019-12-01 12:08 +0100 
http://bitbucket.org/pypy/pypy/changeset/987c3b5d033e/ Log: give a better name to this helper, it makes it much easier to debug translation errors diff --git a/pypy/module/hpy_universal/apiset.py b/pypy/module/hpy_universal/apiset.py --- a/pypy/module/hpy_universal/apiset.py +++ b/pypy/module/hpy_universal/apiset.py @@ -52,6 +52,7 @@ return wrapper def get_llhelper(space): return llhelper(ll_functype, make_wrapper(space)) + get_llhelper.__name__ = 'get_llhelper_%s' % fn.__name__ fn.get_llhelper = get_llhelper # basename From pypy.commits at gmail.com Mon Dec 2 06:25:05 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:05 -0800 (PST) Subject: [pypy-commit] pypy hpy: progress towards making test_ztranslation working again: these changes are needed to that functions inside hpy_universal.interp_list are annotated correctly, because they do 'assert isinstance(w_obj, W_ListObject)', so they bring in real code from the std objspace Message-ID: <5de4f491.1c69fb81.36a10.950a@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98212:121bac5f857c Date: 2019-12-02 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/121bac5f857c/ Log: progress towards making test_ztranslation working again: these changes are needed to that functions inside hpy_universal.interp_list are annotated correctly, because they do 'assert isinstance(w_obj, W_ListObject)', so they bring in real code from the std objspace diff --git a/pypy/module/hpy_universal/test/test_ztranslation.py b/pypy/module/hpy_universal/test/test_ztranslation.py --- a/pypy/module/hpy_universal/test/test_ztranslation.py +++ b/pypy/module/hpy_universal/test/test_ztranslation.py @@ -10,8 +10,10 @@ state = space.fromcache(State) state.setup() - config_opts = {'translation.gc': 'boehm'} + rpython_opts = {'translation.gc': 'boehm'} + pypy_opts = {'objspace.std.withliststrategies': False} checkmodule('hpy_universal', extra_func=extra_func, c_compile=True, - config_opts=config_opts) + 
rpython_opts=rpython_opts, + pypy_opts=pypy_opts) diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -3,7 +3,8 @@ def checkmodule(modname, translate_startup=True, ignore=(), - c_compile=False, extra_func=None, config_opts=None): + c_compile=False, extra_func=None, rpython_opts=None, + pypy_opts=None): """ Check that the module 'modname' translates. @@ -21,6 +22,8 @@ will be passed to TranslationContext """ config = get_pypy_config(translating=True) + if pypy_opts: + config.set(**pypy_opts) space = FakeObjSpace(config) seeobj_w = [] modules = [] @@ -51,7 +54,7 @@ func = None opts = {'translation.list_comprehension_operations': True} - if config_opts: - opts.update(config_opts) + if rpython_opts: + opts.update(rpython_opts) space.translates(func, seeobj_w=seeobj_w, c_compile=c_compile, extra_func=extra_func, **opts) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -78,7 +78,6 @@ def _len(self): return self._length - class W_MyType(W_MyObject): name = "foobar" flag_map_or_seq = '?' @@ -154,10 +153,18 @@ def build_slice(): self.newslice(self.w_None, self.w_None, self.w_None) def attach_list_strategy(): - from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy + # this is needed for modules which interacts directly with + # std.listobject.W_ListObject, e.g. after an isinstance check. For + # example, hpy_universal. 
We need to attach a couple of attributes + # so that the annotator annotates them with the correct types + from pypy.objspace.std.listobject import W_ListObject, ObjectListStrategy + space = self w_obj = w_some_obj() if isinstance(w_obj, W_ListObject): - w_obj.strategy = EmptyListStrategy(self) + w_obj.space = space + w_obj.strategy = ObjectListStrategy(space) + list_w = [w_some_obj(), w_some_obj()] + w_obj.lstorage = w_obj.strategy.erase(list_w) self._seen_extras.append(build_slice) self._seen_extras.append(attach_list_strategy) From pypy.commits at gmail.com Mon Dec 2 06:25:06 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:06 -0800 (PST) Subject: [pypy-commit] pypy hpy: fix the last bit of translation Message-ID: <5de4f492.1c69fb81.aadbf.2e25@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98213:4063b6b8b3f3 Date: 2019-12-02 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4063b6b8b3f3/ Log: fix the last bit of translation diff --git a/pypy/module/hpy_universal/interp_hpy.py b/pypy/module/hpy_universal/interp_hpy.py --- a/pypy/module/hpy_universal/interp_hpy.py +++ b/pypy/module/hpy_universal/interp_hpy.py @@ -12,8 +12,15 @@ # these imports have side effects, as they call @API.func() from pypy.module.hpy_universal import ( - interp_err, interp_long, interp_module, interp_number, interp_unicode, interp_float, - interp_bytes, interp_dict, interp_list, + interp_err, + interp_long, + interp_module, + interp_number, + interp_unicode, + interp_float, + interp_bytes, + interp_dict, + interp_list, ) diff --git a/pypy/module/hpy_universal/interp_unicode.py b/pypy/module/hpy_universal/interp_unicode.py --- a/pypy/module/hpy_universal/interp_unicode.py +++ b/pypy/module/hpy_universal/interp_unicode.py @@ -29,6 +29,8 @@ @API.func("HPy HPyUnicode_AsUTF8String(HPyContext ctx, HPy h)") def HPyUnicode_AsUTF8String(space, ctx, h): + from rpython.rlib.nonconst import NonConstant + if NonConstant(False): return 0 # needed for the 
annotator raise NotImplementedError @API.func("HPy HPyUnicode_FromWideChar(HPyContext ctx, const wchar_t *w, HPy_ssize_t size)") From pypy.commits at gmail.com Mon Dec 2 06:25:08 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:08 -0800 (PST) Subject: [pypy-commit] pypy hpy: finally fix hpy_universa:test_ztranslation: we need to ensure that the annotator sees some concrete implementation of W_DictMultiObject, else it only sees the base class which is full of abstract methods Message-ID: <5de4f494.1c69fb81.b6391.2af0@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98214:4aa8318c0c07 Date: 2019-12-02 12:23 +0100 http://bitbucket.org/pypy/pypy/changeset/4aa8318c0c07/ Log: finally fix hpy_universa:test_ztranslation: we need to ensure that the annotator sees some concrete implementation of W_DictMultiObject, else it only sees the base class which is full of abstract methods diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -165,8 +165,20 @@ w_obj.strategy = ObjectListStrategy(space) list_w = [w_some_obj(), w_some_obj()] w_obj.lstorage = w_obj.strategy.erase(list_w) + def attach_dict_strategy(): + # this is needed for modules which do e.g. "isinstance(w_obj, + # W_DictMultiObject)", like hpy_universal. Make sure that the + # annotator sees a concrete class, like W_DictObject, else lots of + # operations are blocked. 
+ from pypy.objspace.std.dictmultiobject import W_DictObject, ObjectDictStrategy + space = self + strategy = ObjectDictStrategy(space) + storage = strategy.get_empty_storage() + w_obj = W_DictObject(space, strategy, storage) + self._seen_extras.append(build_slice) self._seen_extras.append(attach_list_strategy) + self._seen_extras.append(attach_dict_strategy) def _freeze_(self): return True From pypy.commits at gmail.com Mon Dec 2 06:25:10 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 03:25:10 -0800 (PST) Subject: [pypy-commit] pypy hpy: add an option to show Pdb+ in ztranslation tests, so that it is easier to debug translation errors Message-ID: <5de4f496.1c69fb81.e9abc.66b3@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98215:e042625501a0 Date: 2019-12-02 12:24 +0100 http://bitbucket.org/pypy/pypy/changeset/e042625501a0/ Log: add an option to show Pdb+ in ztranslation tests, so that it is easier to debug translation errors diff --git a/pypy/module/hpy_universal/test/test_ztranslation.py b/pypy/module/hpy_universal/test/test_ztranslation.py --- a/pypy/module/hpy_universal/test/test_ztranslation.py +++ b/pypy/module/hpy_universal/test/test_ztranslation.py @@ -16,4 +16,6 @@ extra_func=extra_func, c_compile=True, rpython_opts=rpython_opts, - pypy_opts=pypy_opts) + pypy_opts=pypy_opts, + show_pdbplus=False, + ) diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -1,25 +1,31 @@ +import sys +import traceback +from rpython.translator.tool.pdbplus import PdbPlusShow from pypy.objspace.fake.objspace import FakeObjSpace, W_Root from pypy.config.pypyoption import get_pypy_config def checkmodule(modname, translate_startup=True, ignore=(), c_compile=False, extra_func=None, rpython_opts=None, - pypy_opts=None): + pypy_opts=None, show_pdbplus=False): """ Check that the module 'modname' translates. 
Options: translate_startup: TODO, document me - ignore: list of module interpleveldefs/appleveldefs to ignore + ignore: list of module interpleveldefs/appleveldefs to ignore - c_compile: determine whether to inokve the C compiler after rtyping + c_compile: determine whether to inokve the C compiler after rtyping - extra_func: extra function which will be annotated and called. It takes - a single "space" argment + extra_func: extra function which will be annotated and called. It takes + a single "space" argment - config_opts: dictionary containing extra configuration options which - will be passed to TranslationContext + rpython_opts: dictionariy containing extra configuration options + pypy_opts: dictionariy containing extra configuration options + + show_pdbplus: show Pdb+ prompt on error. Useful for pdb commands such as + flowg, callg, etc. """ config = get_pypy_config(translating=True) if pypy_opts: @@ -56,5 +62,15 @@ opts = {'translation.list_comprehension_operations': True} if rpython_opts: opts.update(rpython_opts) - space.translates(func, seeobj_w=seeobj_w, - c_compile=c_compile, extra_func=extra_func, **opts) + + try: + space.translates(func, seeobj_w=seeobj_w, + c_compile=c_compile, extra_func=extra_func, **opts) + except: + if not show_pdbplus: + raise + print + exc, val, tb = sys.exc_info() + traceback.print_exc() + sys.p = p = PdbPlusShow(space.t) + p.start(tb) From pypy.commits at gmail.com Mon Dec 2 08:00:05 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 05:00:05 -0800 (PST) Subject: [pypy-commit] pypy default: adjust documentation for cffi _hashlib, _ssl Message-ID: <5de50ad5.1c69fb81.26eac.ee18@mx.google.com> Author: Matti Picus Branch: Changeset: r98216:6bb004a8914f Date: 2019-12-02 14:56 +0200 http://bitbucket.org/pypy/pypy/changeset/6bb004a8914f/ Log: adjust documentation for cffi _hashlib, _ssl diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -85,9 +85,6 @@ pyexpat 
libexpat1 -_ssl - libssl - _vmprof libunwind (optional, loaded dynamically at runtime) @@ -101,6 +98,9 @@ sqlite3 libsqlite3 +_ssl, _hashlib + libssl + curses libncurses-dev (for PyPy2) libncursesw-dev (for PyPy3) @@ -112,11 +112,12 @@ tk-dev lzma (PyPy3 only) - liblzma + liblzma or libxz, version 5 and up -To run untranslated tests, you need the Boehm garbage collector libgc. +To run untranslated tests, you need the Boehm garbage collector libgc, version +7.4 and up -On recent Debian and Ubuntu (16.04 onwards), this is the command to install +On Debian and Ubuntu (16.04 onwards), this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config zlib1g-dev libbz2-dev \ @@ -124,13 +125,6 @@ tk-dev libgc-dev python-cffi \ liblzma-dev libncursesw5-dev # these two only needed on PyPy3 -On older Debian and Ubuntu (12.04-14.04):: - - apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev libgc-dev python-cffi \ - liblzma-dev libncursesw-dev # these two only needed on PyPy3 - On Fedora:: dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ From pypy.commits at gmail.com Mon Dec 2 08:00:07 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 05:00:07 -0800 (PST) Subject: [pypy-commit] pypy default: remove unused rpython _hashlib module, we now use cffi Message-ID: <5de50ad7.1c69fb81.c0ed5.ca98@mx.google.com> Author: Matti Picus Branch: Changeset: r98217:85525c43dc44 Date: 2019-12-02 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/85525c43dc44/ Log: remove unused rpython _hashlib module, we now use cffi diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py deleted file mode 100644 diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/interp_hashlib.py +++ /dev/null @@ -1,204 +0,0 @@ -from 
__future__ import with_statement - -from rpython.rlib import rgc, ropenssl -from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rstring import StringBuilder -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.tool.sourcetools import func_renamer - -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, interp2app, WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.module.thread.os_lock import Lock - - -algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') - -def hash_name_mapper_callback(obj_name, userdata): - if not obj_name: - return - # Ignore aliased names, they pollute the list and OpenSSL appears - # to have a its own definition of alias as the resulting list - # still contains duplicate and alternate names for several - # algorithms. - if rffi.cast(lltype.Signed, obj_name[0].c_alias): - return - name = rffi.charp2str(obj_name[0].c_name) - global_name_fetcher.meth_names.append(name) - -class NameFetcher: - def setup(self): - self.meth_names = [] - def _cleanup_(self): - self.__dict__.clear() -global_name_fetcher = NameFetcher() - -def fetch_names(space): - global_name_fetcher.setup() - ropenssl.init_digests() - ropenssl.OBJ_NAME_do_all(ropenssl.OBJ_NAME_TYPE_MD_METH, - hash_name_mapper_callback, None) - meth_names = global_name_fetcher.meth_names - global_name_fetcher.meth_names = None - return space.call_function(space.w_frozenset, space.newlist( - [space.newtext(name) for name in meth_names])) - -class W_Hash(W_Root): - NULL_CTX = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ctx = NULL_CTX - - def __init__(self, space, name, copy_from=NULL_CTX): - self.name = name - digest_type = self.digest_type_by_name(space) - self.digest_size = ropenssl.EVP_MD_size(digest_type) - - # Allocate a lock for each HASH object. 
- # An optimization would be to not release the GIL on small requests, - # and use a custom lock only when needed. - self.lock = Lock(space) - - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size, - self) - try: - if copy_from: - if not ropenssl.EVP_MD_CTX_copy(ctx, copy_from): - raise ValueError - else: - ropenssl.EVP_DigestInit(ctx, digest_type) - self.ctx = ctx - except: - ropenssl.EVP_MD_CTX_free(ctx) - raise - self.register_finalizer(space) - - def _finalize_(self): - ctx = self.ctx - if ctx: - self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ropenssl.EVP_MD_CTX_free(ctx) - - def digest_type_by_name(self, space): - digest_type = ropenssl.EVP_get_digestbyname(self.name) - if not digest_type: - raise oefmt(space.w_ValueError, "unknown hash function") - return digest_type - - def descr_repr(self, space): - addrstring = self.getaddrstring(space) - return space.newtext("<%s HASH object at 0x%s>" % ( - self.name, addrstring)) - - @unwrap_spec(string='bufferstr') - def update(self, space, string): - with rffi.scoped_nonmovingbuffer(string) as buf: - with self.lock: - # XXX try to not release the GIL for small requests - ropenssl.EVP_DigestUpdate(self.ctx, buf, len(string)) - - def copy(self, space): - "Return a copy of the hash object." - with self.lock: - return W_Hash(space, self.name, copy_from=self.ctx) - - def digest(self, space): - "Return the digest value as a string of binary data." - digest = self._digest(space) - return space.newbytes(digest) - - def hexdigest(self, space): - "Return the digest value as a string of hexadecimal digits." 
- digest = self._digest(space) - hexdigits = '0123456789abcdef' - result = StringBuilder(self.digest_size * 2) - for c in digest: - result.append(hexdigits[(ord(c) >> 4) & 0xf]) - result.append(hexdigits[ ord(c) & 0xf]) - return space.newtext(result.build()) - - def get_digest_size(self, space): - return space.newint(self.digest_size) - - def get_block_size(self, space): - digest_type = self.digest_type_by_name(space) - block_size = ropenssl.EVP_MD_block_size(digest_type) - return space.newint(block_size) - - def get_name(self, space): - return space.newtext(self.name) - - def _digest(self, space): - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - try: - with self.lock: - if not ropenssl.EVP_MD_CTX_copy(ctx, self.ctx): - raise ValueError - digest_size = self.digest_size - with rffi.scoped_alloc_buffer(digest_size) as buf: - ropenssl.EVP_DigestFinal(ctx, buf.raw, None) - return buf.str(digest_size) - finally: - ropenssl.EVP_MD_CTX_free(ctx) - - -W_Hash.typedef = TypeDef( - 'HASH', - __repr__=interp2app(W_Hash.descr_repr), - update=interp2app(W_Hash.update), - copy=interp2app(W_Hash.copy), - digest=interp2app(W_Hash.digest), - hexdigest=interp2app(W_Hash.hexdigest), - # - digest_size=GetSetProperty(W_Hash.get_digest_size), - digestsize=GetSetProperty(W_Hash.get_digest_size), - block_size=GetSetProperty(W_Hash.get_block_size), - name=GetSetProperty(W_Hash.get_name), -) -W_Hash.typedef.acceptable_as_base_class = False - - at unwrap_spec(name='text', string='bufferstr') -def new(space, name, string=''): - w_hash = W_Hash(space, name) - w_hash.update(space, string) - return w_hash - -# shortcut functions -def make_new_hash(name, funcname): - @func_renamer(funcname) - @unwrap_spec(string='bufferstr') - def new_hash(space, string=''): - return new(space, name, string) - return new_hash - -for _name in algorithms: - _newname = 'new_%s' % (_name,) - globals()[_newname] = make_new_hash(_name, _newname) - - -HAS_FAST_PKCS5_PBKDF2_HMAC = 
ropenssl.PKCS5_PBKDF2_HMAC is not None -if HAS_FAST_PKCS5_PBKDF2_HMAC: - @unwrap_spec(name='text', password='bytes', salt='bytes', rounds=int, - w_dklen=WrappedDefault(None)) - def pbkdf2_hmac(space, name, password, salt, rounds, w_dklen): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise oefmt(space.w_ValueError, "unknown hash function") - if space.is_w(w_dklen, space.w_None): - dklen = ropenssl.EVP_MD_size(digest) - else: - dklen = space.int_w(w_dklen) - if dklen < 1: - raise oefmt(space.w_ValueError, - "key length must be greater than 0.") - with rffi.scoped_alloc_buffer(dklen) as buf: - r = ropenssl.PKCS5_PBKDF2_HMAC( - password, len(password), salt, len(salt), rounds, digest, - dklen, buf.raw) - if not r: - raise ValueError - return space.newbytes(buf.str(dklen)) diff --git a/pypy/module/_hashlib/moduledef.py b/pypy/module/_hashlib/moduledef.py deleted file mode 100644 --- a/pypy/module/_hashlib/moduledef.py +++ /dev/null @@ -1,22 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule -from pypy.module._hashlib.interp_hashlib import ( - algorithms, fetch_names, HAS_FAST_PKCS5_PBKDF2_HMAC) - - -class Module(MixedModule): - interpleveldefs = { - 'new' : 'interp_hashlib.new', - } - - appleveldefs = { - } - - for name in algorithms: - interpleveldefs['openssl_' + name] = 'interp_hashlib.new_' + name - - if HAS_FAST_PKCS5_PBKDF2_HMAC: - interpleveldefs['pbkdf2_hmac'] = 'interp_hashlib.pbkdf2_hmac' - - def startup(self, space): - w_meth_names = fetch_names(space) - space.setattr(self, space.newtext('openssl_md_meth_names'), w_meth_names) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_hashlib.py +++ /dev/null @@ -1,123 +0,0 @@ -class AppTestHashlib: - spaceconfig = { - "usemodules": ['_hashlib', 'array', 'struct', 'binascii'], - } - - def test_method_names(self): - import _hashlib - assert 
isinstance(_hashlib.openssl_md_meth_names, frozenset) - assert "md5" in _hashlib.openssl_md_meth_names - - def test_simple(self): - import _hashlib - assert _hashlib.new('md5').__class__.__name__ == 'HASH' - assert len(_hashlib.new('md5').hexdigest()) == 32 - - def test_attributes(self): - import hashlib - for name, (expected_size, expected_block_size) in { - 'md5': (16, 64), - 'sha1': (20, 64), - 'sha224': (28, 64), - 'sha256': (32, 64), - 'sha384': (48, 128), - 'sha512': (64, 128), - }.items(): - h = hashlib.new(name) - assert h.name == name - assert h.digest_size == expected_size - assert h.digestsize == expected_size - assert h.block_size == expected_block_size - # - h.update('abc') - h2 = h.copy() - h.update('def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update('d') - h2.update('ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - assert len(digest) == h.digest_size - assert len(hexdigest) == h.digest_size * 2 - c_digest = digest - c_hexdigest = hexdigest - - # also test the pure Python implementation - py_new = getattr(hashlib, '__get_builtin_constructor') - h = py_new(name)('') - assert h.digest_size == expected_size - assert h.digestsize == expected_size - assert h.block_size == expected_block_size - # - h.update('abc') - h2 = h.copy() - h.update('def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update('d') - h2.update('ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - - # compare both implementations - assert c_digest == digest - assert c_hexdigest == hexdigest - - def test_shortcut(self): - import hashlib - assert repr(hashlib.md5()).startswith("= 1.1") - out = pbkdf2_hmac('sha1', 'password', 'salt', 1) - assert out == '0c60c80f961f0e71f3a9b524af6012062fe037a6'.decode('hex') - out = pbkdf2_hmac('sha1', 'password', 'salt', 2, None) - assert out == 'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'.decode('hex') diff --git a/pypy/module/_hashlib/test/test_ztranslation.py 
b/pypy/module/_hashlib/test/test_ztranslation.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_ztranslation.py +++ /dev/null @@ -1,4 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test_checkmodule(): - checkmodule('_hashlib') From pypy.commits at gmail.com Mon Dec 2 08:48:10 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 05:48:10 -0800 (PST) Subject: [pypy-commit] buildbot default: parallel -> parallel_runs, update 64-bit Dockerfile with lessons learned Message-ID: <5de5161a.1c69fb81.f27d9.353e@mx.google.com> Author: Matti Picus Branch: Changeset: r1117:36c2b9791482 Date: 2019-12-02 15:47 +0200 http://bitbucket.org/pypy/buildbot/changeset/36c2b9791482/ Log: parallel -> parallel_runs, update 64-bit Dockerfile with lessons learned diff --git a/docker/Dockerfile b/docker/Dockerfile --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,7 @@ # Build with something like this, where # -t is the name of the image # -f is this file -# 1001 is the UID of the user to run as +# 1001 is the UID of the user to run as, you might want to use $UID # docker is the directory where install_ffi.sh is # docker build -t buildslave_x86_64 --build-arg BUILDSLAVE_UID=1001 -f docker/Dockerfile docker # @@ -24,27 +24,29 @@ # docker run -it --rm -v:/build_dir> \ # -ePYPY_MAKE_PORTABLE=1 buildslave_x86_64 # -# You might want to keep the PYPY_USESSION_DIR where the testing/building -# artifacts are. Docker will not do this for you, so do something like this +# You might want to keep the TMPDIR where the testing/building +# artifacts are. 
This will normally be inside the docker, so do something like this # to save the files outside the docker # # mkdir -p build_dir/tmp # docker run -it --rm -v:/build_dir> \ -# -ePYPY_USESSION_DIR=/build_dir/tmp -ePYPY_MAKE_PORTABLE=1 buildslave_x86_64 +# -eTMPDIR=/build_dir/tmp -ePYPY_MAKE_PORTABLE=1 buildslave_x86_64 # # To enter the buildslave image, add a shell command to the end # # docker run -it -v:/build_dir> \ -# -ePYPY_USESSION_DIR=/build_dir/tmp buildslave_x86_64 /bin/bash +# -eTMPDIR=/build_dir/tmp buildslave_x86_64 /bin/bash # +# This will enter the docker as root. Don't do "su - buildslave", +# do "su buildslave" FROM quay.io/pypa/manylinux2010_x86_64:latest WORKDIR /root RUN yum -y update RUN yum install -y wget bzip2-devel zlib-devel glibc-devel libX11-devel \ - libXt-devel patch expat-devel libXft-devel tk-devel gdbm-devel gdb \ - perl xz-devel ncurses-devel sqlite-devel gc-devel prelink python-virtualenv + libXt-devel patch expat-devel libXft-devel tk-devel gdbm-devel gdb vim \ + perl xz-devel ncurses-devel sqlite-devel prelink python-virtualenv # Taken from pyca/infra/cryptography-manylinux # centos6 libffi is buggy, download and use a newer one @@ -54,12 +56,24 @@ ADD install_libffi.sh /root/install_libffi.sh RUN sh install_libffi.sh manylinux2010 2>&1 | tee /root/install_libffi.log RUN sh install_openssl.sh manylinux2010 2>&1 | tee /root/install_openssl.log +# Use an up-to-date version of xz for lzma ADD install_xz5.sh /root/install_xz5.sh ADD lasse_collin_pubkey.txt /root/lasse_collin_pubkey.txt ADD xz-5.2.4.tar.gz.sig /root/xz-5.2.4.tar.gz.sig -RUN sh install_xz5.sh manylinux2010 m32 # 2>&1 | tee /root/install_xz5.log +RUN sh install_xz5.sh manylinux2010 2>&1 | tee /root/install_xz5.log +# centos6 provides gc7.1, which does not work in a docker. 
Use a newer version +# This is for testing only +Add install_gc.sh /root/install_gc.sh +RUN sh install_gc.sh 2>&1 | tee /root/install_gc.sh + +# prefer our libraries in /usr/local/lib +ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH # get a pypy for translation. On x86_64 we can use portable pypy +# XXX update this to use release versions when we have one + +# RUN wget -q http://buildbot.pypy.org/nightly/trunk/pypy-c-jit-98204-a8a24325ea83-linux.tar.bz2 -O - | tar -C /opt -xj +# RUN ln -s /opt/pypy-c-jit-98204-a8a24325ea83-linux/bin/pypy /usr/local/bin/pypy RUN wget -q https://bitbucket.org/squeaky/portable-pypy/downloads/pypy-7.0.0-linux_x86_64-portable.tar.bz2 -O - | tar -C /opt -xj RUN ln -s /opt/pypy-7.0.0-linux_x86_64-portable/bin/pypy /usr/local/bin/pypy @@ -75,8 +89,13 @@ # Define a user ARG BUILDSLAVE_UID=1001 RUN adduser buildslave --uid=$BUILDSLAVE_UID -RUN echo parallel=4 > /home/buildslave/machine_cfg.py +RUN echo parallel_runs=4 > /home/buildslave/machine_cfg.py +# NOTE: always use +# su buildslave +# not +# su - buildslave +# to preserve env variables CMD if [ -e /build_dir/buildbot.tac ]; then \ su buildslave -c "buildslave start --nodaemon /build_dir"; \ else \ diff --git a/docker/Dockerfile32 b/docker/Dockerfile32 --- a/docker/Dockerfile32 +++ b/docker/Dockerfile32 @@ -89,7 +89,7 @@ # Define a user ARG BUILDSLAVE_UID=1001 RUN adduser buildslave --uid=$BUILDSLAVE_UID -RUN echo parallel=4 > /home/buildslave/machine_cfg.py +RUN echo parallel_runs=4 > /home/buildslave/machine_cfg.py # NOTE: always use # su buildslave From pypy.commits at gmail.com Mon Dec 2 10:10:21 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 02 Dec 2019 07:10:21 -0800 (PST) Subject: [pypy-commit] pypy hpy: fix translation, and make it possible to show pdb+ even when the test passes Message-ID: <5de5295d.1c69fb81.362d6.16f4@mx.google.com> Author: Antonio Cuni Branch: hpy Changeset: r98218:9227c0af2212 Date: 2019-12-02 15:07 +0100 
http://bitbucket.org/pypy/pypy/changeset/9227c0af2212/ Log: fix translation, and make it possible to show pdb+ even when the test passes diff --git a/pypy/module/hpy_universal/interp_unicode.py b/pypy/module/hpy_universal/interp_unicode.py --- a/pypy/module/hpy_universal/interp_unicode.py +++ b/pypy/module/hpy_universal/interp_unicode.py @@ -35,6 +35,8 @@ @API.func("HPy HPyUnicode_FromWideChar(HPyContext ctx, const wchar_t *w, HPy_ssize_t size)") def HPyUnicode_FromWideChar(space, ctx, wchar_p, size): + # remove the "const", else we can't call wcharpsize2utf8 later + wchar_p = rffi.cast(rffi.CWCHARP, wchar_p) if wchar_p: if size == -1: size = wcharplen(wchar_p) diff --git a/pypy/objspace/fake/checkmodule.py b/pypy/objspace/fake/checkmodule.py --- a/pypy/objspace/fake/checkmodule.py +++ b/pypy/objspace/fake/checkmodule.py @@ -72,5 +72,9 @@ print exc, val, tb = sys.exc_info() traceback.print_exc() - sys.p = p = PdbPlusShow(space.t) + sys.pdbplus = p = PdbPlusShow(space.t) p.start(tb) + else: + if show_pdbplus: + sys.pdbplus = p = PdbPlusShow(space.t) + p.start(None) From pypy.commits at gmail.com Mon Dec 2 10:44:35 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 07:44:35 -0800 (PST) Subject: [pypy-commit] buildbot default: add prefix target for gc install Message-ID: <5de53163.1c69fb81.2b6c4.09cc@mx.google.com> Author: Matti Picus Branch: Changeset: r1118:c7153bb2f863 Date: 2019-12-02 16:44 +0100 http://bitbucket.org/pypy/buildbot/changeset/c7153bb2f863/ Log: add prefix target for gc install diff --git a/docker/install_gc.sh b/docker/install_gc.sh --- a/docker/install_gc.sh +++ b/docker/install_gc.sh @@ -13,7 +13,7 @@ cd bdwgc autoreconf -vif automake --add-missing -./configure +./configure --prefix=/usr/local make make install popd From pypy.commits at gmail.com Mon Dec 2 11:04:00 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 08:04:00 -0800 (PST) Subject: [pypy-commit] pypy default: centos6 has an old version of zlib 
Message-ID: <5de535f0.1c69fb81.d7d69.094c@mx.google.com> Author: Matti Picus Branch: Changeset: r98219:b0234b636414 Date: 2019-12-02 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b0234b636414/ Log: centos6 has an old version of zlib diff --git a/rpython/rlib/test/test_rzlib.py b/rpython/rlib/test/test_rzlib.py --- a/rpython/rlib/test/test_rzlib.py +++ b/rpython/rlib/test/test_rzlib.py @@ -274,7 +274,7 @@ rzlib.deflateEnd(copied) assert bytes1 + bytes_copy == compressed - at py.test.mark.skipif(rzlib.ZLIB_VERSION == '1.2.8', reason='does not error check') + at py.test.mark.skipif(rzlib.ZLIB_VERSION in ('1.2.3', '1.2.8'), reason='does not error check') def test_unsuccessful_compress_copy(): """ Errors during unsuccesful deflateCopy operations raise RZlibErrors. From pypy.commits at gmail.com Mon Dec 2 16:04:18 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 02 Dec 2019 13:04:18 -0800 (PST) Subject: [pypy-commit] pypy py3.6: typo Message-ID: <5de57c52.1c69fb81.151a1.4644@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98220:b5cc5bc24633 Date: 2019-12-02 23:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b5cc5bc24633/ Log: typo diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -8,7 +8,7 @@ # cpyext is required for _imp.create_dynamic() spaceconfig = { 'usemodules': ['binascii', 'imp', 'itertools', 'time', 'struct', - 'zipimport', cpyext], + 'zipimport', 'cpyext'], } def setup_class(cls): From pypy.commits at gmail.com Tue Dec 3 15:50:01 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 12:50:01 -0800 (PST) Subject: [pypy-commit] pypy default: Modernise raises() syntax Message-ID: <5de6ca79.1c69fb81.151a1.a33e@mx.google.com> Author: Ronan Lamy Branch: Changeset: r98224:ad72005340fd Date: 2019-12-03 20:38 +0000 http://bitbucket.org/pypy/pypy/changeset/ad72005340fd/ Log: Modernise raises() syntax diff --git 
a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -10,7 +10,8 @@ yield 1 g = f() assert g.next() == 1 - raises(StopIteration, g.next) + with raises(StopIteration): + g.next() def test_attributes(self): def f(): @@ -23,7 +24,8 @@ assert not g.gi_running g.next() assert not g.gi_running - raises(StopIteration, g.next) + with raises(StopIteration): + g.next() assert not g.gi_running assert g.gi_frame is None assert g.gi_code is f.__code__ @@ -58,14 +60,16 @@ yield 2 g = f() # two arguments version - raises(NameError, g.throw, NameError, "Error") + with raises(NameError): + g.throw(NameError, "Error") def test_throw2(self): def f(): yield 2 g = f() # single argument version - raises(NameError, g.throw, NameError("Error")) + with raises(NameError): + g.throw(NameError("Error")) def test_throw3(self): def f(): @@ -77,7 +81,8 @@ g = f() assert g.next() == 1 assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) + with raises(StopIteration): + g.next() def test_throw4(self): d = {} @@ -94,7 +99,8 @@ assert g.next() == 1 assert g.next() == 2 assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) + with raises(StopIteration): + g.next() def test_throw5(self): def f(): @@ -109,41 +115,48 @@ g = f() g.next() # String exceptions are not allowed anymore - raises(TypeError, g.throw, "Error") + with raises(TypeError): + g.throw("Error") assert g.throw(Exception) == 3 - raises(StopIteration, g.throw, Exception) + with raises(StopIteration): + g.throw(Exception) def test_throw6(self): def f(): yield 2 g = f() - raises(NameError, g.throw, NameError, "Error", None) + with raises(NameError): + g.throw(NameError, "Error", None) def test_throw_fail(self): def f(): yield 1 g = f() - raises(TypeError, g.throw, NameError("Error"), "error") + with raises(TypeError): + g.throw(NameError("Error"), "error") def test_throw_fail2(self): 
def f(): yield 1 g = f() - raises(TypeError, g.throw, list()) + with raises(TypeError): + g.throw(list()) def test_throw_fail3(self): def f(): yield 1 g = f() - raises(TypeError, g.throw, NameError("Error"), None, "not tb object") + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") def test_throw_finishes_generator(self): def f(): yield 1 g = f() assert g.gi_frame is not None - raises(ValueError, g.throw, ValueError) + with raises(ValueError): + g.throw(ValueError) assert g.gi_frame is None def test_throw_bug(self): @@ -162,8 +175,10 @@ g = f() res = g.next() assert res == 1 - raises(StopIteration, g.next) - raises(NameError, g.throw, NameError) + with raises(StopIteration): + g.next() + with raises(NameError): + g.throw(NameError) def test_close(self): def f(): @@ -189,7 +204,8 @@ raise NameError g = f() g.next() - raises(NameError, g.close) + with raises(NameError): + g.close() def test_close_fail(self): def f(): @@ -199,7 +215,8 @@ yield 2 g = f() g.next() - raises(RuntimeError, g.close) + with raises(RuntimeError): + g.close() def test_close_on_collect(self): ## we need to exec it, else it won't run on python2.4 @@ -223,8 +240,10 @@ def f(): yield 1 g = f() - raises(TypeError, g.send) # one argument required - raises(TypeError, g.send, 1) # not started, must send None + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None def test_generator_explicit_stopiteration(self): def f(): @@ -245,7 +264,8 @@ i = me.next() yield i me = g() - raises(ValueError, me.next) + with raises(ValueError): + me.next() def test_generator_expression(self): exec "res = sum(i*i for i in range(5))" @@ -301,8 +321,10 @@ def mygen(): yield 42 g = mygen() - raises(TypeError, g.send, 2) - raises(TypeError, g.send, 2) + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) def test_should_not_inline(space): From pypy.commits at gmail.com Tue Dec 3 15:50:03 2019 From: 
pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 12:50:03 -0800 (PST) Subject: [pypy-commit] pypy default: Remove unnecessary exec Message-ID: <5de6ca7b.1c69fb81.23ce1.5f24@mx.google.com> Author: Ronan Lamy Branch: Changeset: r98225:fa48adb1543c Date: 2019-12-03 20:41 +0000 http://bitbucket.org/pypy/pypy/changeset/fa48adb1543c/ Log: Remove unnecessary exec diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -219,22 +219,17 @@ g.close() def test_close_on_collect(self): - ## we need to exec it, else it won't run on python2.4 - d = {} - exec """ + import gc def f(): try: yield finally: f.x = 42 - """.strip() in d - - g = d['f']() + g = f() g.next() del g - import gc gc.collect() - assert d['f'].x == 42 + assert f.x == 42 def test_generator_raises_typeerror(self): def f(): From pypy.commits at gmail.com Tue Dec 3 15:50:05 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 12:50:05 -0800 (PST) Subject: [pypy-commit] pypy py3.6: hg merge default Message-ID: <5de6ca7d.1c69fb81.fd6e7.c57d@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98226:53a811878c10 Date: 2019-12-03 20:49 +0000 http://bitbucket.org/pypy/pypy/changeset/53a811878c10/ Log: hg merge default diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -58,14 +58,16 @@ yield 2 g = f() # two arguments version - raises(NameError, g.throw, NameError, "Error") + with raises(NameError): + g.throw(NameError, "Error") def test_throw2(self): def f(): yield 2 g = f() # single argument version - raises(NameError, g.throw, NameError("Error")) + with raises(NameError): + g.throw(NameError("Error")) def test_throw3(self): def f(): @@ -109,41 +111,48 @@ g = f() next(g) # String exceptions are not allowed anymore - 
raises(TypeError, g.throw, "Error") + with raises(TypeError): + g.throw("Error") assert g.throw(Exception) == 3 - raises(StopIteration, g.throw, Exception) + with raises(StopIteration): + g.throw(Exception) def test_throw6(self): def f(): yield 2 g = f() - raises(NameError, g.throw, NameError, "Error", None) + with raises(NameError): + g.throw(NameError, "Error", None) def test_throw_fail(self): def f(): yield 1 g = f() - raises(TypeError, g.throw, NameError("Error"), "error") + with raises(TypeError): + g.throw(NameError("Error"), "error") def test_throw_fail2(self): def f(): yield 1 g = f() - raises(TypeError, g.throw, list()) + with raises(TypeError): + g.throw(list()) def test_throw_fail3(self): def f(): yield 1 g = f() - raises(TypeError, g.throw, NameError("Error"), None, "not tb object") + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") def test_throw_finishes_generator(self): def f(): yield 1 g = f() assert g.gi_frame is not None - raises(ValueError, g.throw, ValueError) + with raises(ValueError): + g.throw(ValueError) assert g.gi_frame is None def test_throw_bug(self): @@ -230,7 +239,8 @@ raise NameError g = f() next(g) - raises(NameError, g.close) + with raises(NameError): + g.close() def test_close_fail(self): def f(): @@ -240,9 +250,11 @@ yield 2 g = f() next(g) - raises(RuntimeError, g.close) + with raises(RuntimeError): + g.close() def test_close_on_collect(self): + import gc def f(): try: yield @@ -251,7 +263,6 @@ g = f() next(g) del g - import gc gc.collect() assert f.x == 42 @@ -259,8 +270,10 @@ def f(): yield 1 g = f() - raises(TypeError, g.send) # one argument required - raises(TypeError, g.send, 1) # not started, must send None + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None def test_generator_explicit_stopiteration(self): def f(): @@ -535,8 +548,10 @@ def mygen(): yield 42 g = mygen() - raises(TypeError, g.send, 2) - raises(TypeError, 
g.send, 2) + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) class AppTestAsyncGenerator(object): From pypy.commits at gmail.com Tue Dec 3 16:44:18 2019 From: pypy.commits at gmail.com (arigo) Date: Tue, 03 Dec 2019 13:44:18 -0800 (PST) Subject: [pypy-commit] pypy default: Issue #3125 Message-ID: <5de6d732.1c69fb81.45dc6.97bc@mx.google.com> Author: Armin Rigo Branch: Changeset: r98227:c3817c90d7af Date: 2019-12-03 22:43 +0100 http://bitbucket.org/pypy/pypy/changeset/c3817c90d7af/ Log: Issue #3125 Ooops! diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -800,7 +800,7 @@ DIRENT = rffi_platform.Struct('struct dirent', [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1)), ('d_ino', lltype.Signed)] - + [('d_type', rffi.INT)] if HAVE_D_TYPE else []) + + ([('d_type', rffi.INT)] if HAVE_D_TYPE else [])) if HAVE_D_TYPE: DT_UNKNOWN = rffi_platform.ConstantInteger('DT_UNKNOWN') DT_REG = rffi_platform.ConstantInteger('DT_REG') From pypy.commits at gmail.com Tue Dec 3 16:47:51 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 13:47:51 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Move an asyncgen test to apptest_coroutine, next to the other ones Message-ID: <5de6d807.1c69fb81.e8ff7.9354@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98228:840f39388700 Date: 2019-12-03 21:02 +0000 http://bitbucket.org/pypy/pypy/changeset/840f39388700/ Log: Move an asyncgen test to apptest_coroutine, next to the other ones diff --git a/pypy/interpreter/test/apptest_coroutine.py b/pypy/interpreter/test/apptest_coroutine.py --- a/pypy/interpreter/test/apptest_coroutine.py +++ b/pypy/interpreter/test/apptest_coroutine.py @@ -699,6 +699,99 @@ assert run_async(run()) == ([], (1,)) +def test_async_gen_exception_11(): + # bpo-33786 + def compare_generators(sync_gen, async_gen): + def sync_iterate(g): + res = [] + while True: + try: + res.append(g.__next__()) + except 
StopIteration: + res.append('STOP') + break + except Exception as ex: + res.append(str(type(ex))) + return res + + def async_iterate(g): + res = [] + while True: + an = g.__anext__() + try: + while True: + try: + an.__next__() + except StopIteration as ex: + if ex.args: + res.append(ex.args[0]) + break + else: + res.append('EMPTY StopIteration') + break + except StopAsyncIteration: + raise + except Exception as ex: + res.append(str(type(ex))) + break + except StopAsyncIteration: + res.append('STOP') + break + return res + + def async_iterate(g): + res = [] + while True: + try: + g.__anext__().__next__() + except StopAsyncIteration: + res.append('STOP') + break + except StopIteration as ex: + if ex.args: + res.append(ex.args[0]) + else: + res.append('EMPTY StopIteration') + break + except Exception as ex: + res.append(str(type(ex))) + return res + + sync_gen_result = sync_iterate(sync_gen) + async_gen_result = async_iterate(async_gen) + assert sync_gen_result == async_gen_result, "%s != %s" % (str(sync_gen_result), str(async_gen_result)) + return async_gen_result + + def sync_gen(): + yield 10 + yield 20 + + def sync_gen_wrapper(): + yield 1 + sg = sync_gen() + sg.send(None) + try: + sg.throw(GeneratorExit()) + except GeneratorExit: + yield 2 + yield 3 + + async def async_gen(): + yield 10 + yield 20 + + async def async_gen_wrapper(): + yield 1 + asg = async_gen() + await asg.asend(None) + try: + await asg.athrow(GeneratorExit()) + except GeneratorExit: + yield 2 + yield 3 + + compare_generators(sync_gen_wrapper(), async_gen_wrapper()) + def test_asyncgen_yield_stopiteration(): async def foo(): yield 1 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -554,104 +554,6 @@ g.send(2) -class AppTestAsyncGenerator(object): - - def test_async_gen_exception_11(self): - """ - # bpo-33786 - def compare_generators(sync_gen, async_gen): - 
def sync_iterate(g): - res = [] - while True: - try: - res.append(g.__next__()) - except StopIteration: - res.append('STOP') - break - except Exception as ex: - res.append(str(type(ex))) - return res - - def async_iterate(g): - res = [] - while True: - an = g.__anext__() - try: - while True: - try: - an.__next__() - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - break - else: - res.append('EMPTY StopIteration') - break - except StopAsyncIteration: - raise - except Exception as ex: - res.append(str(type(ex))) - break - except StopAsyncIteration: - res.append('STOP') - break - return res - - def async_iterate(g): - res = [] - while True: - try: - g.__anext__().__next__() - except StopAsyncIteration: - res.append('STOP') - break - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - else: - res.append('EMPTY StopIteration') - break - except Exception as ex: - res.append(str(type(ex))) - return res - - sync_gen_result = sync_iterate(sync_gen) - async_gen_result = async_iterate(async_gen) - assert sync_gen_result == async_gen_result, "%s != %s" % (str(sync_gen_result), str(async_gen_result)) - return async_gen_result - - def sync_gen(): - yield 10 - yield 20 - - def sync_gen_wrapper(): - yield 1 - sg = sync_gen() - sg.send(None) - try: - sg.throw(GeneratorExit()) - except GeneratorExit: - yield 2 - yield 3 - - async def async_gen(): - yield 10 - yield 20 - - async def async_gen_wrapper(): - yield 1 - asg = async_gen() - await asg.asend(None) - try: - await asg.athrow(GeneratorExit()) - except GeneratorExit: - yield 2 - yield 3 - - compare_generators(sync_gen_wrapper(), async_gen_wrapper()) - """ - - def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline w_co = space.appexec([], '''(): From pypy.commits at gmail.com Tue Dec 3 16:47:52 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 13:47:52 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Clean up test Message-ID: 
<5de6d808.1c69fb81.515e.752c@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98229:be7ab887a851 Date: 2019-12-03 21:13 +0000 http://bitbucket.org/pypy/pypy/changeset/be7ab887a851/ Log: Clean up test diff --git a/pypy/interpreter/test/apptest_coroutine.py b/pypy/interpreter/test/apptest_coroutine.py --- a/pypy/interpreter/test/apptest_coroutine.py +++ b/pypy/interpreter/test/apptest_coroutine.py @@ -699,69 +699,40 @@ assert run_async(run()) == ([], (1,)) +# Helpers for test_async_gen_exception_11() below +def sync_iterate(g): + res = [] + while True: + try: + res.append(g.__next__()) + except StopIteration: + res.append('STOP') + break + except Exception as ex: + res.append(str(type(ex))) + return res + +def async_iterate(g): + res = [] + while True: + try: + g.__anext__().__next__() + except StopAsyncIteration: + res.append('STOP') + break + except StopIteration as ex: + if ex.args: + res.append(ex.args[0]) + else: + res.append('EMPTY StopIteration') + break + except Exception as ex: + res.append(str(type(ex))) + return res + + def test_async_gen_exception_11(): # bpo-33786 - def compare_generators(sync_gen, async_gen): - def sync_iterate(g): - res = [] - while True: - try: - res.append(g.__next__()) - except StopIteration: - res.append('STOP') - break - except Exception as ex: - res.append(str(type(ex))) - return res - - def async_iterate(g): - res = [] - while True: - an = g.__anext__() - try: - while True: - try: - an.__next__() - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - break - else: - res.append('EMPTY StopIteration') - break - except StopAsyncIteration: - raise - except Exception as ex: - res.append(str(type(ex))) - break - except StopAsyncIteration: - res.append('STOP') - break - return res - - def async_iterate(g): - res = [] - while True: - try: - g.__anext__().__next__() - except StopAsyncIteration: - res.append('STOP') - break - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - else: - 
res.append('EMPTY StopIteration') - break - except Exception as ex: - res.append(str(type(ex))) - return res - - sync_gen_result = sync_iterate(sync_gen) - async_gen_result = async_iterate(async_gen) - assert sync_gen_result == async_gen_result, "%s != %s" % (str(sync_gen_result), str(async_gen_result)) - return async_gen_result - def sync_gen(): yield 10 yield 20 @@ -790,7 +761,9 @@ yield 2 yield 3 - compare_generators(sync_gen_wrapper(), async_gen_wrapper()) + sync_gen_result = sync_iterate(sync_gen_wrapper()) + async_gen_result = async_iterate(async_gen_wrapper()) + assert sync_gen_result == async_gen_result def test_asyncgen_yield_stopiteration(): async def foo(): From pypy.commits at gmail.com Tue Dec 3 16:47:54 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 03 Dec 2019 13:47:54 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Modernise raises() syntax Message-ID: <5de6d80a.1c69fb81.df4e6.b408@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98230:df95b7b0b5c6 Date: 2019-12-03 21:47 +0000 http://bitbucket.org/pypy/pypy/changeset/df95b7b0b5c6/ Log: Modernise raises() syntax diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -10,7 +10,8 @@ yield 1 g = f() assert next(g) == 1 - raises(StopIteration, next, g) + with raises(StopIteration): + next(g) def test_attributes(self): def f(): @@ -23,7 +24,8 @@ assert not g.gi_running next(g) assert not g.gi_running - raises(StopIteration, next, g) + with raises(StopIteration): + next(g) assert not g.gi_running assert g.gi_frame is None assert g.gi_code is f.__code__ @@ -79,7 +81,8 @@ g = f() assert next(g) == 1 assert g.throw(NameError("Error")) == 3 - raises(StopIteration, next, g) + with raises(StopIteration): + next(g) def test_throw4(self): d = {} @@ -96,7 +99,8 @@ assert next(g) == 1 assert next(g) == 2 assert g.throw(NameError("Error")) == 3 - 
raises(StopIteration, next, g) + with raises(StopIteration): + next(g) def test_throw5(self): def f(): @@ -171,8 +175,10 @@ g = f() res = next(g) assert res == 1 - raises(StopIteration, next, g) - raises(NameError, g.throw, NameError) + with raises(StopIteration): + next(g) + with raises(NameError): + g.throw(NameError) def test_throw_tb(self): def f(): @@ -294,7 +300,8 @@ i = next(me) yield i me = g() - raises(ValueError, next, me) + with raises(ValueError): + next(me) def test_generator_expression(self): d = {} @@ -427,10 +434,14 @@ g.__qualname__ = "j.k" assert g.__name__ == "h.i" assert g.__qualname__ == "j.k" - raises(TypeError, "g.__name__ = 42") - raises(TypeError, "g.__qualname__ = 42") - raises((TypeError, AttributeError), "del g.__name__") - raises((TypeError, AttributeError), "del g.__qualname__") + with raises(TypeError): + g.__name__ = 42 + with raises(TypeError): + g.__qualname__ = 42 + with raises((TypeError, AttributeError)): + del g.__name__ + with raises((TypeError, AttributeError)): + del g.__qualname__ def test_gi_yieldfrom(self): """ def g(x): @@ -466,7 +477,8 @@ gen = g() assert next(gen) == 42 closed = [] - raises(GeneratorExit, gen.throw, GeneratorExit) + with raises(GeneratorExit): + gen.throw(GeneratorExit) assert closed == [True] """ @@ -487,7 +499,8 @@ assert sys.exc_info()[0] is IndexError assert next(gen) is ValueError assert sys.exc_info()[0] is IndexError - raises(StopIteration, next, gen) + with raises(StopIteration): + next(gen) assert sys.exc_info()[0] is IndexError def test_exc_info_in_generator_2(self): @@ -638,9 +651,10 @@ for i in range(2): x = next(g) trace.append("Yielded %s" % (x,)) - exc = raises(ValueError, g.close) - assert exc.value.args[0] == "nybbles have exploded with delight" - assert isinstance(exc.value.__context__, GeneratorExit) + with raises(ValueError) as excinfo: + g.close() + assert excinfo.value.args[0] == "nybbles have exploded with delight" + assert isinstance(excinfo.value.__context__, GeneratorExit) 
assert trace == [ "Starting g1", "Yielded g1 ham", @@ -679,8 +693,9 @@ x = next(g) trace.append("Yielded %s" % (x,)) e = ValueError("tomato ejected") - exc = raises(ValueError, g.throw, e) - assert exc.value.args[0] == "tomato ejected" + with raises(ValueError) as excinfo: + g.throw(e) + assert excinfo.value.args[0] == "tomato ejected" assert trace == [ "Starting g1", "Yielded g1 ham", @@ -709,8 +724,9 @@ for i in range(5): x = next(gi) trace.append("Yielded %s" % (x,)) - exc = raises(ValueError, gi.throw, ValueError("tomato ejected")) - assert exc.value.args[0] == "tomato ejected" + with raises(ValueError) as excinfo: + gi.throw(ValueError("tomato ejected")) + assert excinfo.value.args[0] == "tomato ejected" assert trace == [ "Starting g", "Yielded 0", @@ -742,16 +758,18 @@ gi = g() assert next(gi) == 1 - raises(ZeroDivisionError, gi.send, 1) + with raises(ZeroDivisionError): + gi.send(1) gi = g() assert next(gi) == 1 - raises(ZeroDivisionError, gi.throw, RuntimeError) + with raises(ZeroDivisionError): + gi.throw(RuntimeError) gi = g() assert next(gi) == 1 - import io, sys - sys.stderr = io.StringIO() + import _io, sys + sys.stderr = _io.StringIO() gi.close() assert 'ZeroDivisionError' in sys.stderr.getvalue() @@ -859,7 +877,8 @@ def f(x): raise StopIteration yield x - raises(StopIteration, next, f(5)) + with raises(StopIteration): + next(f(5)) def test_future_generator_stop(self): d = {} @@ -870,7 +889,8 @@ yield x """, d) f = d['f'] - raises(RuntimeError, next, f(5)) + with raises(RuntimeError): + next(f(5)) def test_generator_stop_cause(self): d = {} @@ -882,7 +902,8 @@ my_gen = d['gen1']() assert next(my_gen) == 42 stop_exc = StopIteration('spam') - e = raises(RuntimeError, my_gen.throw, StopIteration, stop_exc, None) + with raises(RuntimeError) as e: + my_gen.throw(StopIteration, stop_exc, None) assert e.value.__cause__ is stop_exc assert e.value.__context__ is stop_exc @@ -892,8 +913,9 @@ gen = d['gen1']() assert next(gen) == 1 - exc = raises(StopIteration, 
gen.send, (2,)) - assert exc.value.value == (2,) + with raises(StopIteration) as excinfo: + gen.send((2,)) + assert excinfo.value.value == (2,) def test_return_stopiteration(self): d = {} @@ -901,6 +923,7 @@ gen = d['gen1']() assert next(gen) == 1 - exc = raises(StopIteration, gen.send, StopIteration(2)) - assert isinstance(exc.value, StopIteration) - assert exc.value.value.value == 2 + with raises(StopIteration) as excinfo: + gen.send(StopIteration(2)) + assert isinstance(excinfo.value, StopIteration) + assert excinfo.value.value.value == 2 From pypy.commits at gmail.com Wed Dec 4 11:12:29 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 04 Dec 2019 08:12:29 -0800 (PST) Subject: [pypy-commit] pypy default: typo Message-ID: <5de7daed.1c69fb81.45272.b4e9@mx.google.com> Author: Matti Picus Branch: Changeset: r98231:94907f4adb70 Date: 2019-12-04 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/94907f4adb70/ Log: typo diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -28,7 +28,7 @@ # Always remove the cached files # Before translation this is done via "py.path.local(CACHE_DIR).remove()" print 'removing %s/rpython/_cache' % toplevel - shutil.rmtree('%s/rpython/_cache', ignore_errors=True) + shutil.rmtree('%s/rpython/_cache' % toplevel, ignore_errors=True) # Add toplevel repository dir to sys.path sys.path.insert(0, toplevel) import pytest From pypy.commits at gmail.com Wed Dec 4 11:12:31 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 04 Dec 2019 08:12:31 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into branch Message-ID: <5de7daef.1c69fb81.ce26f.09dc@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98232:5a724d59f277 Date: 2019-12-04 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5a724d59f277/ Log: merge default into branch diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -28,7 +28,7 @@ # Always remove the cached files 
# Before translation this is done via "py.path.local(CACHE_DIR).remove()" print 'removing %s/rpython/_cache' % toplevel - shutil.rmtree('%s/rpython/_cache', ignore_errors=True) + shutil.rmtree('%s/rpython/_cache' % toplevel, ignore_errors=True) # Add toplevel repository dir to sys.path sys.path.insert(0, toplevel) import pytest diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -800,7 +800,7 @@ DIRENT = rffi_platform.Struct('struct dirent', [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1)), ('d_ino', lltype.Signed)] - + [('d_type', rffi.INT)] if HAVE_D_TYPE else []) + + ([('d_type', rffi.INT)] if HAVE_D_TYPE else [])) if HAVE_D_TYPE: DT_UNKNOWN = rffi_platform.ConstantInteger('DT_UNKNOWN') DT_REG = rffi_platform.ConstantInteger('DT_REG') From pypy.commits at gmail.com Wed Dec 4 21:16:56 2019 From: pypy.commits at gmail.com (rlamy) Date: Wed, 04 Dec 2019 18:16:56 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Move app-level tests from test_generator.py to apptest_generator.py and simplify them Message-ID: <5de86898.1c69fb81.4f6b0.5c7f@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98233:17dfad3241f2 Date: 2019-12-05 01:48 +0000 http://bitbucket.org/pypy/pypy/changeset/17dfad3241f2/ Log: Move app-level tests from test_generator.py to apptest_generator.py and simplify them diff too long, truncating to 2000 out of 2660 lines diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/apptest_generator.py copy from pypy/interpreter/test/test_generator.py copy to pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,929 +1,857 @@ -class AppTestGenerator: +from pytest import raises, skip - def test_generator(self): - def f(): +def test_generator(): + def f(): + yield 1 + assert next(f()) == 1 + +def test_generator2(): + def f(): + yield 1 + g = f() + assert next(g) == 1 + with 
raises(StopIteration): + next(g) + +def test_attributes(): + def f(): + yield 1 + assert g.gi_running + g = f() + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + assert g.gi_frame is not None + assert not g.gi_running + next(g) + assert not g.gi_running + with raises(StopIteration): + next(g) + assert not g.gi_running + assert g.gi_frame is None + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + +def test_generator3(): + def f(): + yield 1 + g = f() + assert list(g) == [1] + +def test_generator4(): + def f(): + yield 1 + g = f() + assert [x for x in g] == [1] + +def test_generator5(): + def f(): + v = (yield) + yield v + g = f() + next(g) + assert g.send(42) == 42 + +def test_throw1(): + def f(): + yield 2 + g = f() + # two arguments version + with raises(NameError): + g.throw(NameError, "Error") + +def test_throw2(): + def f(): + yield 2 + g = f() + # single argument version + with raises(NameError): + g.throw(NameError("Error")) + +def test_throw3(): + def f(): + try: yield 1 - assert next(f()) == 1 + yield 2 + except NameError: + yield 3 + g = f() + assert next(g) == 1 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + next(g) - def test_generator2(self): - def f(): +def test_throw4(): + def f(): + try: yield 1 - g = f() - assert next(g) == 1 - with raises(StopIteration): - next(g) + v = (yield 2) + except NameError: + yield 3 + g = f() + assert next(g) == 1 + assert next(g) == 2 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + next(g) - def test_attributes(self): - def f(): +def test_throw5(): + def f(): + try: yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running + except Exception: + x = 3 + try: + yield x + except Exception: + pass + g = f() + next(g) + # String exceptions are not allowed anymore + with raises(TypeError): + g.throw("Error") + assert g.throw(Exception) == 3 + with 
raises(StopIteration): + g.throw(Exception) + +def test_throw6(): + def f(): + yield 2 + g = f() + with raises(NameError): + g.throw(NameError, "Error", None) + + +def test_throw_fail(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), "error") + +def test_throw_fail2(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(list()) + +def test_throw_fail3(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") + +def test_throw_finishes_generator(): + def f(): + yield 1 + g = f() + assert g.gi_frame is not None + with raises(ValueError): + g.throw(ValueError) + assert g.gi_frame is None + +def test_throw_bug(): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + +def test_throw_on_finished_generator(): + def f(): + yield 1 + g = f() + res = next(g) + assert res == 1 + with raises(StopIteration): next(g) - assert not g.gi_running - with raises(StopIteration): - next(g) - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' + with raises(NameError): + g.throw(NameError) - def test_generator3(self): - def f(): +def test_throw_tb(): + def f(): + try: + yield + except ZeroDivisionError: + raise + g = f() + try: + 1 / 0 + except ZeroDivisionError as v: + try: + g.throw(v) + except Exception as w: + tb = w.__traceback__ + levels = 0 + while tb: + levels += 1 + tb = tb.tb_next + assert levels == 3 + +def test_throw_context(): + # gen.throw(exc) must not modify exc.__context__ + def gen(): + try: + yield + except Exception: + raise ValueError + + try: + raise KeyError + except KeyError: + g = gen() + next(g) + exc1 = Exception(1) + exc2 = Exception(2) + exc2.__context__ = exc1 + try: + g.throw(exc2) + except ValueError: + assert exc2.__context__ is exc1 + +def test_close(): + def f(): + yield 1 + g = f() + 
assert g.close() is None + +def test_close2(): + def f(): + try: yield 1 - g = f() - assert list(g) == [1] + except GeneratorExit: + raise StopIteration + g = f() + next(g) + assert g.close() is None - def test_generator4(self): - def f(): +def test_close3(): + def f(): + try: yield 1 - g = f() - assert [x for x in g] == [1] + except GeneratorExit: + raise NameError + g = f() + next(g) + with raises(NameError): + g.close() - def test_generator5(self): - d = {} - exec("""if 1: - def f(): - v = (yield ) - yield v - g = f() +def test_close_fail(): + def f(): + try: + yield 1 + except GeneratorExit: + yield 2 + g = f() + next(g) + with raises(RuntimeError): + g.close() + +def test_close_on_collect(): + import gc + def f(): + try: + yield + finally: + f.x = 42 + g = f() + next(g) + del g + gc.collect() + assert f.x == 42 + +def test_generator_raises_typeerror(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None + +def test_generator_explicit_stopiteration(): + def f(): + yield 1 + raise StopIteration + g = f() + assert [x for x in g] == [1] + +def test_generator_propagate_stopiteration(): + def f(): + it = iter([1]) + while 1: + yield next(it) + g = f() + assert [x for x in g] == [1] + +def test_generator_restart(): + def g(): + i = next(me) + yield i + me = g() + with raises(ValueError): + next(me) + +def test_generator_expression(): + d = {} + exec("res = sum(i*i for i in range(5))", d, d) + assert d['res'] == 30 + +def test_generator_expression_2(): + def f(): + total = sum(i for i in [x for x in z]) + return total + z = [1, 2, 7] + assert f() == 10 + +def test_repr(): + def myFunc(): + yield 1 + g = myFunc() + r = repr(g) + assert r.startswith(".myFunc at 0x") + assert list(g) == [1] + assert repr(g) == r + +def test_unpackiterable_gen(): + g = (i * i for i in range(-5, 3)) + assert set(g) == set([0, 1, 4, 9, 16, 25]) + assert set(g) == set() + assert set(i 
for i in range(0)) == set() + +def test_explicit_stop_iteration_unpackiterable(): + def f(): + yield 1 + raise StopIteration + assert tuple(f()) == (1,) + +def test_exception_is_cleared_by_yield(): + def f(): + try: + foobar + except NameError: + yield 5 + raise + gen = f() + next(gen) # --> 5 + try: + next(gen) + except NameError: + pass + +def test_yield_return(): + def f(): + yield 1 + return 2 + g = f() + assert next(g) == 1 + try: next(g) - """, d, d) - g = d['g'] - assert g.send(42) == 42 + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - with raises(NameError): - g.throw(NameError, "Error") +def test_yield_from_basic(): + def f1(): + yield from [] + yield from [1, 2, 3] + yield from f2() + def f2(): + yield 4 + yield 5 + gen = f1() + assert next(gen) == 1 + assert next(gen) == 2 + assert next(gen) == 3 + assert next(gen) == 4 + assert next(gen) == 5 + assert list(gen) == [] - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - with raises(NameError): - g.throw(NameError("Error")) +def test_yield_from_return(): + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return 2 + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert next(g) == 1 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - next(g) +def test_yield_from_return_tuple(): + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return (1, 2) + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == (1, 2) + else: + assert False, 'Expected StopIteration' - def test_throw4(self): - d = {} - exec("""if 1: - def f(): - try: - yield 1 - v 
= (yield 2) - except: - yield 3 - g = f() - """, d, d) - g = d['g'] - assert next(g) == 1 - assert next(g) == 2 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - next(g) +def test_set_name_qualname(): + class A: + def f(self): + yield 5 + g = A().f() + assert g.__name__ == "f" + assert g.__qualname__ == "test_set_name_qualname..A.f" + g.__name__ = "h.i" + g.__qualname__ = "j.k" + assert g.__name__ == "h.i" + assert g.__qualname__ == "j.k" + with raises(TypeError): + g.__name__ = 42 + with raises(TypeError): + g.__qualname__ = 42 + with raises((TypeError, AttributeError)): + del g.__name__ + with raises((TypeError, AttributeError)): + del g.__qualname__ - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - next(g) - # String exceptions are not allowed anymore - with raises(TypeError): - g.throw("Error") - assert g.throw(Exception) == 3 - with raises(StopIteration): - g.throw(Exception) +def test_gi_yieldfrom(): + def g(x): + assert gen.gi_yieldfrom is None + yield x + assert gen.gi_yieldfrom is None + def f(x): + assert gen.gi_yieldfrom is None + yield from g(x) + assert gen.gi_yieldfrom is None + yield 42 + assert gen.gi_yieldfrom is None + gen = f(5) + assert gen.gi_yieldfrom is None + assert next(gen) == 5 + assert gen.gi_yieldfrom.__name__ == 'g' + assert next(gen) == 42 + assert gen.gi_yieldfrom is None - def test_throw6(self): - def f(): - yield 2 - g = f() - with raises(NameError): - g.throw(NameError, "Error", None) +def test_gi_running_in_throw_generatorexit(): + # We must force gi_running to be True on the outer generators + # when running an inner custom close() method. 
+ class A: + def __iter__(self): + return self + def __next__(self): + return 42 + def close(self): + closed.append(gen.gi_running) + def g(): + yield from A() + gen = g() + assert next(gen) == 42 + closed = [] + with raises(GeneratorExit): + gen.throw(GeneratorExit) + assert closed == [True] - - def test_throw_fail(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), "error") - - def test_throw_fail2(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(list()) - - def test_throw_fail3(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), None, "not tb object") - - def test_throw_finishes_generator(self): - def f(): - yield 1 - g = f() - assert g.gi_frame is not None - with raises(ValueError): - g.throw(ValueError) - assert g.gi_frame is None - - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = next(g) - assert res == 1 - with raises(StopIteration): - next(g) - with raises(NameError): - g.throw(NameError) - - def test_throw_tb(self): - def f(): - try: - yield - except: - raise - g = f() - try: - 1/0 - except ZeroDivisionError as v: - try: - g.throw(v) - except Exception as w: - tb = w.__traceback__ - levels = 0 - while tb: - levels += 1 - tb = tb.tb_next - assert levels == 3 - - def test_throw_context(self): - # gen.throw(exc) must not modify exc.__context__ - def gen(): - try: - yield - except: - raise ValueError - - try: - raise KeyError - except KeyError: - g = gen() - next(g) - exc1 = Exception(1) - exc2 = Exception(2) - exc2.__context__ = exc1 - try: - g.throw(exc2) - except ValueError: - assert exc2.__context__ is exc1 - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - 
yield 1 - except GeneratorExit: - raise StopIteration - g = f() - next(g) - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - next(g) - with raises(NameError): - g.close() - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - next(g) - with raises(RuntimeError): - g.close() - - def test_close_on_collect(self): - import gc - def f(): - try: - yield - finally: - f.x = 42 - g = f() - next(g) - del g - gc.collect() - assert f.x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.send() # one argument required - with raises(TypeError): - g.send(1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield next(it) - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = next(me) - yield i - me = g() - with raises(ValueError): - next(me) - - def test_generator_expression(self): - d = {} - exec("res = sum(i*i for i in range(5))", d, d) - assert d['res'] == 30 - - def test_generator_expression_2(self): - d = {} - exec(""" -def f(): - total = sum(i for i in [x for x in z]) - return total -z = [1, 2, 7] -res = f() -""", d, d) - assert d['res'] == 10 - - def test_repr(self): - def myFunc(): - yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(".myFunc at 0x") - assert list(g) == [1] - assert repr(g) == r - - def test_unpackiterable_gen(self): - g = (i*i for i in range(-5, 3)) - assert set(g) == set([0, 1, 4, 9, 16, 25]) - assert set(g) == set() - assert set(i for i in range(0)) == set() - - def test_explicit_stop_iteration_unpackiterable(self): - def f(): - yield 1 - raise StopIteration - assert tuple(f()) == (1,) - - def 
test_exception_is_cleared_by_yield(self): - def f(): - try: - foobar - except NameError: - yield 5 - raise - gen = f() - next(gen) # --> 5 - try: - next(gen) - except NameError: - pass - - def test_yield_return(self): - """ - def f(): - yield 1 - return 2 - g = f() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == 2 - else: - assert False, 'Expected StopIteration' - """ - - def test_yield_from_basic(self): - """ - def f1(): - yield from [] - yield from [1, 2, 3] - yield from f2() - def f2(): - yield 4 - yield 5 - gen = f1() - assert next(gen) == 1 - assert next(gen) == 2 - assert next(gen) == 3 - assert next(gen) == 4 - assert next(gen) == 5 - assert list(gen) == [] - """ - - def test_yield_from_return(self): - """ - def f1(): - result = yield from f2() - return result - def f2(): - yield 1 - return 2 - g = f1() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == 2 - else: - assert False, 'Expected StopIteration' - """ - - def test_yield_from_return_tuple(self): - """ - def f1(): - result = yield from f2() - return result - def f2(): - yield 1 - return (1, 2) - g = f1() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == (1, 2) - else: - assert False, 'Expected StopIteration' - """ - - def test_set_name_qualname(self): - class A: - def f(self): - yield 5 - g = A().f() - assert g.__name__ == "f" - assert g.__qualname__ == "test_set_name_qualname..A.f" - g.__name__ = "h.i" - g.__qualname__ = "j.k" - assert g.__name__ == "h.i" - assert g.__qualname__ == "j.k" - with raises(TypeError): - g.__name__ = 42 - with raises(TypeError): - g.__qualname__ = 42 - with raises((TypeError, AttributeError)): - del g.__name__ - with raises((TypeError, AttributeError)): - del g.__qualname__ - - def test_gi_yieldfrom(self): """ - def g(x): - assert gen.gi_yieldfrom is None - yield x - assert gen.gi_yieldfrom is None - def f(x): - assert gen.gi_yieldfrom is None - yield from g(x) 
- assert gen.gi_yieldfrom is None - yield 42 - assert gen.gi_yieldfrom is None - gen = f(5) - assert gen.gi_yieldfrom is None - assert next(gen) == 5 - assert gen.gi_yieldfrom.__name__ == 'g' - assert next(gen) == 42 - assert gen.gi_yieldfrom is None - """ - - def test_gi_running_in_throw_generatorexit(self): """ - # We must force gi_running to be True on the outer generators - # when running an inner custom close() method. - class A: - def __iter__(self): - return self - def __next__(self): - return 42 - def close(self): - closed.append(gen.gi_running) - def g(): - yield from A() - gen = g() - assert next(gen) == 42 - closed = [] - with raises(GeneratorExit): - gen.throw(GeneratorExit) - assert closed == [True] - """ - - def test_exc_info_in_generator(self): - import sys - def g(): - try: - raise ValueError - except ValueError: - yield sys.exc_info()[0] - yield sys.exc_info()[0] - try: - raise IndexError - except IndexError: - gen = g() - assert sys.exc_info()[0] is IndexError - assert next(gen) is ValueError - assert sys.exc_info()[0] is IndexError - assert next(gen) is ValueError - assert sys.exc_info()[0] is IndexError - with raises(StopIteration): - next(gen) - assert sys.exc_info()[0] is IndexError - - def test_exc_info_in_generator_2(self): - import sys - def g(): - yield sys.exc_info()[0] - try: - raise LookupError - except LookupError: - yield sys.exc_info()[0] - yield sys.exc_info()[0] - try: - raise IndexError - except IndexError: - gen = g() # the IndexError is not captured at all +def test_exc_info_in_generator(): + import sys + def g(): try: raise ValueError except ValueError: - assert next(gen) is ValueError - assert next(gen) is LookupError - assert next(gen) is ValueError + yield sys.exc_info()[0] + yield sys.exc_info()[0] + try: + raise IndexError + except IndexError: + gen = g() + assert sys.exc_info()[0] is IndexError + assert next(gen) is ValueError + assert sys.exc_info()[0] is IndexError + assert next(gen) is ValueError + assert 
sys.exc_info()[0] is IndexError + with raises(StopIteration): + next(gen) + assert sys.exc_info()[0] is IndexError - def test_exc_info_in_generator_3(self): - import sys - def g(): +def test_exc_info_in_generator_2(): + import sys + def g(): + yield sys.exc_info()[0] + try: + raise LookupError + except LookupError: yield sys.exc_info()[0] - yield sys.exc_info()[0] - yield sys.exc_info()[0] - gen = g() - try: - raise IndexError - except IndexError: - assert next(gen) is IndexError - assert next(gen) is None + yield sys.exc_info()[0] + try: + raise IndexError + except IndexError: + gen = g() # the IndexError is not captured at all + try: + raise ValueError + except ValueError: + assert next(gen) is ValueError + assert next(gen) is LookupError + assert next(gen) is ValueError + +def test_exc_info_in_generator_3(): + import sys + def g(): + yield sys.exc_info()[0] + yield sys.exc_info()[0] + yield sys.exc_info()[0] + gen = g() + try: + raise IndexError + except IndexError: + assert next(gen) is IndexError + assert next(gen) is None + try: + raise ValueError + except ValueError: + assert next(gen) is ValueError + +def test_exc_info_in_generator_4(): + skip("buggy behavior, both in CPython and in PyPy") + import sys + def g(): try: raise ValueError except ValueError: - assert next(gen) is ValueError + yield 1 + assert sys.exc_info() == (None, None, None) + yield 2 + gen = g() + try: + raise IndexError + except IndexError: + assert next(gen) is 1 + assert next(gen) is 2 - def test_exc_info_in_generator_4(self): - skip("buggy behavior, both in CPython and in PyPy") - import sys - def g(): - try: - raise ValueError - except ValueError: - yield 1 - assert sys.exc_info() == (None, None, None) - yield 2 - gen = g() +def test_multiple_invalid_sends(): + def mygen(): + yield 42 + g = mygen() + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) + +def test_delegating_close(): + """ + Test delegating 'close' + """ + trace = [] + def g1(): try: - raise 
IndexError - except IndexError: - assert next(gen) is 1 - assert next(gen) is 2 + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + g.close() + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1" + ] - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - with raises(TypeError): - g.send(2) - with raises(TypeError): - g.send(2) +def test_handing_exception_while_delegating_close(): + """ + Test handling exception while delegating 'close' + """ + trace = [] + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + raise ValueError("nybbles have exploded with delight") + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + with raises(ValueError) as excinfo: + g.close() + assert excinfo.value.args[0] == "nybbles have exploded with delight" + assert isinstance(excinfo.value.__context__, GeneratorExit) + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1", + ] +def test_delegating_throw(): + """ + Test delegating 'throw' + """ + trace = [] + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + g = g1() + for i in range(2): + x = next(g) + 
trace.append("Yielded %s" % (x,)) + e = ValueError("tomato ejected") + with raises(ValueError) as excinfo: + g.throw(e) + assert excinfo.value.args[0] == "tomato ejected" + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1", + ] -def test_should_not_inline(space): - from pypy.interpreter.generator import should_not_inline - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - return g.__code__ - ''') - assert should_not_inline(w_co) == False - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - yield x + 6 - return g.__code__ - ''') - assert should_not_inline(w_co) == True +def test_delegating_throw_to_non_generator(): + """ + Test delegating 'throw' to non-generator + """ + trace = [] + def g(): + try: + trace.append("Starting g") + yield from range(10) + finally: + trace.append("Finishing g") + gi = g() + for i in range(5): + x = next(gi) + trace.append("Yielded %s" % (x,)) + with raises(ValueError) as excinfo: + gi.throw(ValueError("tomato ejected")) + assert excinfo.value.args[0] == "tomato ejected" + assert trace == [ + "Starting g", + "Yielded 0", + "Yielded 1", + "Yielded 2", + "Yielded 3", + "Yielded 4", + "Finishing g", + ] -class AppTestYieldFrom: - def test_delegating_close(self): - """ - Test delegating 'close' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - g.close() - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1" - ] +def test_broken_getattr_handling(): + """ + Test subiterator 
with a broken getattr implementation + """ + import _io, sys + class Broken: + def __iter__(self): + return self + def __next__(self): + return 1 + def __getattr__(self, attr): + 1 / 0 - def test_handing_exception_while_delegating_close(self): - """ - Test handling exception while delegating 'close' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - raise ValueError("nybbles have exploded with delight") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - with raises(ValueError) as excinfo: - g.close() - assert excinfo.value.args[0] == "nybbles have exploded with delight" - assert isinstance(excinfo.value.__context__, GeneratorExit) - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1", - ] + def g(): + yield from Broken() - def test_delegating_throw(self): - """ - Test delegating 'throw' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - e = ValueError("tomato ejected") - with raises(ValueError) as excinfo: - g.throw(e) - assert excinfo.value.args[0] == "tomato ejected" - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1", - ] + gi = g() + assert next(gi) == 1 + with 
raises(ZeroDivisionError): + gi.send(1) - def test_delegating_throw_to_non_generator(self): - """ - Test delegating 'throw' to non-generator - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g(): - try: - trace.append("Starting g") - yield from range(10) - finally: - trace.append("Finishing g") - ''', d) - g = d['g'] - gi = g() - for i in range(5): - x = next(gi) - trace.append("Yielded %s" % (x,)) - with raises(ValueError) as excinfo: - gi.throw(ValueError("tomato ejected")) - assert excinfo.value.args[0] == "tomato ejected" - assert trace == [ - "Starting g", - "Yielded 0", - "Yielded 1", - "Yielded 2", - "Yielded 3", - "Yielded 4", - "Finishing g", - ] + gi = g() + assert next(gi) == 1 + with raises(ZeroDivisionError): + gi.throw(RuntimeError) - def test_broken_getattr_handling(self): - """ - Test subiterator with a broken getattr implementation - """ - class Broken: - def __iter__(self): - return self - def __next__(self): - return 1 - def __getattr__(self, attr): - 1/0 + gi = g() + assert next(gi) == 1 + sys.stderr = _io.StringIO() + gi.close() + assert 'ZeroDivisionError' in sys.stderr.getvalue() - d = dict(Broken=Broken) - exec('''if 1: - def g(): - yield from Broken() - ''', d) - g = d['g'] +def test_returning_value_from_delegated_throw(): + """ + Test returning value from delegated 'throw' + """ + trace = [] + class LunchError(Exception): + pass + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + except LunchError: + trace.append("Caught LunchError in g2") + yield "g2 lunch saved" + yield "g2 yet more spam" + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + e = LunchError("tomato ejected") + g.throw(e) + for x in g: + trace.append("Yielded %s" % (x,)) + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + 
"Yielded g2 spam", + "Caught LunchError in g2", + "Yielded g2 yet more spam", + "Yielded g1 eggs", + "Finishing g1", + ] - gi = g() - assert next(gi) == 1 - with raises(ZeroDivisionError): - gi.send(1) +def test_catching_exception_from_subgen_and_returning(): + """ + Test catching an exception thrown into a + subgenerator and returning a value + """ + trace = [] + def inner(): + try: + yield 1 + except ValueError: + trace.append("inner caught ValueError") + return 2 - gi = g() - assert next(gi) == 1 - with raises(ZeroDivisionError): - gi.throw(RuntimeError) + def outer(): + v = yield from inner() + trace.append("inner returned %r to outer" % v) + yield v + g = outer() + trace.append(next(g)) + trace.append(g.throw(ValueError)) + assert trace == [ + 1, + "inner caught ValueError", + "inner returned 2 to outer", + 2, + ] - gi = g() - assert next(gi) == 1 - import _io, sys - sys.stderr = _io.StringIO() - gi.close() - assert 'ZeroDivisionError' in sys.stderr.getvalue() +def test_exception_context(): + import operator + def f(): + try: + raise ValueError + except ValueError: + yield from map(operator.truediv, [2, 3], [4, 0]) + gen = f() + assert next(gen) == 0.5 + try: + next(gen) + except ZeroDivisionError as e: + assert e.__context__ is not None + assert isinstance(e.__context__, ValueError) + else: + assert False, "should have raised" - def test_returning_value_from_delegated_throw(self): - """ - Test returning value from delegated 'throw' - """ - trace = [] - class LunchError(Exception): - pass - d = dict(trace=trace, LunchError=LunchError) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - except LunchError: - trace.append("Caught LunchError in g2") - yield "g2 lunch saved" - yield "g2 yet more spam" - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in 
range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - e = LunchError("tomato ejected") - g.throw(e) - for x in g: - trace.append("Yielded %s" % (x,)) - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Caught LunchError in g2", - "Yielded g2 yet more spam", - "Yielded g1 eggs", - "Finishing g1", - ] - def test_catching_exception_from_subgen_and_returning(self): - """ - Test catching an exception thrown into a - subgenerator and returning a value - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def inner(): - try: - yield 1 - except ValueError: - trace.append("inner caught ValueError") - return 2 +def test_past_generator_stop(): + # how it works without 'from __future__' import generator_stop + def f(x): + raise StopIteration + yield x + with raises(StopIteration): + next(f(5)) - def outer(): - v = yield from inner() - trace.append("inner returned %r to outer" % v) - yield v - ''', d) - inner, outer = d['inner'], d['outer'] - g = outer() - trace.append(next(g)) - trace.append(g.throw(ValueError)) - assert trace == [ - 1, - "inner caught ValueError", - "inner returned 2 to outer", - 2, - ] - - def test_exception_context(self): """ - import operator - def f(): - try: - raise ValueError - except ValueError: - yield from map(operator.truediv, [2, 3], [4, 0]) - gen = f() - assert next(gen) == 0.5 - try: - next(gen) - except ZeroDivisionError as e: - assert e.__context__ is not None - assert isinstance(e.__context__, ValueError) - else: - assert False, "should have raised" - """ - - -class AppTestGeneratorStop: - - def test_past_generator_stop(self): - # how it works without 'from __future__' import generator_stop - def f(x): - raise StopIteration - yield x - with raises(StopIteration): - next(f(5)) - - def test_future_generator_stop(self): - d = {} - exec("""from __future__ import generator_stop +def test_future_generator_stop(): + d = {} + exec("""from __future__ import generator_stop def f(x): raise 
StopIteration yield x """, d) - f = d['f'] - with raises(RuntimeError): - next(f(5)) + f = d['f'] + with raises(RuntimeError): + next(f(5)) - def test_generator_stop_cause(self): - d = {} - exec("""from __future__ import generator_stop +def test_generator_stop_cause(): + d = {} + exec("""from __future__ import generator_stop def gen1(): yield 42 """, d) - my_gen = d['gen1']() - assert next(my_gen) == 42 - stop_exc = StopIteration('spam') - with raises(RuntimeError) as e: - my_gen.throw(StopIteration, stop_exc, None) - assert e.value.__cause__ is stop_exc - assert e.value.__context__ is stop_exc + my_gen = d['gen1']() + assert next(my_gen) == 42 + stop_exc = StopIteration('spam') + with raises(RuntimeError) as e: + my_gen.throw(StopIteration, stop_exc, None) + assert e.value.__cause__ is stop_exc + assert e.value.__context__ is stop_exc - def test_return_tuple(self): - d = {} - exec("def gen1(): return (yield 1)", d) +def test_return_tuple(): + def gen1(): + return (yield 1) + gen = gen1() + assert next(gen) == 1 + with raises(StopIteration) as excinfo: + gen.send((2,)) + assert excinfo.value.value == (2,) - gen = d['gen1']() - assert next(gen) == 1 - with raises(StopIteration) as excinfo: - gen.send((2,)) - assert excinfo.value.value == (2,) - - def test_return_stopiteration(self): - d = {} - exec("def gen1(): return (yield 1)", d) - - gen = d['gen1']() - assert next(gen) == 1 - with raises(StopIteration) as excinfo: - gen.send(StopIteration(2)) - assert isinstance(excinfo.value, StopIteration) - assert excinfo.value.value.value == 2 +def test_return_stopiteration(): + def gen1(): + return (yield 1) + gen = gen1() + assert next(gen) == 1 + with raises(StopIteration) as excinfo: + gen.send(StopIteration(2)) + assert isinstance(excinfo.value, StopIteration) + assert excinfo.value.value.value == 2 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ 
b/pypy/interpreter/test/test_generator.py @@ -1,572 +1,3 @@ -class AppTestGenerator: - - def test_generator(self): - def f(): - yield 1 - assert next(f()) == 1 - - def test_generator2(self): - def f(): - yield 1 - g = f() - assert next(g) == 1 - with raises(StopIteration): - next(g) - - def test_attributes(self): - def f(): - yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running - next(g) - assert not g.gi_running - with raises(StopIteration): - next(g) - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - - def test_generator3(self): - def f(): - yield 1 - g = f() - assert list(g) == [1] - - def test_generator4(self): - def f(): - yield 1 - g = f() - assert [x for x in g] == [1] - - def test_generator5(self): - d = {} - exec("""if 1: - def f(): - v = (yield ) - yield v - g = f() - next(g) - """, d, d) - g = d['g'] - assert g.send(42) == 42 - - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - with raises(NameError): - g.throw(NameError, "Error") - - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - with raises(NameError): - g.throw(NameError("Error")) - - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert next(g) == 1 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - next(g) - - def test_throw4(self): - d = {} - exec("""if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """, d, d) - g = d['g'] - assert next(g) == 1 - assert next(g) == 2 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - next(g) - - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - next(g) - # String exceptions are not allowed anymore - with raises(TypeError): - g.throw("Error") - 
assert g.throw(Exception) == 3 - with raises(StopIteration): - g.throw(Exception) - - def test_throw6(self): - def f(): - yield 2 - g = f() - with raises(NameError): - g.throw(NameError, "Error", None) - - - def test_throw_fail(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), "error") - - def test_throw_fail2(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(list()) - - def test_throw_fail3(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), None, "not tb object") - - def test_throw_finishes_generator(self): - def f(): - yield 1 - g = f() - assert g.gi_frame is not None - with raises(ValueError): - g.throw(ValueError) - assert g.gi_frame is None - - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = next(g) - assert res == 1 - with raises(StopIteration): - next(g) - with raises(NameError): - g.throw(NameError) - - def test_throw_tb(self): - def f(): - try: - yield - except: - raise - g = f() - try: - 1/0 - except ZeroDivisionError as v: - try: - g.throw(v) - except Exception as w: - tb = w.__traceback__ - levels = 0 - while tb: - levels += 1 - tb = tb.tb_next - assert levels == 3 - - def test_throw_context(self): - # gen.throw(exc) must not modify exc.__context__ - def gen(): - try: - yield - except: - raise ValueError - - try: - raise KeyError - except KeyError: - g = gen() - next(g) - exc1 = Exception(1) - exc2 = Exception(2) - exc2.__context__ = exc1 - try: - g.throw(exc2) - except ValueError: - assert exc2.__context__ is exc1 - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise StopIteration - g = f() - next(g) - assert 
g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - next(g) - with raises(NameError): - g.close() - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - next(g) - with raises(RuntimeError): - g.close() - From pypy.commits at gmail.com Wed Dec 4 21:16:59 2019 From: pypy.commits at gmail.com (rlamy) Date: Wed, 04 Dec 2019 18:16:59 -0800 (PST) Subject: [pypy-commit] pypy default: Move app-level tests from test_generator.py to apptest_generator.py Message-ID: <5de8689b.1c69fb81.7219e.ce11@mx.google.com> Author: Ronan Lamy Branch: Changeset: r98234:72f3423c43e5 Date: 2019-12-05 02:13 +0000 http://bitbucket.org/pypy/pypy/changeset/72f3423c43e5/ Log: Move app-level tests from test_generator.py to apptest_generator.py diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/apptest_generator.py copy from pypy/interpreter/test/test_generator.py copy to pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,339 +1,311 @@ -class AppTestGenerator: +from pytest import raises, skip - def test_generator(self): - def f(): +def test_generator(): + def f(): + yield 1 + assert f().next() == 1 + +def test_generator2(): + def f(): + yield 1 + g = f() + assert g.next() == 1 + with raises(StopIteration): + g.next() + +def test_attributes(): + def f(): + yield 1 + assert g.gi_running + g = f() + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + assert g.gi_frame is not None + assert not g.gi_running + g.next() + assert not g.gi_running + with raises(StopIteration): + g.next() + assert not g.gi_running + assert g.gi_frame is None + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + +def test_generator3(): + def f(): + yield 1 + g = f() + assert list(g) == [1] + +def test_generator4(): + def f(): + yield 1 + g = f() + assert [x for x in g] == 
[1] + +def test_generator5(): + def f(): + v = (yield) + yield v + g = f() + g.next() + assert g.send(42) == 42 + +def test_throw1(): + def f(): + yield 2 + g = f() + # two arguments version + with raises(NameError): + g.throw(NameError, "Error") + +def test_throw2(): + def f(): + yield 2 + g = f() + # single argument version + with raises(NameError): + g.throw(NameError("Error")) + +def test_throw3(): + def f(): + try: yield 1 - assert f().next() == 1 + yield 2 + except NameError: + yield 3 + g = f() + assert g.next() == 1 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + g.next() - def test_generator2(self): - def f(): +def test_throw4(): + def f(): + try: yield 1 - g = f() - assert g.next() == 1 - with raises(StopIteration): - g.next() + v = (yield 2) + except NameError: + yield 3 + g = f() + assert g.next() == 1 + assert g.next() == 2 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + g.next() - def test_attributes(self): - def f(): +def test_throw5(): + def f(): + try: yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running + except Exception: + x = 3 + try: + yield x + except Exception: + pass + g = f() + g.next() + # String exceptions are not allowed anymore + with raises(TypeError): + g.throw("Error") + assert g.throw(Exception) == 3 + with raises(StopIteration): + g.throw(Exception) + +def test_throw6(): + def f(): + yield 2 + g = f() + with raises(NameError): + g.throw(NameError, "Error", None) + + +def test_throw_fail(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), "error") + +def test_throw_fail2(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(list()) + +def test_throw_fail3(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") + +def test_throw_finishes_generator(): + def f(): + yield 
1 + g = f() + assert g.gi_frame is not None + with raises(ValueError): + g.throw(ValueError) + assert g.gi_frame is None + +def test_throw_bug(): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + +def test_throw_on_finished_generator(): + def f(): + yield 1 + g = f() + res = g.next() + assert res == 1 + with raises(StopIteration): g.next() - assert not g.gi_running - with raises(StopIteration): - g.next() - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' + with raises(NameError): + g.throw(NameError) - def test_generator3(self): - def f(): +def test_close(): + def f(): + yield 1 + g = f() + assert g.close() is None + +def test_close2(): + def f(): + try: yield 1 - g = f() - assert list(g) == [1] + except GeneratorExit: + raise StopIteration + g = f() + g.next() + assert g.close() is None - def test_generator4(self): - def f(): +def test_close3(): + def f(): + try: yield 1 - g = f() - assert [x for x in g] == [1] + except GeneratorExit: + raise NameError + g = f() + g.next() + with raises(NameError): + g.close() - def test_generator5(self): - d = {} - exec """if 1: - def f(): - v = (yield ) - yield v - g = f() - g.next() - """ in d - g = d['g'] - assert g.send(42) == 42 +def test_close_fail(): + def f(): + try: + yield 1 + except GeneratorExit: + yield 2 + g = f() + g.next() + with raises(RuntimeError): + g.close() - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - with raises(NameError): - g.throw(NameError, "Error") +def test_close_on_collect(): + import gc + def f(): + try: + yield + finally: + f.x = 42 + g = f() + g.next() + del g + gc.collect() + assert f.x == 42 - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - with raises(NameError): - g.throw(NameError("Error")) +def test_generator_raises_typeerror(): + def f(): + yield 1 + g 
= f() + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert g.next() == 1 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - g.next() +def test_generator_explicit_stopiteration(): + def f(): + yield 1 + raise StopIteration + g = f() + assert [x for x in g] == [1] - def test_throw4(self): - d = {} - exec """if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """ in d - g = d['g'] - assert g.next() == 1 - assert g.next() == 2 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - g.next() +def test_generator_propagate_stopiteration(): + def f(): + it = iter([1]) + while 1: + yield it.next() + g = f() + assert [x for x in g] == [1] - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - g.next() - # String exceptions are not allowed anymore - with raises(TypeError): - g.throw("Error") - assert g.throw(Exception) == 3 - with raises(StopIteration): - g.throw(Exception) +def test_generator_restart(): + def g(): + i = me.next() + yield i + me = g() + with raises(ValueError): + me.next() - def test_throw6(self): - def f(): - yield 2 - g = f() - with raises(NameError): - g.throw(NameError, "Error", None) +def test_generator_expression(): + exec "res = sum(i*i for i in range(5))" + assert res == 30 +def test_generator_expression_2(): + def f(): + total = sum(i for i in [x for x in z]) + return total, x + z = [1, 2, 7] + assert f() == (10, 7) - def test_throw_fail(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), "error") +def test_repr(): + def myFunc(): + yield 1 + g = myFunc() + r = repr(g) + assert r.startswith(" 5 + try: + next(gen) + except TypeError: + pass - def test_throw_bug(self): - def f(): - try: - 
x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = g.next() - assert res == 1 - with raises(StopIteration): - g.next() - with raises(NameError): - g.throw(NameError) - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise StopIteration - g = f() - g.next() - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - g.next() - with raises(NameError): - g.close() - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - g.next() - with raises(RuntimeError): - g.close() - - def test_close_on_collect(self): - import gc - def f(): - try: - yield - finally: - f.x = 42 - g = f() - g.next() - del g - gc.collect() - assert f.x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.send() # one argument required - with raises(TypeError): - g.send(1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield it.next() - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = me.next() - yield i - me = g() - with raises(ValueError): - me.next() - - def test_generator_expression(self): - exec "res = sum(i*i for i in range(5))" - assert res == 30 - - def test_generator_expression_2(self): - d = {} - exec """ -def f(): - total = sum(i for i in [x for x in z]) - return total, x -z = [1, 2, 7] -res = f() -""" in d - assert d['res'] == (10, 7) - - def test_repr(self): - def myFunc(): - 
yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(" 5 - try: - next(gen) - except TypeError: - pass - - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - with raises(TypeError): - g.send(2) - with raises(TypeError): - g.send(2) - - -def test_should_not_inline(space): - from pypy.interpreter.generator import should_not_inline - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - return g.__code__ - ''') - assert should_not_inline(w_co) == False - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - yield x + 6 - return g.__code__ - ''') - assert should_not_inline(w_co) == True +def test_multiple_invalid_sends(): + def mygen(): + yield 42 + g = mygen() + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -1,327 +1,3 @@ -class AppTestGenerator: - - def test_generator(self): - def f(): - yield 1 - assert f().next() == 1 - - def test_generator2(self): - def f(): - yield 1 - g = f() - assert g.next() == 1 - with raises(StopIteration): - g.next() - - def test_attributes(self): - def f(): - yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running - g.next() - assert not g.gi_running - with raises(StopIteration): - g.next() - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - - def test_generator3(self): - def f(): - yield 1 - g = f() - assert list(g) == [1] - - def test_generator4(self): - def f(): - yield 1 - g = f() - assert [x for x in g] == [1] - - def test_generator5(self): - d = {} - exec """if 1: - def f(): - v = (yield ) - yield v - g = f() - g.next() - """ in d - g = d['g'] - assert g.send(42) == 42 - - def test_throw1(self): - def f(): - 
yield 2 - g = f() - # two arguments version - with raises(NameError): - g.throw(NameError, "Error") - - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - with raises(NameError): - g.throw(NameError("Error")) - - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert g.next() == 1 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - g.next() - - def test_throw4(self): - d = {} - exec """if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """ in d - g = d['g'] - assert g.next() == 1 - assert g.next() == 2 - assert g.throw(NameError("Error")) == 3 - with raises(StopIteration): - g.next() - - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - g.next() - # String exceptions are not allowed anymore - with raises(TypeError): - g.throw("Error") - assert g.throw(Exception) == 3 - with raises(StopIteration): - g.throw(Exception) - - def test_throw6(self): - def f(): - yield 2 - g = f() - with raises(NameError): - g.throw(NameError, "Error", None) - - - def test_throw_fail(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), "error") - - def test_throw_fail2(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(list()) - - def test_throw_fail3(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.throw(NameError("Error"), None, "not tb object") - - def test_throw_finishes_generator(self): - def f(): - yield 1 - g = f() - assert g.gi_frame is not None - with raises(ValueError): - g.throw(ValueError) - assert g.gi_frame is None - - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = g.next() - assert res 
== 1 - with raises(StopIteration): - g.next() - with raises(NameError): - g.throw(NameError) - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise StopIteration - g = f() - g.next() - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - g.next() - with raises(NameError): - g.close() - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - g.next() - with raises(RuntimeError): - g.close() - - def test_close_on_collect(self): - import gc - def f(): - try: - yield - finally: - f.x = 42 - g = f() - g.next() - del g - gc.collect() - assert f.x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - with raises(TypeError): - g.send() # one argument required - with raises(TypeError): - g.send(1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield it.next() - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = me.next() - yield i - me = g() - with raises(ValueError): - me.next() - - def test_generator_expression(self): - exec "res = sum(i*i for i in range(5))" - assert res == 30 - - def test_generator_expression_2(self): - d = {} - exec """ -def f(): - total = sum(i for i in [x for x in z]) - return total, x -z = [1, 2, 7] -res = f() -""" in d - assert d['res'] == (10, 7) - - def test_repr(self): - def myFunc(): - yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(" 5 - try: - next(gen) - except TypeError: - pass - - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - with raises(TypeError): - g.send(2) - with 
raises(TypeError): - g.send(2) - - def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline w_co = space.appexec([], '''(): From pypy.commits at gmail.com Wed Dec 4 21:17:00 2019 From: pypy.commits at gmail.com (rlamy) Date: Wed, 04 Dec 2019 18:17:00 -0800 (PST) Subject: [pypy-commit] pypy py3.6: dummy merge from default Message-ID: <5de8689c.1c69fb81.31b8b.e65a@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98235:94a7a1568272 Date: 2019-12-05 02:15 +0000 http://bitbucket.org/pypy/pypy/changeset/94a7a1568272/ Log: dummy merge from default From pypy.commits at gmail.com Thu Dec 5 07:50:28 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 05 Dec 2019 04:50:28 -0800 (PST) Subject: [pypy-commit] buildbot default: aarch64 has timeout failures Message-ID: <5de8fd14.1c69fb81.8a6b5.d078@mx.google.com> Author: Matti Picus Branch: Changeset: r1119:ad9098b06ba1 Date: 2019-12-05 14:49 +0200 http://bitbucket.org/pypy/buildbot/changeset/ad9098b06ba1/ Log: aarch64 has timeout failures diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -559,7 +559,7 @@ setup_steps(platform, self) - self.timeout=kwargs.get('timeout', 1000) + self.timeout=kwargs.get('timeout', 2000) nDays = '3' #str, not int if platform == 'win32': From pypy.commits at gmail.com Thu Dec 5 12:34:51 2019 From: pypy.commits at gmail.com (arigo) Date: Thu, 05 Dec 2019 09:34:51 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Add comment Message-ID: <5de93fbb.1c69fb81.ccb1a.3282@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r98236:21d58e234583 Date: 2019-12-05 18:34 +0100 http://bitbucket.org/pypy/pypy/changeset/21d58e234583/ Log: Add comment diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -208,6 +208,9 @@ def reinit_lock(self): # Called after fork() to ensure that newly 
created child # processes do not share locks with the parent + # (Note that this runs after interp_imp.acquire_lock() + # done in the "before" fork hook, so that's why we decrease + # the lockcounter here) if self.lockcounter > 1: # Forked as a side effect of import self.lock = self.space.allocate_lock() From pypy.commits at gmail.com Fri Dec 6 09:35:15 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 06:35:15 -0800 (PST) Subject: [pypy-commit] pypy default: move slow test to its own class and skip it Message-ID: <5dea6723.1c69fb81.d5ef9.5c29@mx.google.com> Author: Matti Picus Branch: Changeset: r98238:9c171d039841 Date: 2019-12-06 16:30 +0200 http://bitbucket.org/pypy/pypy/changeset/9c171d039841/ Log: move slow test to its own class and skip it diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -3,36 +3,6 @@ class AppTestThread(GenericTestThread): - def setup_class(cls): - GenericTestThread.setup_class.im_func(cls) - # if we cannot start more than, say, 1000 threads on this OS, then - # we can check that we get the proper error at app-level - space = cls.space - lock = thread.allocate_lock() - lock.acquire() - def f(): - lock.acquire() - lock.release() - start = thread._count() - try: - try: - for i in range(1000): - thread.start_new_thread(f, ()) - finally: - lock.release() - except (thread.error, MemoryError): - cls.w_can_start_many_threads = space.wrap(False) - else: - cls.w_can_start_many_threads = space.wrap(True) - # wait a bit to allow all threads to finish now - remaining = thread._count() - retries = 0 - while remaining > start: - retries += 1 - if retries == 200: - raise Exception("the test's threads don't stop!") - time.sleep(0.2) - remaining = thread._count() def test_start_new_thread(self): import thread @@ -189,35 +159,6 @@ assert done # see stderr for failures in threads assert sorted(lst) == 
range(120) - def test_many_threads(self): - import thread, time - if self.can_start_many_threads: - skip("this OS supports too many threads to check (> 1000)") - lock = thread.allocate_lock() - lock.acquire() - count = [0] - def f(): - count[0] += 1 - lock.acquire() - lock.release() - count[0] -= 1 - try: - try: - for i in range(1000): - thread.start_new_thread(f, ()) - finally: - lock.release() - # wait a bit to allow most threads to finish now - while count[0] > 10: - print count[0] # <- releases the GIL - print "ok." - except (thread.error, MemoryError): - pass - else: - raise Exception("could unexpectedly start 1000 threads") - # safety: check that we can start a new thread here - thread.start_new_thread(lambda: None, ()) - def test_stack_size(self): import thread thread.stack_size(0) @@ -256,3 +197,74 @@ waiting = [] thread.start_new_thread(f, ()) raises(KeyboardInterrupt, busy_wait) + + at pytest.mark.skip("too slow") +class _AppTestThread(GenericTestThread): + ''' + This test is very slow, do not run it by default. 
+ ''' + def setup_class(cls): + GenericTestThread.setup_class.im_func(cls) + # if we cannot start more than, say, 1000 threads on this OS, then + # we can check that we get the proper error at app-level + space = cls.space + lock = thread.allocate_lock() + lock.acquire() + def f(): + lock.acquire() + lock.release() + start = thread._count() + try: + try: + for i in range(1000): + thread.start_new_thread(f, ()) + finally: + lock.release() + except (thread.error, MemoryError): + cls.w_can_start_many_threads = space.wrap(False) + else: + cls.w_can_start_many_threads = space.wrap(True) + # wait a bit to allow all threads to finish now + remaining = thread._count() + retries = 0 + while remaining > start: + retries += 1 + if retries == 200: + raise Exception("the test's threads don't stop!") + time.sleep(0.2) + remaining = thread._count() + + def test_many_threads(self): + import time, sys + if sys.version_info[0] < 3: + import thread as _thread + else: + import _thread + if self.can_start_many_threads or sys.platform == 'win32': + skip("this OS supports too many threads to check (> 1000)") + lock = _thread.allocate_lock() + lock.acquire() + count = [0] + def f(): + count[0] += 1 + lock.acquire() + lock.release() + count[0] -= 1 + try: + try: + for i in range(1000): + _thread.start_new_thread(f, ()) + finally: + lock.release() + # wait a bit to allow most threads to finish now + while count[0] > 10: + print(count[0]) # <- releases the GIL + print("ok.") + except (_thread.error, MemoryError): + pass + else: + raise Exception("could unexpectedly start 1000 threads") + # safety: check that we can start a new thread here + _thread.start_new_thread(lambda: None, ()) + + From pypy.commits at gmail.com Fri Dec 6 09:35:17 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 06:35:17 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into branch Message-ID: <5dea6725.1c69fb81.e8153.4a48@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: 
r98239:aa3b8c5bd232 Date: 2019-12-06 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/aa3b8c5bd232/ Log: merge default into branch diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -3,36 +3,6 @@ class AppTestThread(GenericTestThread): - def setup_class(cls): - GenericTestThread.setup_class.im_func(cls) - # if we cannot start more than, say, 1000 threads on this OS, then - # we can check that we get the proper error at app-level - space = cls.space - lock = thread.allocate_lock() - lock.acquire() - def f(): - lock.acquire() - lock.release() - start = thread._count() - try: - try: - for i in range(1000): - thread.start_new_thread(f, ()) - finally: - lock.release() - except (thread.error, MemoryError): - cls.w_can_start_many_threads = space.wrap(False) - else: - cls.w_can_start_many_threads = space.wrap(True) - # wait a bit to allow all threads to finish now - remaining = thread._count() - retries = 0 - while remaining > start: - retries += 1 - if retries == 200: - raise Exception("the test's threads don't stop!") - time.sleep(0.2) - remaining = thread._count() def test_thread_error(self): import _thread @@ -193,35 +163,6 @@ assert done # see stderr for failures in threads assert sorted(lst) == list(range(120)) - def test_many_threads(self): - import _thread, time, sys - if self.can_start_many_threads or sys.platform == 'win32': - skip("this OS supports too many threads to check (> 1000)") - lock = _thread.allocate_lock() - lock.acquire() - count = [0] - def f(): - count[0] += 1 - lock.acquire() - lock.release() - count[0] -= 1 - try: - try: - for i in range(1000): - _thread.start_new_thread(f, ()) - finally: - lock.release() - # wait a bit to allow most threads to finish now - while count[0] > 10: - print(count[0]) # <- releases the GIL - print("ok.") - except (_thread.error, MemoryError): - pass - else: - raise Exception("could 
unexpectedly start 1000 threads") - # safety: check that we can start a new thread here - _thread.start_new_thread(lambda: None, ()) - def test_stack_size(self): import _thread _thread.stack_size(0) @@ -260,3 +201,74 @@ waiting = [] _thread.start_new_thread(f, ()) raises(KeyboardInterrupt, busy_wait) + + at pytest.mark.skip("too slow") +class _AppTestThread(GenericTestThread): + ''' + This test is very slow, do not run it by default. + ''' + def setup_class(cls): + GenericTestThread.setup_class.im_func(cls) + # if we cannot start more than, say, 1000 threads on this OS, then + # we can check that we get the proper error at app-level + space = cls.space + lock = thread.allocate_lock() + lock.acquire() + def f(): + lock.acquire() + lock.release() + start = thread._count() + try: + try: + for i in range(1000): + thread.start_new_thread(f, ()) + finally: + lock.release() + except (thread.error, MemoryError): + cls.w_can_start_many_threads = space.wrap(False) + else: + cls.w_can_start_many_threads = space.wrap(True) + # wait a bit to allow all threads to finish now + remaining = thread._count() + retries = 0 + while remaining > start: + retries += 1 + if retries == 200: + raise Exception("the test's threads don't stop!") + time.sleep(0.2) + remaining = thread._count() + + def test_many_threads(self): + import time, sys + if sys.version_info[0] < 3: + import thread as _thread + else: + import _thread + if self.can_start_many_threads or sys.platform == 'win32': + skip("this OS supports too many threads to check (> 1000)") + lock = _thread.allocate_lock() + lock.acquire() + count = [0] + def f(): + count[0] += 1 + lock.acquire() + lock.release() + count[0] -= 1 + try: + try: + for i in range(1000): + _thread.start_new_thread(f, ()) + finally: + lock.release() + # wait a bit to allow most threads to finish now + while count[0] > 10: + print(count[0]) # <- releases the GIL + print("ok.") + except (_thread.error, MemoryError): + pass + else: + raise Exception("could 
unexpectedly start 1000 threads") + # safety: check that we can start a new thread here + _thread.start_new_thread(lambda: None, ()) + + From pypy.commits at gmail.com Fri Dec 6 09:35:19 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 06:35:19 -0800 (PST) Subject: [pypy-commit] pypy default: add missing import Message-ID: <5dea6727.1c69fb81.889bb.4418@mx.google.com> Author: Matti Picus Branch: Changeset: r98240:890c142fd3b8 Date: 2019-12-06 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/890c142fd3b8/ Log: add missing import diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -1,5 +1,6 @@ import thread, time from pypy.module.thread.test.support import GenericTestThread +import pytest class AppTestThread(GenericTestThread): From pypy.commits at gmail.com Fri Dec 6 09:35:21 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 06:35:21 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5dea6729.1c69fb81.55c47.8af9@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98241:d91c0d495118 Date: 2019-12-06 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/d91c0d495118/ Log: merge default into py3.6 diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -1,5 +1,6 @@ import thread, time from pypy.module.thread.test.support import GenericTestThread +import pytest class AppTestThread(GenericTestThread): From pypy.commits at gmail.com Fri Dec 6 10:33:31 2019 From: pypy.commits at gmail.com (antocuni) Date: Fri, 06 Dec 2019 07:33:31 -0800 (PST) Subject: [pypy-commit] pypy default: Use a cache to avoid parsing the same cdecl again and again, which is done Message-ID: <5dea74cb.1c69fb81.60f86.42dc@mx.google.com> Author: Antonio Cuni 
Branch: Changeset: r98242:317104f1b067 Date: 2019-12-06 16:32 +0100 http://bitbucket.org/pypy/pypy/changeset/317104f1b067/ Log: Use a cache to avoid parsing the same cdecl again and again, which is done e.g. for all the various cts.cast(...) around. On my machine, with this patch the time needed to run a single cpyext test goes from ~54s to ~29s diff --git a/pypy/module/cpyext/cparser.py b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -705,6 +705,7 @@ self.struct_typedefs = {} self._handled = set() self._frozen = False + self._cdecl_type_cache = {} # {cdecl: TYPE} cache if includes is not None: for header in includes: self.include(header) @@ -840,6 +841,14 @@ raise NotImplementedError def gettype(self, cdecl): + try: + return self._cdecl_type_cache[cdecl] + except KeyError: + result = self._real_gettype(cdecl) + self._cdecl_type_cache[cdecl] = result + return result + + def _real_gettype(self, cdecl): obj = self.ctx.parse_type(cdecl) result = self.convert_type(obj) if isinstance(result, DelayedStruct): From pypy.commits at gmail.com Sat Dec 7 00:14:39 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 21:14:39 -0800 (PST) Subject: [pypy-commit] pypy default: revert centos6 special case Message-ID: <5deb353f.1c69fb81.c7ba8.11bd@mx.google.com> Author: Matti Picus Branch: Changeset: r98244:acd5d36a7b4c Date: 2019-12-07 07:06 +0200 http://bitbucket.org/pypy/pypy/changeset/acd5d36a7b4c/ Log: revert centos6 special case diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -9,12 +9,6 @@ # various versions. For example it might not list -ltinfo even though # it's needed, or --cflags might be completely empty. Crap. 
-IS_CENTOS_6_10 = False -try: - with open('/etc/redhat-release') as fid: - for line in fid: - if 'CentOS release 6.10' in line: - IS_CENTOS_6_10 = True except IOError: pass @@ -38,8 +32,6 @@ library_dirs=['/usr/lib64']) def try_tools(): - if IS_CENTOS_6_10: - return try: yield ExternalCompilationInfo.from_pkg_config("ncurses") except Exception: From pypy.commits at gmail.com Sat Dec 7 00:14:41 2019 From: pypy.commits at gmail.com (mattip) Date: Fri, 06 Dec 2019 21:14:41 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into branch Message-ID: <5deb3541.1c69fb81.59205.0929@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98245:d4e5e044a1b2 Date: 2019-12-07 07:13 +0200 http://bitbucket.org/pypy/pypy/changeset/d4e5e044a1b2/ Log: merge default into branch diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -9,12 +9,6 @@ # various versions. For example it might not list -ltinfo even though # it's needed, or --cflags might be completely empty. Crap. 
-IS_CENTOS_6_10 = False -try: - with open('/etc/redhat-release') as fid: - for line in fid: - if 'CentOS release 6.10' in line: - IS_CENTOS_6_10 = True except IOError: pass @@ -38,8 +32,6 @@ library_dirs=['/usr/lib64']) def try_tools(): - if IS_CENTOS_6_10: - return try: yield ExternalCompilationInfo.from_pkg_config("ncurses") except Exception: From pypy.commits at gmail.com Sat Dec 7 12:33:11 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 09:33:11 -0800 (PST) Subject: [pypy-commit] buildbot default: typos Message-ID: <5debe257.1c69fb81.3f17d.da6f@mx.google.com> Author: Matti Picus Branch: Changeset: r1121:887ddad23b84 Date: 2019-12-07 19:32 +0200 http://bitbucket.org/pypy/buildbot/changeset/887ddad23b84/ Log: typos diff --git a/docker/Dockerfile b/docker/Dockerfile --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -62,11 +62,11 @@ ADD xz-5.2.4.tar.gz.sig /root/xz-5.2.4.tar.gz.sig RUN sh install_xz5.sh manylinux2010 2>&1 | tee /root/install_xz5.log ADD install_ncurses.sh /root/install_ncurses.sh -RUN /root/install_ncurses.sh 2>&1 | tee /root/install_ncurses.sh +RUN /root/install_ncurses.sh 2>&1 | tee /root/install_ncurses.log # centos6 provides gc7.1, which does not work in a docker. 
Use a newer version # This is for testing only Add install_gc.sh /root/install_gc.sh -RUN sh /root/install_gc.sh 2>&1 | tee /root/install_gc.sh +RUN sh /root/install_gc.sh 2>&1 | tee /root/install_gc.log # prefer our libraries in /usr/local/lib ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH diff --git a/docker/Dockerfile32 b/docker/Dockerfile32 --- a/docker/Dockerfile32 +++ b/docker/Dockerfile32 @@ -62,11 +62,11 @@ ADD xz-5.2.4.tar.gz.sig /root/xz-5.2.4.tar.gz.sig RUN sh install_xz5.sh manylinux2010 m32 2>&1 | tee /root/install_xz5.log ADD install_ncurses.sh /root/install_ncurses.sh -RUN /root/install_ncurses.sh 2>&1 | tee /root/install_ncurses.sh +RUN /root/install_ncurses.sh 2>&1 | tee /root/install_ncurses.log # centos6 provides gc7.1, which does not work in a docker. Use a newer version # This is for testing only Add install_gc.sh /root/install_gc.sh -RUN sh install_gc.sh 2>&1 | tee /root/install_gc.sh +RUN sh /root/install_gc.sh 2>&1 | tee /root/install_gc.log # prefer our libraries in /usr/local/lib ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH From pypy.commits at gmail.com Sat Dec 7 12:36:51 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 09:36:51 -0800 (PST) Subject: [pypy-commit] pypy default: fix ncurses include/lib discovery: add from_config_tool("ncursesw6-config"), reverse order Message-ID: <5debe333.1c69fb81.708f5.c16b@mx.google.com> Author: Matti Picus Branch: Changeset: r98247:838c25e04d2a Date: 2019-12-07 19:35 +0200 http://bitbucket.org/pypy/pypy/changeset/838c25e04d2a/ Log: fix ncurses include/lib discovery: add from_config_tool("ncursesw6-config"), reverse order diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -5,13 +5,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -# We cannot trust ncurses5-config, 
it's broken in various ways in -# various versions. For example it might not list -ltinfo even though -# it's needed, or --cflags might be completely empty. Crap. - -except IOError: - pass - def try_cflags(): yield ExternalCompilationInfo(includes=['curses.h', 'term.h']) yield ExternalCompilationInfo(includes=['curses.h', 'term.h'], @@ -33,7 +26,11 @@ def try_tools(): try: - yield ExternalCompilationInfo.from_pkg_config("ncurses") + yield ExternalCompilationInfo.from_config_tool("ncursesw6-config") + except Exception: + pass + try: + yield ExternalCompilationInfo.from_config_tool("ncurses5-config") except Exception: pass try: @@ -41,7 +38,7 @@ except Exception: pass try: - yield ExternalCompilationInfo.from_config_tool("ncurses5-config") + yield ExternalCompilationInfo.from_pkg_config("ncursesw") except Exception: pass From pypy.commits at gmail.com Sat Dec 7 12:36:53 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 09:36:53 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5debe335.1c69fb81.3308c.d2d7@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98248:c7b17edd132e Date: 2019-12-07 19:36 +0200 http://bitbucket.org/pypy/pypy/changeset/c7b17edd132e/ Log: merge default into py3.6 diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -5,13 +5,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -# We cannot trust ncurses5-config, it's broken in various ways in -# various versions. For example it might not list -ltinfo even though -# it's needed, or --cflags might be completely empty. Crap. 
- -except IOError: - pass - def try_cflags(): yield ExternalCompilationInfo(includes=['curses.h', 'term.h']) yield ExternalCompilationInfo(includes=['curses.h', 'term.h'], @@ -33,7 +26,11 @@ def try_tools(): try: - yield ExternalCompilationInfo.from_pkg_config("ncurses") + yield ExternalCompilationInfo.from_config_tool("ncursesw6-config") + except Exception: + pass + try: + yield ExternalCompilationInfo.from_config_tool("ncurses5-config") except Exception: pass try: @@ -41,7 +38,7 @@ except Exception: pass try: - yield ExternalCompilationInfo.from_config_tool("ncurses5-config") + yield ExternalCompilationInfo.from_pkg_config("ncursesw") except Exception: pass From pypy.commits at gmail.com Sun Dec 8 02:02:36 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:36 -0800 (PST) Subject: [pypy-commit] pypy default: redo _curses_build library detection Message-ID: <5deca00c.1c69fb81.f7ee4.5a10@mx.google.com> Author: Matti Picus Branch: Changeset: r98249:cf0ed0f35064 Date: 2019-12-08 08:26 +0200 http://bitbucket.org/pypy/pypy/changeset/cf0ed0f35064/ Log: redo _curses_build library detection diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -28,13 +28,25 @@ # error message raise e_last -def find_curses_include_dirs(): - if os.path.exists('/usr/include/ncurses'): - return ['/usr/include/ncurses'] - if os.path.exists('/usr/include/ncursesw'): - return ['/usr/include/ncursesw'] - return [] +def find_curses_dir_and_name(): + for base in ('/usr', '/usr/local'): + if os.path.exists(os.path.join(base, 'include', 'ncursesw')): + return base, 'ncursesw' + if os.path.exists(os.path.join(base, 'include', 'ncurses')): + return base, 'ncurses' + return '', None +base, name = find_curses_dir_and_name() +if base: + include_dirs = [os.path.join(base, 'include', name)] + library_dirs = [os.path.join(base, 'lib')] + libs = [name, name.replace('ncurses', 'panel')] +else: + include_dirs 
= [] + library_dirs = [] + libs = [find_library(['ncursesw', 'ncurses']), + find_library(['panelw', 'panel']), + ] ffi = FFI() ffi.set_source("_curses_cffi", """ @@ -83,9 +95,10 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_library(['ncurses', 'ncursesw']), - find_library(['panel', 'panelw'])], - include_dirs=find_curses_include_dirs()) +""", libraries=libs, + library_dirs = library_dirs, + include_dirs=include_dirs, +) ffi.cdef(""" From pypy.commits at gmail.com Sun Dec 8 02:02:38 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:38 -0800 (PST) Subject: [pypy-commit] pypy default: ensure package name has "portable" in it if iti is portable Message-ID: <5deca00e.1c69fb81.10999.5ef3@mx.google.com> Author: Matti Picus Branch: Changeset: r98250:04b691bc3ffa Date: 2019-12-08 08:27 +0200 http://bitbucket.org/pypy/pypy/changeset/04b691bc3ffa/ Log: ensure package name has "portable" in it if iti is portable diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,6 +67,8 @@ name = options.name if not name: name = 'pypy-nightly' + if options.make_portable and 'portable' not in name: + name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Sun Dec 8 02:02:40 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:40 -0800 (PST) Subject: [pypy-commit] pypy default: commit to releasing portable tarballs for 7.3 Message-ID: <5deca010.1c69fb81.95b1a.f11c@mx.google.com> Author: Matti Picus Branch: Changeset: r98251:b7a0f51bb830 Date: 2019-12-08 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/b7a0f51bb830/ Log: commit to releasing portable tarballs for 7.3 diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -18,6 +18,12 @@ building 
third party packages for python, so this release changes the ABI tag for PyPy. +Based on the great work done in `portable-pypy`_, the linux downloads we +provide are now built on top of the `manylinux2010`_ CentOS6 docker image. +The tarballs include the needed shared objects to run on any platform that +supports manylinux2010 wheels, which should include all supported versions of +debian- and RedHat-based distributions (including Ubuntu, CentOS, and Fedora). + The `CFFI`_ backend has been updated to version 1.13.1. We recommend using CFFI rather than c-extensions to interact with C. @@ -57,6 +63,8 @@ .. _`CFFI`: http://cffi.readthedocs.io .. _`cppyy`: https://cppyy.readthedocs.io .. _`available as wheels`: https://github.com/antocuni/pypy-wheels +.. _`portable-pypy`: https://github.com/squeaky-pl/portable-pypy +.. _`manylinux2010`: https://github.com/pypa/manylinux What is PyPy? ============= From pypy.commits at gmail.com Sun Dec 8 02:02:41 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:41 -0800 (PST) Subject: [pypy-commit] pypy default: update release note for current HEADs Message-ID: <5deca011.1c69fb81.4284.2ec6@mx.google.com> Author: Matti Picus Branch: Changeset: r98252:e53d4169f58d Date: 2019-12-08 08:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e53d4169f58d/ Log: update release note for current HEADs diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -132,6 +132,7 @@ * Check for overflow in ctypes array creation * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) +* Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -174,6 +175,8 @@ it. 
(`issue 3096`_) * Remove incorrect clobbering of the ``locals`` after running ``exec()`` * Adds encoding, decoding codepages on win32 +* Remove socket error attributes from ``_ssl`` (`issue 3119`_) +* Add missing ``os.getgrouplist`` (part of `issue 2375`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -190,6 +193,7 @@ .. _`manylinux2010`: fix broken link .. _`macports pypy`: https://github.com/macports/macports-ports/blob/master/lang/pypy/files/darwin.py.diff +.. _`issue 2375`: https://bitbucket.com/pypy/pypy/issues/2375 .. _`issue 2389`: https://bitbucket.com/pypy/pypy/issues/2389 .. _`issue 2687`: https://bitbucket.com/pypy/pypy/issues/2687 .. _`issue 2970`: https://bitbucket.com/pypy/pypy/issues/2970 @@ -206,8 +210,10 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 +.. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 .. 
_13312: https://bugs.python.org/issue13312 From pypy.commits at gmail.com Sun Dec 8 02:02:44 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:44 -0800 (PST) Subject: [pypy-commit] pypy release-pypy2.7-v7.x: merge default into release branch Message-ID: <5deca014.1c69fb81.b2bf8.c0b9@mx.google.com> Author: Matti Picus Branch: release-pypy2.7-v7.x Changeset: r98253:1c36a847ec09 Date: 2019-12-08 09:00 +0200 http://bitbucket.org/pypy/pypy/changeset/1c36a847ec09/ Log: merge default into release branch diff --git a/lib_pypy/_cffi_ssl/_stdssl/error.py b/lib_pypy/_cffi_ssl/_stdssl/error.py --- a/lib_pypy/_cffi_ssl/_stdssl/error.py +++ b/lib_pypy/_cffi_ssl/_stdssl/error.py @@ -27,13 +27,14 @@ if self.strerror and isinstance(self.strerror, str): return self.strerror return str(self.args) -# these are expected on socket as well -socket.sslerror = SSLError -for v in [ 'SSL_ERROR_ZERO_RETURN', 'SSL_ERROR_WANT_READ', - 'SSL_ERROR_WANT_WRITE', 'SSL_ERROR_WANT_X509_LOOKUP', 'SSL_ERROR_SYSCALL', - 'SSL_ERROR_SSL', 'SSL_ERROR_WANT_CONNECT', 'SSL_ERROR_EOF', - 'SSL_ERROR_INVALID_ERROR_CODE' ]: - setattr(socket, v, locals()[v]) +# these are expected on socket in python2 as well +if sys.version_info[0] < 3: + socket.sslerror = SSLError + for v in [ 'SSL_ERROR_ZERO_RETURN', 'SSL_ERROR_WANT_READ', + 'SSL_ERROR_WANT_WRITE', 'SSL_ERROR_WANT_X509_LOOKUP', 'SSL_ERROR_SYSCALL', + 'SSL_ERROR_SSL', 'SSL_ERROR_WANT_CONNECT', 'SSL_ERROR_EOF', + 'SSL_ERROR_INVALID_ERROR_CODE' ]: + setattr(socket, v, locals()[v]) class SSLZeroReturnError(SSLError): """ SSL/TLS session closed cleanly. 
""" diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -28,13 +28,25 @@ # error message raise e_last -def find_curses_include_dirs(): - if os.path.exists('/usr/include/ncurses'): - return ['/usr/include/ncurses'] - if os.path.exists('/usr/include/ncursesw'): - return ['/usr/include/ncursesw'] - return [] +def find_curses_dir_and_name(): + for base in ('/usr', '/usr/local'): + if os.path.exists(os.path.join(base, 'include', 'ncursesw')): + return base, 'ncursesw' + if os.path.exists(os.path.join(base, 'include', 'ncurses')): + return base, 'ncurses' + return '', None +base, name = find_curses_dir_and_name() +if base: + include_dirs = [os.path.join(base, 'include', name)] + library_dirs = [os.path.join(base, 'lib')] + libs = [name, name.replace('ncurses', 'panel')] +else: + include_dirs = [] + library_dirs = [] + libs = [find_library(['ncursesw', 'ncurses']), + find_library(['panelw', 'panel']), + ] ffi = FFI() ffi.set_source("_curses_cffi", """ @@ -83,9 +95,10 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_library(['ncurses', 'ncursesw']), - find_library(['panel', 'panelw'])], - include_dirs=find_curses_include_dirs()) +""", libraries=libs, + library_dirs = library_dirs, + include_dirs=include_dirs, +) ffi.cdef(""" diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -60,12 +60,9 @@ Install build-time dependencies ------------------------------- (**Note**: for some hints on how to translate the Python interpreter under -Windows, see the `windows document`_ . For hints on how to cross-compile in -a chroot using scratchbox2, see the `arm document`_ in the -`RPython documentation`_) +Windows, see the `windows document`_ . .. _`windows document`: windows.html -.. _`arm document`: http://rpython.readthedocs.org/en/latest/arm.html .. 
_`RPython documentation`: http://rpython.readthedocs.org The host Python needs to have CFFI installed. If translating on PyPy, CFFI is @@ -88,9 +85,6 @@ pyexpat libexpat1 -_ssl - libssl - _vmprof libunwind (optional, loaded dynamically at runtime) @@ -104,6 +98,9 @@ sqlite3 libsqlite3 +_ssl, _hashlib + libssl + curses libncurses-dev (for PyPy2) libncursesw-dev (for PyPy3) @@ -115,11 +112,12 @@ tk-dev lzma (PyPy3 only) - liblzma + liblzma or libxz, version 5 and up -To run untranslated tests, you need the Boehm garbage collector libgc. +To run untranslated tests, you need the Boehm garbage collector libgc, version +7.4 and up -On recent Debian and Ubuntu (16.04 onwards), this is the command to install +On Debian and Ubuntu (16.04 onwards), this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config zlib1g-dev libbz2-dev \ @@ -127,18 +125,11 @@ tk-dev libgc-dev python-cffi \ liblzma-dev libncursesw5-dev # these two only needed on PyPy3 -On older Debian and Ubuntu (12.04-14.04):: - - apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev libgc-dev python-cffi \ - liblzma-dev libncursesw-dev # these two only needed on PyPy3 - On Fedora:: dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ sqlite-devel ncurses-devel expat-devel openssl-devel tk-devel \ - gdbm-devel python-cffi\ + gdbm-devel python-cffi gc-devel\ xz-devel # For lzma on PyPy3. On SLES11:: diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -18,6 +18,12 @@ building third party packages for python, so this release changes the ABI tag for PyPy. +Based on the great work done in `portable-pypy`_, the linux downloads we +provide are now built on top of the `manylinux2010`_ CentOS6 docker image. 
+The tarballs include the needed shared objects to run on any platform that +supports manylinux2010 wheels, which should include all supported versions of +debian- and RedHat-based distributions (including Ubuntu, CentOS, and Fedora). + The `CFFI`_ backend has been updated to version 1.13.1. We recommend using CFFI rather than c-extensions to interact with C. @@ -57,6 +63,8 @@ .. _`CFFI`: http://cffi.readthedocs.io .. _`cppyy`: https://cppyy.readthedocs.io .. _`available as wheels`: https://github.com/antocuni/pypy-wheels +.. _`portable-pypy`: https://github.com/squeaky-pl/portable-pypy +.. _`manylinux2010`: https://github.com/pypa/manylinux What is PyPy? ============= @@ -124,6 +132,7 @@ * Check for overflow in ctypes array creation * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) +* Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,6 +175,8 @@ it. (`issue 3096`_) * Remove incorrect clobbering of the ``locals`` after running ``exec()`` * Adds encoding, decoding codepages on win32 +* Remove socket error attributes from ``_ssl`` (`issue 3119`_) +* Add missing ``os.getgrouplist`` (part of `issue 2375`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -182,6 +193,7 @@ .. _`manylinux2010`: fix broken link .. _`macports pypy`: https://github.com/macports/macports-ports/blob/master/lang/pypy/files/darwin.py.diff +.. _`issue 2375`: https://bitbucket.com/pypy/pypy/issues/2375 .. _`issue 2389`: https://bitbucket.com/pypy/pypy/issues/2389 .. _`issue 2687`: https://bitbucket.com/pypy/pypy/issues/2687 .. _`issue 2970`: https://bitbucket.com/pypy/pypy/issues/2970 @@ -198,8 +210,10 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. 
_`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 +.. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,6 @@ .. this is a revision shortly after release-pypy-7.3.0 .. startrev: dbbbae99135f +.. branch: backport-decode_timeval_ns-py3.7 +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -155,7 +155,7 @@ the `get_externals.py` utility to checkout the proper branch for your platform and PyPy version. -.. _subrepository: https://bitbucket.org/pypy/external +.. _subrepository: https://bitbucket.org/pypy/externals Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/apptest_generator.py copy from pypy/interpreter/test/test_generator.py copy to pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,322 +1,311 @@ -class AppTestGenerator: +from pytest import raises, skip - def test_generator(self): - def f(): +def test_generator(): + def f(): + yield 1 + assert f().next() == 1 + +def test_generator2(): + def f(): + yield 1 + g = f() + assert g.next() == 1 + with raises(StopIteration): + g.next() + +def test_attributes(): + def f(): + yield 1 + assert g.gi_running + g = f() + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + assert g.gi_frame is not None + assert not g.gi_running + g.next() + assert not g.gi_running + with raises(StopIteration): + g.next() + assert not g.gi_running + assert 
g.gi_frame is None + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + +def test_generator3(): + def f(): + yield 1 + g = f() + assert list(g) == [1] + +def test_generator4(): + def f(): + yield 1 + g = f() + assert [x for x in g] == [1] + +def test_generator5(): + def f(): + v = (yield) + yield v + g = f() + g.next() + assert g.send(42) == 42 + +def test_throw1(): + def f(): + yield 2 + g = f() + # two arguments version + with raises(NameError): + g.throw(NameError, "Error") + +def test_throw2(): + def f(): + yield 2 + g = f() + # single argument version + with raises(NameError): + g.throw(NameError("Error")) + +def test_throw3(): + def f(): + try: yield 1 - assert f().next() == 1 + yield 2 + except NameError: + yield 3 + g = f() + assert g.next() == 1 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + g.next() - def test_generator2(self): - def f(): +def test_throw4(): + def f(): + try: yield 1 - g = f() - assert g.next() == 1 - raises(StopIteration, g.next) + v = (yield 2) + except NameError: + yield 3 + g = f() + assert g.next() == 1 + assert g.next() == 2 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + g.next() - def test_attributes(self): - def f(): +def test_throw5(): + def f(): + try: yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running + except Exception: + x = 3 + try: + yield x + except Exception: + pass + g = f() + g.next() + # String exceptions are not allowed anymore + with raises(TypeError): + g.throw("Error") + assert g.throw(Exception) == 3 + with raises(StopIteration): + g.throw(Exception) + +def test_throw6(): + def f(): + yield 2 + g = f() + with raises(NameError): + g.throw(NameError, "Error", None) + + +def test_throw_fail(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), "error") + +def test_throw_fail2(): + def f(): + yield 1 + g = f() + 
with raises(TypeError): + g.throw(list()) + +def test_throw_fail3(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") + +def test_throw_finishes_generator(): + def f(): + yield 1 + g = f() + assert g.gi_frame is not None + with raises(ValueError): + g.throw(ValueError) + assert g.gi_frame is None + +def test_throw_bug(): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + +def test_throw_on_finished_generator(): + def f(): + yield 1 + g = f() + res = g.next() + assert res == 1 + with raises(StopIteration): g.next() - assert not g.gi_running - raises(StopIteration, g.next) - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' + with raises(NameError): + g.throw(NameError) - def test_generator3(self): - def f(): +def test_close(): + def f(): + yield 1 + g = f() + assert g.close() is None + +def test_close2(): + def f(): + try: yield 1 - g = f() - assert list(g) == [1] + except GeneratorExit: + raise StopIteration + g = f() + g.next() + assert g.close() is None - def test_generator4(self): - def f(): +def test_close3(): + def f(): + try: yield 1 - g = f() - assert [x for x in g] == [1] + except GeneratorExit: + raise NameError + g = f() + g.next() + with raises(NameError): + g.close() - def test_generator5(self): - d = {} - exec """if 1: - def f(): - v = (yield ) - yield v - g = f() - g.next() - """ in d - g = d['g'] - assert g.send(42) == 42 +def test_close_fail(): + def f(): + try: + yield 1 + except GeneratorExit: + yield 2 + g = f() + g.next() + with raises(RuntimeError): + g.close() - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - raises(NameError, g.throw, NameError, "Error") +def test_close_on_collect(): + import gc + def f(): + try: + yield + finally: + f.x = 42 + g = f() + g.next() + del g + gc.collect() + 
assert f.x == 42 - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - raises(NameError, g.throw, NameError("Error")) +def test_generator_raises_typeerror(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert g.next() == 1 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) +def test_generator_explicit_stopiteration(): + def f(): + yield 1 + raise StopIteration + g = f() + assert [x for x in g] == [1] - def test_throw4(self): - d = {} - exec """if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """ in d - g = d['g'] - assert g.next() == 1 - assert g.next() == 2 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) +def test_generator_propagate_stopiteration(): + def f(): + it = iter([1]) + while 1: + yield it.next() + g = f() + assert [x for x in g] == [1] - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - g.next() - # String exceptions are not allowed anymore - raises(TypeError, g.throw, "Error") - assert g.throw(Exception) == 3 - raises(StopIteration, g.throw, Exception) +def test_generator_restart(): + def g(): + i = me.next() + yield i + me = g() + with raises(ValueError): + me.next() - def test_throw6(self): - def f(): - yield 2 - g = f() - raises(NameError, g.throw, NameError, "Error", None) +def test_generator_expression(): + exec "res = sum(i*i for i in range(5))" + assert res == 30 +def test_generator_expression_2(): + def f(): + total = sum(i for i in [x for x in z]) + return total, x + z = [1, 2, 7] + assert f() == (10, 7) - def test_throw_fail(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, NameError("Error"), "error") +def test_repr(): + def myFunc(): + yield 
1 + g = myFunc() + r = repr(g) + assert r.startswith(" 5 + try: + next(gen) + except TypeError: + pass - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = g.next() - assert res == 1 - raises(StopIteration, g.next) - raises(NameError, g.throw, NameError) - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise StopIteration - g = f() - g.next() - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - g.next() - raises(NameError, g.close) - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - g.next() - raises(RuntimeError, g.close) - - def test_close_on_collect(self): - ## we need to exec it, else it won't run on python2.4 - d = {} - exec """ - def f(): - try: - yield - finally: - f.x = 42 - """.strip() in d - - g = d['f']() - g.next() - del g - import gc - gc.collect() - assert d['f'].x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.send) # one argument required - raises(TypeError, g.send, 1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield it.next() - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = me.next() - yield i - me = g() - raises(ValueError, me.next) - - def test_generator_expression(self): - exec "res = sum(i*i for i in range(5))" - assert res == 30 - - def 
test_generator_expression_2(self): - d = {} - exec """ -def f(): - total = sum(i for i in [x for x in z]) - return total, x -z = [1, 2, 7] -res = f() -""" in d - assert d['res'] == (10, 7) - - def test_repr(self): - def myFunc(): - yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(" 5 - try: - next(gen) - except TypeError: - pass - - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - raises(TypeError, g.send, 2) - raises(TypeError, g.send, 2) - - -def test_should_not_inline(space): - from pypy.interpreter.generator import should_not_inline - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - return g.__code__ - ''') - assert should_not_inline(w_co) == False - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - yield x + 6 - return g.__code__ - ''') - assert should_not_inline(w_co) == True +def test_multiple_invalid_sends(): + def mygen(): + yield 42 + g = mygen() + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -1,310 +1,3 @@ -class AppTestGenerator: - - def test_generator(self): - def f(): - yield 1 - assert f().next() == 1 - - def test_generator2(self): - def f(): - yield 1 - g = f() - assert g.next() == 1 - raises(StopIteration, g.next) - - def test_attributes(self): - def f(): - yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running - g.next() - assert not g.gi_running - raises(StopIteration, g.next) - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - - def test_generator3(self): - def f(): - yield 1 - g = f() - assert list(g) == [1] - - def test_generator4(self): - def f(): - yield 1 - g = f() - assert [x for x in g] == [1] - - 
def test_generator5(self): - d = {} - exec """if 1: - def f(): - v = (yield ) - yield v - g = f() - g.next() - """ in d - g = d['g'] - assert g.send(42) == 42 - - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - raises(NameError, g.throw, NameError, "Error") - - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - raises(NameError, g.throw, NameError("Error")) - - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert g.next() == 1 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) - - def test_throw4(self): - d = {} - exec """if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """ in d - g = d['g'] - assert g.next() == 1 - assert g.next() == 2 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, g.next) - - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - g.next() - # String exceptions are not allowed anymore - raises(TypeError, g.throw, "Error") - assert g.throw(Exception) == 3 - raises(StopIteration, g.throw, Exception) - - def test_throw6(self): - def f(): - yield 2 - g = f() - raises(NameError, g.throw, NameError, "Error", None) - - - def test_throw_fail(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, NameError("Error"), "error") - - def test_throw_fail2(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, list()) - - def test_throw_fail3(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, NameError("Error"), None, "not tb object") - - def test_throw_finishes_generator(self): - def f(): - yield 1 - g = f() - assert g.gi_frame is not None - raises(ValueError, g.throw, ValueError) - assert g.gi_frame is None - - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert 
res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = g.next() - assert res == 1 - raises(StopIteration, g.next) - raises(NameError, g.throw, NameError) - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise StopIteration - g = f() - g.next() - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - g.next() - raises(NameError, g.close) - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - g.next() - raises(RuntimeError, g.close) - - def test_close_on_collect(self): - ## we need to exec it, else it won't run on python2.4 - d = {} - exec """ - def f(): - try: - yield - finally: - f.x = 42 - """.strip() in d - - g = d['f']() - g.next() - del g - import gc - gc.collect() - assert d['f'].x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.send) # one argument required - raises(TypeError, g.send, 1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield it.next() - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = me.next() - yield i - me = g() - raises(ValueError, me.next) - - def test_generator_expression(self): - exec "res = sum(i*i for i in range(5))" - assert res == 30 - - def test_generator_expression_2(self): - d = {} - exec """ -def f(): - total = sum(i for i in [x for x in z]) - return total, x -z = [1, 2, 7] -res = f() -""" in d - assert d['res'] == (10, 7) - - def test_repr(self): - def myFunc(): - yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(" 5 - try: - 
next(gen) - except TypeError: - pass - - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - raises(TypeError, g.send, 2) - raises(TypeError, g.send, 2) - - def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline w_co = space.appexec([], '''(): diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py deleted file mode 100644 diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/interp_hashlib.py +++ /dev/null @@ -1,204 +0,0 @@ -from __future__ import with_statement - -from rpython.rlib import rgc, ropenssl -from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rstring import StringBuilder -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.tool.sourcetools import func_renamer - -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, interp2app, WrappedDefault -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.module.thread.os_lock import Lock - - -algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') - -def hash_name_mapper_callback(obj_name, userdata): - if not obj_name: - return - # Ignore aliased names, they pollute the list and OpenSSL appears - # to have a its own definition of alias as the resulting list - # still contains duplicate and alternate names for several - # algorithms. 
- if rffi.cast(lltype.Signed, obj_name[0].c_alias): - return - name = rffi.charp2str(obj_name[0].c_name) - global_name_fetcher.meth_names.append(name) - -class NameFetcher: - def setup(self): - self.meth_names = [] - def _cleanup_(self): - self.__dict__.clear() -global_name_fetcher = NameFetcher() - -def fetch_names(space): - global_name_fetcher.setup() - ropenssl.init_digests() - ropenssl.OBJ_NAME_do_all(ropenssl.OBJ_NAME_TYPE_MD_METH, - hash_name_mapper_callback, None) - meth_names = global_name_fetcher.meth_names - global_name_fetcher.meth_names = None - return space.call_function(space.w_frozenset, space.newlist( - [space.newtext(name) for name in meth_names])) - -class W_Hash(W_Root): - NULL_CTX = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ctx = NULL_CTX - - def __init__(self, space, name, copy_from=NULL_CTX): - self.name = name - digest_type = self.digest_type_by_name(space) - self.digest_size = ropenssl.EVP_MD_size(digest_type) - - # Allocate a lock for each HASH object. - # An optimization would be to not release the GIL on small requests, - # and use a custom lock only when needed. 
- self.lock = Lock(space) - - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size, - self) - try: - if copy_from: - if not ropenssl.EVP_MD_CTX_copy(ctx, copy_from): - raise ValueError - else: - ropenssl.EVP_DigestInit(ctx, digest_type) - self.ctx = ctx - except: - ropenssl.EVP_MD_CTX_free(ctx) - raise - self.register_finalizer(space) - - def _finalize_(self): - ctx = self.ctx - if ctx: - self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) - ropenssl.EVP_MD_CTX_free(ctx) - - def digest_type_by_name(self, space): - digest_type = ropenssl.EVP_get_digestbyname(self.name) - if not digest_type: - raise oefmt(space.w_ValueError, "unknown hash function") - return digest_type - - def descr_repr(self, space): - addrstring = self.getaddrstring(space) - return space.newtext("<%s HASH object at 0x%s>" % ( - self.name, addrstring)) - - @unwrap_spec(string='bufferstr') - def update(self, space, string): - with rffi.scoped_nonmovingbuffer(string) as buf: - with self.lock: - # XXX try to not release the GIL for small requests - ropenssl.EVP_DigestUpdate(self.ctx, buf, len(string)) - - def copy(self, space): - "Return a copy of the hash object." - with self.lock: - return W_Hash(space, self.name, copy_from=self.ctx) - - def digest(self, space): - "Return the digest value as a string of binary data." - digest = self._digest(space) - return space.newbytes(digest) - - def hexdigest(self, space): - "Return the digest value as a string of hexadecimal digits." 
- digest = self._digest(space) - hexdigits = '0123456789abcdef' - result = StringBuilder(self.digest_size * 2) - for c in digest: - result.append(hexdigits[(ord(c) >> 4) & 0xf]) - result.append(hexdigits[ ord(c) & 0xf]) - return space.newtext(result.build()) - - def get_digest_size(self, space): - return space.newint(self.digest_size) - - def get_block_size(self, space): - digest_type = self.digest_type_by_name(space) - block_size = ropenssl.EVP_MD_block_size(digest_type) - return space.newint(block_size) - - def get_name(self, space): - return space.newtext(self.name) - - def _digest(self, space): - ctx = ropenssl.EVP_MD_CTX_new() - if ctx is None: - raise MemoryError - try: - with self.lock: - if not ropenssl.EVP_MD_CTX_copy(ctx, self.ctx): - raise ValueError - digest_size = self.digest_size - with rffi.scoped_alloc_buffer(digest_size) as buf: - ropenssl.EVP_DigestFinal(ctx, buf.raw, None) - return buf.str(digest_size) - finally: - ropenssl.EVP_MD_CTX_free(ctx) - - -W_Hash.typedef = TypeDef( - 'HASH', - __repr__=interp2app(W_Hash.descr_repr), - update=interp2app(W_Hash.update), - copy=interp2app(W_Hash.copy), - digest=interp2app(W_Hash.digest), - hexdigest=interp2app(W_Hash.hexdigest), - # - digest_size=GetSetProperty(W_Hash.get_digest_size), - digestsize=GetSetProperty(W_Hash.get_digest_size), - block_size=GetSetProperty(W_Hash.get_block_size), - name=GetSetProperty(W_Hash.get_name), -) -W_Hash.typedef.acceptable_as_base_class = False - - at unwrap_spec(name='text', string='bufferstr') -def new(space, name, string=''): - w_hash = W_Hash(space, name) - w_hash.update(space, string) - return w_hash - -# shortcut functions -def make_new_hash(name, funcname): - @func_renamer(funcname) - @unwrap_spec(string='bufferstr') - def new_hash(space, string=''): - return new(space, name, string) - return new_hash - -for _name in algorithms: - _newname = 'new_%s' % (_name,) - globals()[_newname] = make_new_hash(_name, _newname) - - -HAS_FAST_PKCS5_PBKDF2_HMAC = 
ropenssl.PKCS5_PBKDF2_HMAC is not None -if HAS_FAST_PKCS5_PBKDF2_HMAC: - @unwrap_spec(name='text', password='bytes', salt='bytes', rounds=int, - w_dklen=WrappedDefault(None)) - def pbkdf2_hmac(space, name, password, salt, rounds, w_dklen): - digest = ropenssl.EVP_get_digestbyname(name) - if not digest: - raise oefmt(space.w_ValueError, "unknown hash function") - if space.is_w(w_dklen, space.w_None): - dklen = ropenssl.EVP_MD_size(digest) - else: - dklen = space.int_w(w_dklen) - if dklen < 1: - raise oefmt(space.w_ValueError, - "key length must be greater than 0.") - with rffi.scoped_alloc_buffer(dklen) as buf: - r = ropenssl.PKCS5_PBKDF2_HMAC( - password, len(password), salt, len(salt), rounds, digest, - dklen, buf.raw) - if not r: - raise ValueError - return space.newbytes(buf.str(dklen)) diff --git a/pypy/module/_hashlib/moduledef.py b/pypy/module/_hashlib/moduledef.py deleted file mode 100644 --- a/pypy/module/_hashlib/moduledef.py +++ /dev/null @@ -1,22 +0,0 @@ -from pypy.interpreter.mixedmodule import MixedModule -from pypy.module._hashlib.interp_hashlib import ( - algorithms, fetch_names, HAS_FAST_PKCS5_PBKDF2_HMAC) - - -class Module(MixedModule): - interpleveldefs = { - 'new' : 'interp_hashlib.new', - } - - appleveldefs = { - } - - for name in algorithms: - interpleveldefs['openssl_' + name] = 'interp_hashlib.new_' + name - - if HAS_FAST_PKCS5_PBKDF2_HMAC: - interpleveldefs['pbkdf2_hmac'] = 'interp_hashlib.pbkdf2_hmac' - - def startup(self, space): - w_meth_names = fetch_names(space) - space.setattr(self, space.newtext('openssl_md_meth_names'), w_meth_names) diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_hashlib.py +++ /dev/null @@ -1,123 +0,0 @@ -class AppTestHashlib: - spaceconfig = { - "usemodules": ['_hashlib', 'array', 'struct', 'binascii'], - } - - def test_method_names(self): - import _hashlib - assert 
isinstance(_hashlib.openssl_md_meth_names, frozenset) - assert "md5" in _hashlib.openssl_md_meth_names - - def test_simple(self): - import _hashlib - assert _hashlib.new('md5').__class__.__name__ == 'HASH' - assert len(_hashlib.new('md5').hexdigest()) == 32 - - def test_attributes(self): - import hashlib - for name, (expected_size, expected_block_size) in { - 'md5': (16, 64), - 'sha1': (20, 64), - 'sha224': (28, 64), - 'sha256': (32, 64), - 'sha384': (48, 128), - 'sha512': (64, 128), - }.items(): - h = hashlib.new(name) - assert h.name == name - assert h.digest_size == expected_size - assert h.digestsize == expected_size - assert h.block_size == expected_block_size - # - h.update('abc') - h2 = h.copy() - h.update('def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update('d') - h2.update('ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - assert len(digest) == h.digest_size - assert len(hexdigest) == h.digest_size * 2 - c_digest = digest - c_hexdigest = hexdigest - - # also test the pure Python implementation - py_new = getattr(hashlib, '__get_builtin_constructor') - h = py_new(name)('') - assert h.digest_size == expected_size - assert h.digestsize == expected_size - assert h.block_size == expected_block_size - # - h.update('abc') - h2 = h.copy() - h.update('def') - digest = h.digest() - hexdigest = h.hexdigest() - h2.update('d') - h2.update('ef') - assert digest == h2.digest() - assert hexdigest == h2.hexdigest() - - # compare both implementations - assert c_digest == digest - assert c_hexdigest == hexdigest - - def test_shortcut(self): - import hashlib - assert repr(hashlib.md5()).startswith("= 1.1") - out = pbkdf2_hmac('sha1', 'password', 'salt', 1) - assert out == '0c60c80f961f0e71f3a9b524af6012062fe037a6'.decode('hex') - out = pbkdf2_hmac('sha1', 'password', 'salt', 2, None) - assert out == 'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'.decode('hex') diff --git a/pypy/module/_hashlib/test/test_ztranslation.py 
b/pypy/module/_hashlib/test/test_ztranslation.py deleted file mode 100644 --- a/pypy/module/_hashlib/test/test_ztranslation.py +++ /dev/null @@ -1,4 +0,0 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test_checkmodule(): - checkmodule('_hashlib') diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -5,11 +5,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -# We cannot trust ncurses5-config, it's broken in various ways in -# various versions. For example it might not list -ltinfo even though -# it's needed, or --cflags might be completely empty. On Ubuntu 10.04 -# it gives -I/usr/include/ncurses, which doesn't exist at all. Crap. - def try_cflags(): yield ExternalCompilationInfo(includes=['curses.h', 'term.h']) yield ExternalCompilationInfo(includes=['curses.h', 'term.h'], @@ -20,8 +15,9 @@ 'ncurses/term.h']) def try_ldflags(): + yield ExternalCompilationInfo(libraries=['curses', 'tinfo']) yield ExternalCompilationInfo(libraries=['curses']) - yield ExternalCompilationInfo(libraries=['curses', 'tinfo']) + yield ExternalCompilationInfo(libraries=['ncurses', 'tinfo']) yield ExternalCompilationInfo(libraries=['ncurses']) yield ExternalCompilationInfo(libraries=['ncurses'], library_dirs=['/usr/lib64']) @@ -30,7 +26,11 @@ def try_tools(): try: - yield ExternalCompilationInfo.from_pkg_config("ncurses") + yield ExternalCompilationInfo.from_config_tool("ncursesw6-config") + except Exception: + pass + try: + yield ExternalCompilationInfo.from_config_tool("ncurses5-config") except Exception: pass try: @@ -38,7 +38,7 @@ except Exception: pass try: - yield ExternalCompilationInfo.from_config_tool("ncurses5-config") + yield ExternalCompilationInfo.from_pkg_config("ncursesw") except Exception: pass diff --git a/pypy/module/cpyext/cparser.py 
b/pypy/module/cpyext/cparser.py --- a/pypy/module/cpyext/cparser.py +++ b/pypy/module/cpyext/cparser.py @@ -705,6 +705,7 @@ self.struct_typedefs = {} self._handled = set() self._frozen = False + self._cdecl_type_cache = {} # {cdecl: TYPE} cache if includes is not None: for header in includes: self.include(header) @@ -840,6 +841,14 @@ raise NotImplementedError def gettype(self, cdecl): + try: + return self._cdecl_type_cache[cdecl] + except KeyError: + result = self._real_gettype(cdecl) + self._cdecl_type_cache[cdecl] = result + return result + + def _real_gettype(self, cdecl): obj = self.ctx.parse_type(cdecl) result = self.convert_type(obj) if isinstance(result, DelayedStruct): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -448,14 +448,19 @@ return w_loader def _getimporter(space, w_pathitem): - # the function 'imp._getimporter' is a pypy-only extension + # 'imp._getimporter' is somewhat like CPython's get_path_importer w_path_importer_cache = space.sys.get("path_importer_cache") w_importer = space.finditem(w_path_importer_cache, w_pathitem) if w_importer is None: space.setitem(w_path_importer_cache, w_pathitem, space.w_None) for w_hook in space.unpackiterable(space.sys.get("path_hooks")): + w_pathbytes = w_pathitem + if space.isinstance_w(w_pathitem, space.w_unicode): + from pypy.module.sys.interp_encoding import getfilesystemencoding + w_pathbytes = space.call_method(space.w_unicode, 'encode', + w_pathitem, getfilesystemencoding(space)) try: - w_importer = space.call_function(w_hook, w_pathitem) + w_importer = space.call_function(w_hook, w_pathbytes) except OperationError as e: if not e.match(space, space.w_ImportError): raise diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -4,7 +4,8 @@ class AppTestImpModule: spaceconfig = { - 'usemodules': ['binascii', 
'imp', 'itertools', 'time', 'struct'], + 'usemodules': ['binascii', 'imp', 'itertools', 'time', 'struct', + 'zipimport'], } def setup_class(cls): @@ -246,3 +247,14 @@ assert marshal.loads == 42 marshal.loads = old + + def test_unicode_in_sys_path(self): + # issue 3112: when _getimporter calls + # for x in sys.path: for h in sys.path_hooks: h(x) + # make sure x is properly encoded + import sys + import zipimport # installs a sys.path_hook + if sys.getfilesystemencoding().lower() == 'utf-8': + sys.path.insert(0, u'\xef') + with raises(ImportError): + import impossible_module diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -4,8 +4,10 @@ from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC from rpython.rlib.rawstorage import misaligned_is_fine +IS_X86 = platform.machine().startswith('x86') or platform.machine() == 'i686' + def no_vector_backend(): - if platform.machine().startswith('x86'): + if IS_X86: from rpython.jit.backend.x86.detect_feature import detect_sse4_2 if sys.maxsize < 2**31: return True @@ -19,7 +21,7 @@ return True def align_check(input): - if platform.machine().startswith('x86'): + if IS_X86: return "" if sys.maxsize > 2**32: mask = 7 diff --git a/pypy/module/thread/test/test_thread.py b/pypy/module/thread/test/test_thread.py --- a/pypy/module/thread/test/test_thread.py +++ b/pypy/module/thread/test/test_thread.py @@ -1,38 +1,9 @@ import thread, time from pypy.module.thread.test.support import GenericTestThread +import pytest class AppTestThread(GenericTestThread): - def setup_class(cls): - GenericTestThread.setup_class.im_func(cls) - # if we cannot start more than, say, 1000 threads on this OS, then - # we can check that we get the proper error at app-level - space = cls.space - lock = thread.allocate_lock() - lock.acquire() - def f(): - 
lock.acquire() - lock.release() - start = thread._count() - try: - try: - for i in range(1000): - thread.start_new_thread(f, ()) - finally: - lock.release() - except (thread.error, MemoryError): - cls.w_can_start_many_threads = space.wrap(False) - else: - cls.w_can_start_many_threads = space.wrap(True) - # wait a bit to allow all threads to finish now - remaining = thread._count() - retries = 0 - while remaining > start: - retries += 1 - if retries == 200: - raise Exception("the test's threads don't stop!") - time.sleep(0.2) - remaining = thread._count() def test_start_new_thread(self): import thread @@ -189,35 +160,6 @@ assert done # see stderr for failures in threads assert sorted(lst) == range(120) - def test_many_threads(self): - import thread, time - if self.can_start_many_threads: - skip("this OS supports too many threads to check (> 1000)") - lock = thread.allocate_lock() - lock.acquire() - count = [0] - def f(): - count[0] += 1 - lock.acquire() - lock.release() - count[0] -= 1 - try: - try: - for i in range(1000): - thread.start_new_thread(f, ()) - finally: - lock.release() - # wait a bit to allow most threads to finish now - while count[0] > 10: - print count[0] # <- releases the GIL - print "ok." - except (thread.error, MemoryError): - pass - else: - raise Exception("could unexpectedly start 1000 threads") - # safety: check that we can start a new thread here - thread.start_new_thread(lambda: None, ()) - def test_stack_size(self): import thread thread.stack_size(0) @@ -256,3 +198,74 @@ waiting = [] thread.start_new_thread(f, ()) raises(KeyboardInterrupt, busy_wait) + + at pytest.mark.skip("too slow") +class _AppTestThread(GenericTestThread): + ''' + This test is very slow, do not run it by default. 
+ ''' + def setup_class(cls): + GenericTestThread.setup_class.im_func(cls) + # if we cannot start more than, say, 1000 threads on this OS, then + # we can check that we get the proper error at app-level + space = cls.space + lock = thread.allocate_lock() + lock.acquire() + def f(): + lock.acquire() + lock.release() + start = thread._count() + try: + try: + for i in range(1000): + thread.start_new_thread(f, ()) + finally: + lock.release() + except (thread.error, MemoryError): + cls.w_can_start_many_threads = space.wrap(False) + else: + cls.w_can_start_many_threads = space.wrap(True) + # wait a bit to allow all threads to finish now + remaining = thread._count() + retries = 0 + while remaining > start: + retries += 1 + if retries == 200: + raise Exception("the test's threads don't stop!") + time.sleep(0.2) + remaining = thread._count() + + def test_many_threads(self): + import time, sys + if sys.version_info[0] < 3: + import thread as _thread + else: + import _thread + if self.can_start_many_threads or sys.platform == 'win32': + skip("this OS supports too many threads to check (> 1000)") + lock = _thread.allocate_lock() + lock.acquire() + count = [0] + def f(): + count[0] += 1 + lock.acquire() + lock.release() + count[0] -= 1 + try: + try: + for i in range(1000): + _thread.start_new_thread(f, ()) + finally: + lock.release() + # wait a bit to allow most threads to finish now + while count[0] > 10: + print(count[0]) # <- releases the GIL + print("ok.") + except (_thread.error, MemoryError): + pass + else: + raise Exception("could unexpectedly start 1000 threads") + # safety: check that we can start a new thread here + _thread.start_new_thread(lambda: None, ()) + + diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -345,7 +345,7 @@ assert (d1 + from_copy) == (d1 + from_compressor) - @py.test.mark.skipif(rzlib.ZLIB_VERSION == '1.2.8', reason='does not 
error check') + @py.test.mark.skipif(rzlib.ZLIB_VERSION in ('1.2.8', '1.2.3'), reason='does not error check') def test_cannot_copy_compressor_with_stream_in_inconsistent_state(self): if self.runappdirect: skip("can't run with -A") compressor = self.zlib.compressobj() diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -17,14 +17,20 @@ For more information, use test_all.py -h. """ import sys, os +import shutil if __name__ == '__main__': if len(sys.argv) == 1 and os.path.dirname(sys.argv[0]) in '.': print >> sys.stderr, __doc__ sys.exit(2) - #Add toplevel repository dir to sys.path - sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + toplevel = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + # Always remove the cached files + # Before translation this is done via "py.path.local(CACHE_DIR).remove()" + print 'removing %s/rpython/_cache' % toplevel + shutil.rmtree('%s/rpython/_cache' % toplevel, ignore_errors=True) + # Add toplevel repository dir to sys.path + sys.path.insert(0, toplevel) import pytest if sys.platform == 'win32': #Try to avoid opening a dialog box if one of the tests causes a system error diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,6 +67,8 @@ name = options.name if not name: name = 'pypy-nightly' + if options.make_portable and 'portable' not in name: + name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -800,7 +800,7 @@ DIRENT = rffi_platform.Struct('struct dirent', [('d_name', lltype.FixedSizeArray(rffi.CHAR, 1)), ('d_ino', lltype.Signed)] - + [('d_type', rffi.INT)] if HAVE_D_TYPE else []) + + ([('d_type', rffi.INT)] if HAVE_D_TYPE else [])) if HAVE_D_TYPE: DT_UNKNOWN = 
rffi_platform.ConstantInteger('DT_UNKNOWN') DT_REG = rffi_platform.ConstantInteger('DT_REG') @@ -1931,8 +1931,7 @@ rffi.INT, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_sched_get_priority_min = external('sched_get_priority_min', [rffi.INT], rffi.INT, save_err=rffi.RFFI_SAVE_ERRNO) - if not _WIN32: - c_sched_yield = external('sched_yield', [], rffi.INT) + c_sched_yield = external('sched_yield', [], rffi.INT) @enforceargs(int) def sched_get_priority_max(policy): @@ -1945,6 +1944,36 @@ def sched_yield(): return handle_posix_error('sched_yield', c_sched_yield()) + c_getgroupslist = external('getgrouplist', [rffi.CCHARP, GID_T, + GID_GROUPS_T, rffi.INTP], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def getgrouplist(user, group): + groups_p = lltype.malloc(GID_GROUPS_T.TO, 64, flavor='raw') + ngroups_p = lltype.malloc(rffi.INTP.TO, 1, flavor='raw') + ngroups_p[0] = rffi.cast(rffi.INT, 64) + try: + n = handle_posix_error('getgrouplist', c_getgroupslist(user, group, + groups_p, ngroups_p)) + if n == -1: + if widen(ngroups_p[0]) > 64: + # reallocate. 
Should never happen + lltype.free(groups_p, flavor='raw') + groups_p = lltype.nullptr(GID_GROUPS_T.TO) + groups_p = lltype.malloc(GID_GROUPS_T.TO, widen(ngroups_p[0]), + flavor='raw') + + n = handle_posix_error('getgrouplist', c_getgroupslist(user, + group, groups_p, ngroups_p)) + ngroups = widen(ngroups_p[0]) + groups = [0] * ngroups + for i in range(ngroups): + groups[i] = groups_p[i] + return groups + finally: + lltype.free(ngroups_p, flavor='raw') + if groups_p: + lltype.free(groups_p, flavor='raw') #___________________________________________________________________ c_chroot = external('chroot', [rffi.CCHARP], rffi.INT, diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import register_replacement_for -from rpython.rlib.rarithmetic import intmask, UINT_MAX +from rpython.rlib.rarithmetic import intmask, r_int64, UINT_MAX from rpython.rlib import rposix _WIN32 = sys.platform.startswith('win') @@ -94,6 +94,10 @@ return (float(rffi.getintfield(t, 'c_tv_sec')) + float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001) +def decode_timeval_ns(t): + return (r_int64(rffi.getintfield(t, 'c_tv_sec')) * 10**9 + + r_int64(rffi.getintfield(t, 'c_tv_usec')) * 10**3) + def external(name, args, result, compilation_info=eci, **kwds): return rffi.llexternal(name, args, result, diff --git a/rpython/rlib/test/test_rzlib.py b/rpython/rlib/test/test_rzlib.py --- a/rpython/rlib/test/test_rzlib.py +++ b/rpython/rlib/test/test_rzlib.py @@ -274,7 +274,7 @@ rzlib.deflateEnd(copied) assert bytes1 + bytes_copy == compressed - at py.test.mark.skipif(rzlib.ZLIB_VERSION == '1.2.8', reason='does not error check') + at py.test.mark.skipif(rzlib.ZLIB_VERSION in ('1.2.3', '1.2.8'), reason='does not error check') def test_unsuccessful_compress_copy(): """ Errors during unsuccesful deflateCopy 
operations raise RZlibErrors. From pypy.commits at gmail.com Sun Dec 8 02:02:46 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:46 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5deca016.1c69fb81.82d48.b3f6@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98254:6f23035ba670 Date: 2019-12-08 09:01 +0200 http://bitbucket.org/pypy/pypy/changeset/6f23035ba670/ Log: merge default into py3.6 diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -28,13 +28,25 @@ # error message raise e_last -def find_curses_include_dirs(): - if os.path.exists('/usr/include/ncurses'): - return ['/usr/include/ncurses'] - if os.path.exists('/usr/include/ncursesw'): - return ['/usr/include/ncursesw'] - return [] +def find_curses_dir_and_name(): + for base in ('/usr', '/usr/local'): + if os.path.exists(os.path.join(base, 'include', 'ncursesw')): + return base, 'ncursesw' + if os.path.exists(os.path.join(base, 'include', 'ncurses')): + return base, 'ncurses' + return '', None +base, name = find_curses_dir_and_name() +if base: + include_dirs = [os.path.join(base, 'include', name)] + library_dirs = [os.path.join(base, 'lib')] + libs = [name, name.replace('ncurses', 'panel')] +else: + include_dirs = [] + library_dirs = [] + libs = [find_library(['ncursesw', 'ncurses']), + find_library(['panelw', 'panel']), + ] ffi = FFI() ffi.set_source("_curses_cffi", """ @@ -83,9 +95,10 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_library(['ncurses', 'ncursesw']), - find_library(['panel', 'panelw'])], - include_dirs=find_curses_include_dirs()) +""", libraries=libs, + library_dirs = library_dirs, + include_dirs=include_dirs, +) ffi.cdef(""" diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -18,6 +18,12 @@ building third party packages for 
python, so this release changes the ABI tag for PyPy. +Based on the great work done in `portable-pypy`_, the linux downloads we +provide are now built on top of the `manylinux2010`_ CentOS6 docker image. +The tarballs include the needed shared objects to run on any platform that +supports manylinux2010 wheels, which should include all supported versions of +debian- and RedHat-based distributions (including Ubuntu, CentOS, and Fedora). + The `CFFI`_ backend has been updated to version 1.13.1. We recommend using CFFI rather than c-extensions to interact with C. @@ -57,6 +63,8 @@ .. _`CFFI`: http://cffi.readthedocs.io .. _`cppyy`: https://cppyy.readthedocs.io .. _`available as wheels`: https://github.com/antocuni/pypy-wheels +.. _`portable-pypy`: https://github.com/squeaky-pl/portable-pypy +.. _`manylinux2010`: https://github.com/pypa/manylinux What is PyPy? ============= @@ -124,6 +132,7 @@ * Check for overflow in ctypes array creation * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) +* Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,6 +175,8 @@ it. (`issue 3096`_) * Remove incorrect clobbering of the ``locals`` after running ``exec()`` * Adds encoding, decoding codepages on win32 +* Remove socket error attributes from ``_ssl`` (`issue 3119`_) +* Add missing ``os.getgrouplist`` (part of `issue 2375`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -182,6 +193,7 @@ .. _`manylinux2010`: fix broken link .. _`macports pypy`: https://github.com/macports/macports-ports/blob/master/lang/pypy/files/darwin.py.diff +.. _`issue 2375`: https://bitbucket.com/pypy/pypy/issues/2375 .. _`issue 2389`: https://bitbucket.com/pypy/pypy/issues/2389 .. _`issue 2687`: https://bitbucket.com/pypy/pypy/issues/2687 .. _`issue 2970`: https://bitbucket.com/pypy/pypy/issues/2970 @@ -198,8 +210,10 @@ .. 
_`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 +.. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -64,6 +64,8 @@ name = options.name if not name: name = 'pypy-nightly' + if options.make_portable and 'portable' not in name: + name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Sun Dec 8 02:02:48 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:02:48 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release branch Message-ID: <5deca018.1c69fb81.8810d.8774@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98255:d6ff09cd0648 Date: 2019-12-08 09:01 +0200 http://bitbucket.org/pypy/pypy/changeset/d6ff09cd0648/ Log: merge py3.6 into release branch diff too long, truncating to 2000 out of 3708 lines diff --git a/lib_pypy/_cffi_ssl/_stdssl/error.py b/lib_pypy/_cffi_ssl/_stdssl/error.py --- a/lib_pypy/_cffi_ssl/_stdssl/error.py +++ b/lib_pypy/_cffi_ssl/_stdssl/error.py @@ -27,13 +27,14 @@ if self.strerror and isinstance(self.strerror, str): return self.strerror return str(self.args) -# these are expected on socket as well -socket.sslerror = SSLError -for v in [ 'SSL_ERROR_ZERO_RETURN', 'SSL_ERROR_WANT_READ', - 'SSL_ERROR_WANT_WRITE', 'SSL_ERROR_WANT_X509_LOOKUP', 'SSL_ERROR_SYSCALL', - 'SSL_ERROR_SSL', 'SSL_ERROR_WANT_CONNECT', 
'SSL_ERROR_EOF', - 'SSL_ERROR_INVALID_ERROR_CODE' ]: - setattr(socket, v, locals()[v]) +# these are expected on socket in python2 as well +if sys.version_info[0] < 3: + socket.sslerror = SSLError + for v in [ 'SSL_ERROR_ZERO_RETURN', 'SSL_ERROR_WANT_READ', + 'SSL_ERROR_WANT_WRITE', 'SSL_ERROR_WANT_X509_LOOKUP', 'SSL_ERROR_SYSCALL', + 'SSL_ERROR_SSL', 'SSL_ERROR_WANT_CONNECT', 'SSL_ERROR_EOF', + 'SSL_ERROR_INVALID_ERROR_CODE' ]: + setattr(socket, v, locals()[v]) class SSLZeroReturnError(SSLError): """ SSL/TLS session closed cleanly. """ diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -28,13 +28,25 @@ # error message raise e_last -def find_curses_include_dirs(): - if os.path.exists('/usr/include/ncurses'): - return ['/usr/include/ncurses'] - if os.path.exists('/usr/include/ncursesw'): - return ['/usr/include/ncursesw'] - return [] +def find_curses_dir_and_name(): + for base in ('/usr', '/usr/local'): + if os.path.exists(os.path.join(base, 'include', 'ncursesw')): + return base, 'ncursesw' + if os.path.exists(os.path.join(base, 'include', 'ncurses')): + return base, 'ncurses' + return '', None +base, name = find_curses_dir_and_name() +if base: + include_dirs = [os.path.join(base, 'include', name)] + library_dirs = [os.path.join(base, 'lib')] + libs = [name, name.replace('ncurses', 'panel')] +else: + include_dirs = [] + library_dirs = [] + libs = [find_library(['ncursesw', 'ncurses']), + find_library(['panelw', 'panel']), + ] ffi = FFI() ffi.set_source("_curses_cffi", """ @@ -83,9 +95,10 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_library(['ncurses', 'ncursesw']), - find_library(['panel', 'panelw'])], - include_dirs=find_curses_include_dirs()) +""", libraries=libs, + library_dirs = library_dirs, + include_dirs=include_dirs, +) ffi.cdef(""" diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -60,12 
+60,9 @@ Install build-time dependencies ------------------------------- (**Note**: for some hints on how to translate the Python interpreter under -Windows, see the `windows document`_ . For hints on how to cross-compile in -a chroot using scratchbox2, see the `arm document`_ in the -`RPython documentation`_) +Windows, see the `windows document`_ . .. _`windows document`: windows.html -.. _`arm document`: http://rpython.readthedocs.org/en/latest/arm.html .. _`RPython documentation`: http://rpython.readthedocs.org The host Python needs to have CFFI installed. If translating on PyPy, CFFI is @@ -88,9 +85,6 @@ pyexpat libexpat1 -_ssl - libssl - _vmprof libunwind (optional, loaded dynamically at runtime) @@ -104,6 +98,9 @@ sqlite3 libsqlite3 +_ssl, _hashlib + libssl + curses libncurses-dev (for PyPy2) libncursesw-dev (for PyPy3) @@ -115,11 +112,12 @@ tk-dev lzma (PyPy3 only) - liblzma + liblzma or libxz, version 5 and up -To run untranslated tests, you need the Boehm garbage collector libgc. +To run untranslated tests, you need the Boehm garbage collector libgc, version +7.4 and up -On recent Debian and Ubuntu (16.04 onwards), this is the command to install +On Debian and Ubuntu (16.04 onwards), this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config zlib1g-dev libbz2-dev \ @@ -127,18 +125,11 @@ tk-dev libgc-dev python-cffi \ liblzma-dev libncursesw5-dev # these two only needed on PyPy3 -On older Debian and Ubuntu (12.04-14.04):: - - apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev libgc-dev python-cffi \ - liblzma-dev libncursesw-dev # these two only needed on PyPy3 - On Fedora:: dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ sqlite-devel ncurses-devel expat-devel openssl-devel tk-devel \ - gdbm-devel python-cffi\ + gdbm-devel python-cffi gc-devel\ xz-devel # For lzma on PyPy3. 
On SLES11:: diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -18,6 +18,12 @@ building third party packages for python, so this release changes the ABI tag for PyPy. +Based on the great work done in `portable-pypy`_, the linux downloads we +provide are now built on top of the `manylinux2010`_ CentOS6 docker image. +The tarballs include the needed shared objects to run on any platform that +supports manylinux2010 wheels, which should include all supported versions of +debian- and RedHat-based distributions (including Ubuntu, CentOS, and Fedora). + The `CFFI`_ backend has been updated to version 1.13.1. We recommend using CFFI rather than c-extensions to interact with C. @@ -57,6 +63,8 @@ .. _`CFFI`: http://cffi.readthedocs.io .. _`cppyy`: https://cppyy.readthedocs.io .. _`available as wheels`: https://github.com/antocuni/pypy-wheels +.. _`portable-pypy`: https://github.com/squeaky-pl/portable-pypy +.. _`manylinux2010`: https://github.com/pypa/manylinux What is PyPy? ============= @@ -124,6 +132,7 @@ * Check for overflow in ctypes array creation * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) +* Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,6 +175,8 @@ it. (`issue 3096`_) * Remove incorrect clobbering of the ``locals`` after running ``exec()`` * Adds encoding, decoding codepages on win32 +* Remove socket error attributes from ``_ssl`` (`issue 3119`_) +* Add missing ``os.getgrouplist`` (part of `issue 2375`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -182,6 +193,7 @@ .. _`manylinux2010`: fix broken link .. _`macports pypy`: https://github.com/macports/macports-ports/blob/master/lang/pypy/files/darwin.py.diff +.. _`issue 2375`: https://bitbucket.com/pypy/pypy/issues/2375 .. 
_`issue 2389`: https://bitbucket.com/pypy/pypy/issues/2389 .. _`issue 2687`: https://bitbucket.com/pypy/pypy/issues/2687 .. _`issue 2970`: https://bitbucket.com/pypy/pypy/issues/2970 @@ -198,8 +210,10 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 +.. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,6 @@ .. this is a revision shortly after release-pypy-7.3.0 .. startrev: dbbbae99135f +.. branch: backport-decode_timeval_ns-py3.7 +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -5,15 +5,3 @@ .. this is the revision after release-pypy3.6-v7.3.0 .. startrev: 78b4d0a7cf2e - -.. branch: py3.6-asyncgen - -Fix asyncgen_hooks and refactor coroutine execution - -.. branch: py3.6-exc-info - -Follow CPython's use of exc_info more closely (issue 3096) - -.. branch: code_page-utf8 - -Add encoding, decoding of codepages on windows diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -155,7 +155,7 @@ the `get_externals.py` utility to checkout the proper branch for your platform and PyPy version. -.. _subrepository: https://bitbucket.org/pypy/external +.. 
_subrepository: https://bitbucket.org/pypy/externals Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -734,7 +734,15 @@ filename = sys.argv[0] mainmodule.__file__ = filename mainmodule.__cached__ = None - if not isolated: + for hook in sys.path_hooks: + try: + importer = hook(filename) + break + except ImportError: + continue + else: + importer = None + if importer is None and not isolated: sys.path.insert(0, sys.pypy_resolvedirof(filename)) # assume it's a pyc file only if its name says so. # CPython goes to great lengths to detect other cases @@ -770,18 +778,13 @@ args = (execfile, filename, mainmodule.__dict__) else: filename = sys.argv[0] - for hook in sys.path_hooks: - try: - importer = hook(filename) - except ImportError: - continue + if importer is not None: # It's the name of a directory or a zip file. # put the filename in sys.path[0] and import # the module __main__ import runpy sys.path.insert(0, filename) args = (runpy._run_module_as_main, '__main__', False) - break else: # That's the normal path, "pypy3 stuff.py". 
# We don't actually load via SourceFileLoader diff --git a/pypy/interpreter/test/apptest_coroutine.py b/pypy/interpreter/test/apptest_coroutine.py --- a/pypy/interpreter/test/apptest_coroutine.py +++ b/pypy/interpreter/test/apptest_coroutine.py @@ -699,6 +699,72 @@ assert run_async(run()) == ([], (1,)) +# Helpers for test_async_gen_exception_11() below +def sync_iterate(g): + res = [] + while True: + try: + res.append(g.__next__()) + except StopIteration: + res.append('STOP') + break + except Exception as ex: + res.append(str(type(ex))) + return res + +def async_iterate(g): + res = [] + while True: + try: + g.__anext__().__next__() + except StopAsyncIteration: + res.append('STOP') + break + except StopIteration as ex: + if ex.args: + res.append(ex.args[0]) + else: + res.append('EMPTY StopIteration') + break + except Exception as ex: + res.append(str(type(ex))) + return res + + +def test_async_gen_exception_11(): + # bpo-33786 + def sync_gen(): + yield 10 + yield 20 + + def sync_gen_wrapper(): + yield 1 + sg = sync_gen() + sg.send(None) + try: + sg.throw(GeneratorExit()) + except GeneratorExit: + yield 2 + yield 3 + + async def async_gen(): + yield 10 + yield 20 + + async def async_gen_wrapper(): + yield 1 + asg = async_gen() + await asg.asend(None) + try: + await asg.athrow(GeneratorExit()) + except GeneratorExit: + yield 2 + yield 3 + + sync_gen_result = sync_iterate(sync_gen_wrapper()) + async_gen_result = async_iterate(async_gen_wrapper()) + assert sync_gen_result == async_gen_result + def test_asyncgen_yield_stopiteration(): async def foo(): yield 1 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/apptest_generator.py copy from pypy/interpreter/test/test_generator.py copy to pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,989 +1,857 @@ -class AppTestGenerator: +from pytest import raises, skip - def test_generator(self): - def f(): +def 
test_generator(): + def f(): + yield 1 + assert next(f()) == 1 + +def test_generator2(): + def f(): + yield 1 + g = f() + assert next(g) == 1 + with raises(StopIteration): + next(g) + +def test_attributes(): + def f(): + yield 1 + assert g.gi_running + g = f() + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + assert g.gi_frame is not None + assert not g.gi_running + next(g) + assert not g.gi_running + with raises(StopIteration): + next(g) + assert not g.gi_running + assert g.gi_frame is None + assert g.gi_code is f.__code__ + assert g.__name__ == 'f' + +def test_generator3(): + def f(): + yield 1 + g = f() + assert list(g) == [1] + +def test_generator4(): + def f(): + yield 1 + g = f() + assert [x for x in g] == [1] + +def test_generator5(): + def f(): + v = (yield) + yield v + g = f() + next(g) + assert g.send(42) == 42 + +def test_throw1(): + def f(): + yield 2 + g = f() + # two arguments version + with raises(NameError): + g.throw(NameError, "Error") + +def test_throw2(): + def f(): + yield 2 + g = f() + # single argument version + with raises(NameError): + g.throw(NameError("Error")) + +def test_throw3(): + def f(): + try: yield 1 - assert next(f()) == 1 + yield 2 + except NameError: + yield 3 + g = f() + assert next(g) == 1 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + next(g) - def test_generator2(self): - def f(): +def test_throw4(): + def f(): + try: yield 1 - g = f() - assert next(g) == 1 - raises(StopIteration, next, g) + v = (yield 2) + except NameError: + yield 3 + g = f() + assert next(g) == 1 + assert next(g) == 2 + assert g.throw(NameError("Error")) == 3 + with raises(StopIteration): + next(g) - def test_attributes(self): - def f(): +def test_throw5(): + def f(): + try: yield 1 - assert g.gi_running - g = f() - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' - assert g.gi_frame is not None - assert not g.gi_running + except Exception: + x = 3 + try: + yield x + except Exception: + pass + g = f() + 
next(g) + # String exceptions are not allowed anymore + with raises(TypeError): + g.throw("Error") + assert g.throw(Exception) == 3 + with raises(StopIteration): + g.throw(Exception) + +def test_throw6(): + def f(): + yield 2 + g = f() + with raises(NameError): + g.throw(NameError, "Error", None) + + +def test_throw_fail(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), "error") + +def test_throw_fail2(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(list()) + +def test_throw_fail3(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.throw(NameError("Error"), None, "not tb object") + +def test_throw_finishes_generator(): + def f(): + yield 1 + g = f() + assert g.gi_frame is not None + with raises(ValueError): + g.throw(ValueError) + assert g.gi_frame is None + +def test_throw_bug(): + def f(): + try: + x.throw(IndexError) # => "generator already executing" + except ValueError: + yield 1 + x = f() + res = list(x) + assert res == [1] + +def test_throw_on_finished_generator(): + def f(): + yield 1 + g = f() + res = next(g) + assert res == 1 + with raises(StopIteration): next(g) - assert not g.gi_running - raises(StopIteration, next, g) - assert not g.gi_running - assert g.gi_frame is None - assert g.gi_code is f.__code__ - assert g.__name__ == 'f' + with raises(NameError): + g.throw(NameError) - def test_generator3(self): - def f(): +def test_throw_tb(): + def f(): + try: + yield + except ZeroDivisionError: + raise + g = f() + try: + 1 / 0 + except ZeroDivisionError as v: + try: + g.throw(v) + except Exception as w: + tb = w.__traceback__ + levels = 0 + while tb: + levels += 1 + tb = tb.tb_next + assert levels == 3 + +def test_throw_context(): + # gen.throw(exc) must not modify exc.__context__ + def gen(): + try: + yield + except Exception: + raise ValueError + + try: + raise KeyError + except KeyError: + g = gen() + next(g) + exc1 = Exception(1) + exc2 = Exception(2) + exc2.__context__ = exc1 + 
try: + g.throw(exc2) + except ValueError: + assert exc2.__context__ is exc1 + +def test_close(): + def f(): + yield 1 + g = f() + assert g.close() is None + +def test_close2(): + def f(): + try: yield 1 - g = f() - assert list(g) == [1] + except GeneratorExit: + raise StopIteration + g = f() + next(g) + assert g.close() is None - def test_generator4(self): - def f(): +def test_close3(): + def f(): + try: yield 1 - g = f() - assert [x for x in g] == [1] + except GeneratorExit: + raise NameError + g = f() + next(g) + with raises(NameError): + g.close() - def test_generator5(self): - d = {} - exec("""if 1: - def f(): - v = (yield ) - yield v - g = f() +def test_close_fail(): + def f(): + try: + yield 1 + except GeneratorExit: + yield 2 + g = f() + next(g) + with raises(RuntimeError): + g.close() + +def test_close_on_collect(): + import gc + def f(): + try: + yield + finally: + f.x = 42 + g = f() + next(g) + del g + gc.collect() + assert f.x == 42 + +def test_generator_raises_typeerror(): + def f(): + yield 1 + g = f() + with raises(TypeError): + g.send() # one argument required + with raises(TypeError): + g.send(1) # not started, must send None + +def test_generator_explicit_stopiteration(): + def f(): + yield 1 + raise StopIteration + g = f() + assert [x for x in g] == [1] + +def test_generator_propagate_stopiteration(): + def f(): + it = iter([1]) + while 1: + yield next(it) + g = f() + assert [x for x in g] == [1] + +def test_generator_restart(): + def g(): + i = next(me) + yield i + me = g() + with raises(ValueError): + next(me) + +def test_generator_expression(): + d = {} + exec("res = sum(i*i for i in range(5))", d, d) + assert d['res'] == 30 + +def test_generator_expression_2(): + def f(): + total = sum(i for i in [x for x in z]) + return total + z = [1, 2, 7] + assert f() == 10 + +def test_repr(): + def myFunc(): + yield 1 + g = myFunc() + r = repr(g) + assert r.startswith(".myFunc at 0x") + assert list(g) == [1] + assert repr(g) == r + +def 
test_unpackiterable_gen(): + g = (i * i for i in range(-5, 3)) + assert set(g) == set([0, 1, 4, 9, 16, 25]) + assert set(g) == set() + assert set(i for i in range(0)) == set() + +def test_explicit_stop_iteration_unpackiterable(): + def f(): + yield 1 + raise StopIteration + assert tuple(f()) == (1,) + +def test_exception_is_cleared_by_yield(): + def f(): + try: + foobar + except NameError: + yield 5 + raise + gen = f() + next(gen) # --> 5 + try: + next(gen) + except NameError: + pass + +def test_yield_return(): + def f(): + yield 1 + return 2 + g = f() + assert next(g) == 1 + try: next(g) - """, d, d) - g = d['g'] - assert g.send(42) == 42 + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' - def test_throw1(self): - def f(): - yield 2 - g = f() - # two arguments version - raises(NameError, g.throw, NameError, "Error") +def test_yield_from_basic(): + def f1(): + yield from [] + yield from [1, 2, 3] + yield from f2() + def f2(): + yield 4 + yield 5 + gen = f1() + assert next(gen) == 1 + assert next(gen) == 2 + assert next(gen) == 3 + assert next(gen) == 4 + assert next(gen) == 5 + assert list(gen) == [] - def test_throw2(self): - def f(): - yield 2 - g = f() - # single argument version - raises(NameError, g.throw, NameError("Error")) +def test_yield_from_return(): + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return 2 + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' - def test_throw3(self): - def f(): - try: - yield 1 - yield 2 - except: - yield 3 - g = f() - assert next(g) == 1 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, next, g) +def test_yield_from_return_tuple(): + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return (1, 2) + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == (1, 2) + 
else: + assert False, 'Expected StopIteration' - def test_throw4(self): - d = {} - exec("""if 1: - def f(): - try: - yield 1 - v = (yield 2) - except: - yield 3 - g = f() - """, d, d) - g = d['g'] - assert next(g) == 1 - assert next(g) == 2 - assert g.throw(NameError("Error")) == 3 - raises(StopIteration, next, g) +def test_set_name_qualname(): + class A: + def f(self): + yield 5 + g = A().f() + assert g.__name__ == "f" + assert g.__qualname__ == "test_set_name_qualname..A.f" + g.__name__ = "h.i" + g.__qualname__ = "j.k" + assert g.__name__ == "h.i" + assert g.__qualname__ == "j.k" + with raises(TypeError): + g.__name__ = 42 + with raises(TypeError): + g.__qualname__ = 42 + with raises((TypeError, AttributeError)): + del g.__name__ + with raises((TypeError, AttributeError)): + del g.__qualname__ - def test_throw5(self): - def f(): - try: - yield 1 - except: - x = 3 - try: - yield x - except: - pass - g = f() - next(g) - # String exceptions are not allowed anymore - raises(TypeError, g.throw, "Error") - assert g.throw(Exception) == 3 - raises(StopIteration, g.throw, Exception) +def test_gi_yieldfrom(): + def g(x): + assert gen.gi_yieldfrom is None + yield x + assert gen.gi_yieldfrom is None + def f(x): + assert gen.gi_yieldfrom is None + yield from g(x) + assert gen.gi_yieldfrom is None + yield 42 + assert gen.gi_yieldfrom is None + gen = f(5) + assert gen.gi_yieldfrom is None + assert next(gen) == 5 + assert gen.gi_yieldfrom.__name__ == 'g' + assert next(gen) == 42 + assert gen.gi_yieldfrom is None - def test_throw6(self): - def f(): - yield 2 - g = f() - raises(NameError, g.throw, NameError, "Error", None) +def test_gi_running_in_throw_generatorexit(): + # We must force gi_running to be True on the outer generators + # when running an inner custom close() method. 
+ class A: + def __iter__(self): + return self + def __next__(self): + return 42 + def close(self): + closed.append(gen.gi_running) + def g(): + yield from A() + gen = g() + assert next(gen) == 42 + closed = [] + with raises(GeneratorExit): + gen.throw(GeneratorExit) + assert closed == [True] - - def test_throw_fail(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, NameError("Error"), "error") - - def test_throw_fail2(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, list()) - - def test_throw_fail3(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.throw, NameError("Error"), None, "not tb object") - - def test_throw_finishes_generator(self): - def f(): - yield 1 - g = f() - assert g.gi_frame is not None - raises(ValueError, g.throw, ValueError) - assert g.gi_frame is None - - def test_throw_bug(self): - def f(): - try: - x.throw(IndexError) # => "generator already executing" - except ValueError: - yield 1 - x = f() - res = list(x) - assert res == [1] - - def test_throw_on_finished_generator(self): - def f(): - yield 1 - g = f() - res = next(g) - assert res == 1 - raises(StopIteration, next, g) - raises(NameError, g.throw, NameError) - - def test_throw_tb(self): - def f(): - try: - yield - except: - raise - g = f() - try: - 1/0 - except ZeroDivisionError as v: - try: - g.throw(v) - except Exception as w: - tb = w.__traceback__ - levels = 0 - while tb: - levels += 1 - tb = tb.tb_next - assert levels == 3 - - def test_throw_context(self): - # gen.throw(exc) must not modify exc.__context__ - def gen(): - try: - yield - except: - raise ValueError - - try: - raise KeyError - except KeyError: - g = gen() - next(g) - exc1 = Exception(1) - exc2 = Exception(2) - exc2.__context__ = exc1 - try: - g.throw(exc2) - except ValueError: - assert exc2.__context__ is exc1 - - def test_close(self): - def f(): - yield 1 - g = f() - assert g.close() is None - - def test_close2(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise 
StopIteration - g = f() - next(g) - assert g.close() is None - - def test_close3(self): - def f(): - try: - yield 1 - except GeneratorExit: - raise NameError - g = f() - next(g) - raises(NameError, g.close) - - def test_close_fail(self): - def f(): - try: - yield 1 - except GeneratorExit: - yield 2 - g = f() - next(g) - raises(RuntimeError, g.close) - - def test_close_on_collect(self): - def f(): - try: - yield - finally: - f.x = 42 - g = f() - next(g) - del g - import gc - gc.collect() - assert f.x == 42 - - def test_generator_raises_typeerror(self): - def f(): - yield 1 - g = f() - raises(TypeError, g.send) # one argument required - raises(TypeError, g.send, 1) # not started, must send None - - def test_generator_explicit_stopiteration(self): - def f(): - yield 1 - raise StopIteration - g = f() - assert [x for x in g] == [1] - - def test_generator_propagate_stopiteration(self): - def f(): - it = iter([1]) - while 1: yield next(it) - g = f() - assert [x for x in g] == [1] - - def test_generator_restart(self): - def g(): - i = next(me) - yield i - me = g() - raises(ValueError, next, me) - - def test_generator_expression(self): - d = {} - exec("res = sum(i*i for i in range(5))", d, d) - assert d['res'] == 30 - - def test_generator_expression_2(self): - d = {} - exec(""" -def f(): - total = sum(i for i in [x for x in z]) - return total -z = [1, 2, 7] -res = f() -""", d, d) - assert d['res'] == 10 - - def test_repr(self): - def myFunc(): - yield 1 - g = myFunc() - r = repr(g) - assert r.startswith(".myFunc at 0x") - assert list(g) == [1] - assert repr(g) == r - - def test_unpackiterable_gen(self): - g = (i*i for i in range(-5, 3)) - assert set(g) == set([0, 1, 4, 9, 16, 25]) - assert set(g) == set() - assert set(i for i in range(0)) == set() - - def test_explicit_stop_iteration_unpackiterable(self): - def f(): - yield 1 - raise StopIteration - assert tuple(f()) == (1,) - - def test_exception_is_cleared_by_yield(self): - def f(): - try: - foobar - except NameError: - 
yield 5 - raise - gen = f() - next(gen) # --> 5 - try: - next(gen) - except NameError: - pass - - def test_yield_return(self): - """ - def f(): - yield 1 - return 2 - g = f() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == 2 - else: - assert False, 'Expected StopIteration' - """ - - def test_yield_from_basic(self): - """ - def f1(): - yield from [] - yield from [1, 2, 3] - yield from f2() - def f2(): - yield 4 - yield 5 - gen = f1() - assert next(gen) == 1 - assert next(gen) == 2 - assert next(gen) == 3 - assert next(gen) == 4 - assert next(gen) == 5 - assert list(gen) == [] - """ - - def test_yield_from_return(self): - """ - def f1(): - result = yield from f2() - return result - def f2(): - yield 1 - return 2 - g = f1() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == 2 - else: - assert False, 'Expected StopIteration' - """ - - def test_yield_from_return_tuple(self): - """ - def f1(): - result = yield from f2() - return result - def f2(): - yield 1 - return (1, 2) - g = f1() - assert next(g) == 1 - try: - next(g) - except StopIteration as e: - assert e.value == (1, 2) - else: - assert False, 'Expected StopIteration' - """ - - def test_set_name_qualname(self): - class A: - def f(self): - yield 5 - g = A().f() - assert g.__name__ == "f" - assert g.__qualname__ == "test_set_name_qualname..A.f" - g.__name__ = "h.i" - g.__qualname__ = "j.k" - assert g.__name__ == "h.i" - assert g.__qualname__ == "j.k" - raises(TypeError, "g.__name__ = 42") - raises(TypeError, "g.__qualname__ = 42") - raises((TypeError, AttributeError), "del g.__name__") - raises((TypeError, AttributeError), "del g.__qualname__") - - def test_gi_yieldfrom(self): """ - def g(x): - assert gen.gi_yieldfrom is None - yield x - assert gen.gi_yieldfrom is None - def f(x): - assert gen.gi_yieldfrom is None - yield from g(x) - assert gen.gi_yieldfrom is None - yield 42 - assert gen.gi_yieldfrom is None - gen = f(5) - assert 
gen.gi_yieldfrom is None - assert next(gen) == 5 - assert gen.gi_yieldfrom.__name__ == 'g' - assert next(gen) == 42 - assert gen.gi_yieldfrom is None - """ - - def test_gi_running_in_throw_generatorexit(self): """ - # We must force gi_running to be True on the outer generators - # when running an inner custom close() method. - class A: - def __iter__(self): - return self - def __next__(self): - return 42 - def close(self): - closed.append(gen.gi_running) - def g(): - yield from A() - gen = g() - assert next(gen) == 42 - closed = [] - raises(GeneratorExit, gen.throw, GeneratorExit) - assert closed == [True] - """ - - def test_exc_info_in_generator(self): - import sys - def g(): - try: - raise ValueError - except ValueError: - yield sys.exc_info()[0] - yield sys.exc_info()[0] - try: - raise IndexError - except IndexError: - gen = g() - assert sys.exc_info()[0] is IndexError - assert next(gen) is ValueError - assert sys.exc_info()[0] is IndexError - assert next(gen) is ValueError - assert sys.exc_info()[0] is IndexError - raises(StopIteration, next, gen) - assert sys.exc_info()[0] is IndexError - - def test_exc_info_in_generator_2(self): - import sys - def g(): - yield sys.exc_info()[0] - try: - raise LookupError - except LookupError: - yield sys.exc_info()[0] - yield sys.exc_info()[0] - try: - raise IndexError - except IndexError: - gen = g() # the IndexError is not captured at all +def test_exc_info_in_generator(): + import sys + def g(): try: raise ValueError except ValueError: - assert next(gen) is ValueError - assert next(gen) is LookupError - assert next(gen) is ValueError + yield sys.exc_info()[0] + yield sys.exc_info()[0] + try: + raise IndexError + except IndexError: + gen = g() + assert sys.exc_info()[0] is IndexError + assert next(gen) is ValueError + assert sys.exc_info()[0] is IndexError + assert next(gen) is ValueError + assert sys.exc_info()[0] is IndexError + with raises(StopIteration): + next(gen) + assert sys.exc_info()[0] is IndexError - def 
test_exc_info_in_generator_3(self): - import sys - def g(): +def test_exc_info_in_generator_2(): + import sys + def g(): + yield sys.exc_info()[0] + try: + raise LookupError + except LookupError: yield sys.exc_info()[0] - yield sys.exc_info()[0] - yield sys.exc_info()[0] - gen = g() - try: - raise IndexError - except IndexError: - assert next(gen) is IndexError - assert next(gen) is None + yield sys.exc_info()[0] + try: + raise IndexError + except IndexError: + gen = g() # the IndexError is not captured at all + try: + raise ValueError + except ValueError: + assert next(gen) is ValueError + assert next(gen) is LookupError + assert next(gen) is ValueError + +def test_exc_info_in_generator_3(): + import sys + def g(): + yield sys.exc_info()[0] + yield sys.exc_info()[0] + yield sys.exc_info()[0] + gen = g() + try: + raise IndexError + except IndexError: + assert next(gen) is IndexError + assert next(gen) is None + try: + raise ValueError + except ValueError: + assert next(gen) is ValueError + +def test_exc_info_in_generator_4(): + skip("buggy behavior, both in CPython and in PyPy") + import sys + def g(): try: raise ValueError except ValueError: - assert next(gen) is ValueError + yield 1 + assert sys.exc_info() == (None, None, None) + yield 2 + gen = g() + try: + raise IndexError + except IndexError: + assert next(gen) is 1 + assert next(gen) is 2 - def test_exc_info_in_generator_4(self): - skip("buggy behavior, both in CPython and in PyPy") - import sys - def g(): - try: - raise ValueError - except ValueError: - yield 1 - assert sys.exc_info() == (None, None, None) - yield 2 - gen = g() +def test_multiple_invalid_sends(): + def mygen(): + yield 42 + g = mygen() + with raises(TypeError): + g.send(2) + with raises(TypeError): + g.send(2) + +def test_delegating_close(): + """ + Test delegating 'close' + """ + trace = [] + def g1(): try: - raise IndexError - except IndexError: - assert next(gen) is 1 - assert next(gen) is 2 + trace.append("Starting g1") + yield "g1 ham" 
+ yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + g.close() + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1" + ] - def test_multiple_invalid_sends(self): - def mygen(): - yield 42 - g = mygen() - raises(TypeError, g.send, 2) - raises(TypeError, g.send, 2) +def test_handing_exception_while_delegating_close(): + """ + Test handling exception while delegating 'close' + """ + trace = [] + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + raise ValueError("nybbles have exploded with delight") + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + with raises(ValueError) as excinfo: + g.close() + assert excinfo.value.args[0] == "nybbles have exploded with delight" + assert isinstance(excinfo.value.__context__, GeneratorExit) + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1", + ] +def test_delegating_throw(): + """ + Test delegating 'throw' + """ + trace = [] + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + finally: + trace.append("Finishing g2") + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + e = ValueError("tomato ejected") + with raises(ValueError) as excinfo: + g.throw(e) + assert excinfo.value.args[0] 
== "tomato ejected" + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Finishing g2", + "Finishing g1", + ] -class AppTestAsyncGenerator(object): +def test_delegating_throw_to_non_generator(): + """ + Test delegating 'throw' to non-generator + """ + trace = [] + def g(): + try: + trace.append("Starting g") + yield from range(10) + finally: + trace.append("Finishing g") + gi = g() + for i in range(5): + x = next(gi) + trace.append("Yielded %s" % (x,)) + with raises(ValueError) as excinfo: + gi.throw(ValueError("tomato ejected")) + assert excinfo.value.args[0] == "tomato ejected" + assert trace == [ + "Starting g", + "Yielded 0", + "Yielded 1", + "Yielded 2", + "Yielded 3", + "Yielded 4", + "Finishing g", + ] - def test_async_gen_exception_11(self): - """ - # bpo-33786 - def compare_generators(sync_gen, async_gen): - def sync_iterate(g): - res = [] - while True: - try: - res.append(g.__next__()) - except StopIteration: - res.append('STOP') - break - except Exception as ex: - res.append(str(type(ex))) - return res +def test_broken_getattr_handling(): + """ + Test subiterator with a broken getattr implementation + """ + import _io, sys + class Broken: + def __iter__(self): + return self + def __next__(self): + return 1 + def __getattr__(self, attr): + 1 / 0 - def async_iterate(g): - res = [] - while True: - an = g.__anext__() - try: - while True: - try: - an.__next__() - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - break - else: - res.append('EMPTY StopIteration') - break - except StopAsyncIteration: - raise - except Exception as ex: - res.append(str(type(ex))) - break - except StopAsyncIteration: - res.append('STOP') - break - return res + def g(): + yield from Broken() - def async_iterate(g): - res = [] - while True: - try: - g.__anext__().__next__() - except StopAsyncIteration: - res.append('STOP') - break - except StopIteration as ex: - if ex.args: - res.append(ex.args[0]) - else: - 
res.append('EMPTY StopIteration') - break - except Exception as ex: - res.append(str(type(ex))) - return res + gi = g() + assert next(gi) == 1 + with raises(ZeroDivisionError): + gi.send(1) - sync_gen_result = sync_iterate(sync_gen) - async_gen_result = async_iterate(async_gen) - assert sync_gen_result == async_gen_result, "%s != %s" % (str(sync_gen_result), str(async_gen_result)) - return async_gen_result + gi = g() + assert next(gi) == 1 + with raises(ZeroDivisionError): + gi.throw(RuntimeError) - def sync_gen(): - yield 10 - yield 20 + gi = g() + assert next(gi) == 1 + sys.stderr = _io.StringIO() + gi.close() + assert 'ZeroDivisionError' in sys.stderr.getvalue() - def sync_gen_wrapper(): +def test_returning_value_from_delegated_throw(): + """ + Test returning value from delegated 'throw' + """ + trace = [] + class LunchError(Exception): + pass + def g1(): + try: + trace.append("Starting g1") + yield "g1 ham" + yield from g2() + yield "g1 eggs" + finally: + trace.append("Finishing g1") + def g2(): + try: + trace.append("Starting g2") + yield "g2 spam" + yield "g2 more spam" + except LunchError: + trace.append("Caught LunchError in g2") + yield "g2 lunch saved" + yield "g2 yet more spam" + g = g1() + for i in range(2): + x = next(g) + trace.append("Yielded %s" % (x,)) + e = LunchError("tomato ejected") + g.throw(e) + for x in g: + trace.append("Yielded %s" % (x,)) + assert trace == [ + "Starting g1", + "Yielded g1 ham", + "Starting g2", + "Yielded g2 spam", + "Caught LunchError in g2", + "Yielded g2 yet more spam", + "Yielded g1 eggs", + "Finishing g1", + ] + +def test_catching_exception_from_subgen_and_returning(): + """ + Test catching an exception thrown into a + subgenerator and returning a value + """ + trace = [] + def inner(): + try: yield 1 - sg = sync_gen() - sg.send(None) - try: - sg.throw(GeneratorExit()) - except GeneratorExit: - yield 2 - yield 3 + except ValueError: + trace.append("inner caught ValueError") + return 2 - async def async_gen(): - yield 
10 - yield 20 + def outer(): + v = yield from inner() + trace.append("inner returned %r to outer" % v) + yield v + g = outer() + trace.append(next(g)) + trace.append(g.throw(ValueError)) + assert trace == [ + 1, + "inner caught ValueError", + "inner returned 2 to outer", + 2, + ] - async def async_gen_wrapper(): - yield 1 - asg = async_gen() - await asg.asend(None) - try: - await asg.athrow(GeneratorExit()) - except GeneratorExit: - yield 2 - yield 3 +def test_exception_context(): + import operator + def f(): + try: + raise ValueError + except ValueError: + yield from map(operator.truediv, [2, 3], [4, 0]) + gen = f() + assert next(gen) == 0.5 + try: + next(gen) + except ZeroDivisionError as e: + assert e.__context__ is not None + assert isinstance(e.__context__, ValueError) + else: + assert False, "should have raised" - compare_generators(sync_gen_wrapper(), async_gen_wrapper()) - """ +def test_past_generator_stop(): + # how it works without 'from __future__' import generator_stop + def f(x): + raise StopIteration + yield x + with raises(StopIteration): + next(f(5)) -def test_should_not_inline(space): - from pypy.interpreter.generator import should_not_inline - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - return g.__code__ - ''') - assert should_not_inline(w_co) == False - w_co = space.appexec([], '''(): - def g(x): - yield x + 5 - yield x + 6 - return g.__code__ - ''') - assert should_not_inline(w_co) == True - -class AppTestYieldFrom: - def test_delegating_close(self): - """ - Test delegating 'close' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - 
g.close() - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1" - ] - - def test_handing_exception_while_delegating_close(self): - """ - Test handling exception while delegating 'close' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - raise ValueError("nybbles have exploded with delight") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - exc = raises(ValueError, g.close) - assert exc.value.args[0] == "nybbles have exploded with delight" - assert isinstance(exc.value.__context__, GeneratorExit) - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1", - ] - - def test_delegating_throw(self): - """ - Test delegating 'throw' - """ - trace = [] - d = dict(trace=trace) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" - finally: - trace.append("Finishing g2") - ''', d) - g1, g2 = d['g1'], d['g2'] - g = g1() - for i in range(2): - x = next(g) - trace.append("Yielded %s" % (x,)) - e = ValueError("tomato ejected") - exc = raises(ValueError, g.throw, e) - assert exc.value.args[0] == "tomato ejected" - assert trace == [ - "Starting g1", - "Yielded g1 ham", - "Starting g2", - "Yielded g2 spam", - "Finishing g2", - "Finishing g1", - ] - - def test_delegating_throw_to_non_generator(self): - """ - Test delegating 'throw' to non-generator - """ - trace = [] - d = dict(trace=trace) - 
exec('''if 1: - def g(): - try: - trace.append("Starting g") - yield from range(10) - finally: - trace.append("Finishing g") - ''', d) - g = d['g'] - gi = g() - for i in range(5): - x = next(gi) - trace.append("Yielded %s" % (x,)) - exc = raises(ValueError, gi.throw, ValueError("tomato ejected")) - assert exc.value.args[0] == "tomato ejected" - assert trace == [ - "Starting g", - "Yielded 0", - "Yielded 1", - "Yielded 2", - "Yielded 3", - "Yielded 4", - "Finishing g", - ] - - def test_broken_getattr_handling(self): - """ - Test subiterator with a broken getattr implementation - """ - class Broken: - def __iter__(self): - return self - def __next__(self): - return 1 - def __getattr__(self, attr): - 1/0 - - d = dict(Broken=Broken) - exec('''if 1: - def g(): - yield from Broken() - ''', d) - g = d['g'] - - gi = g() - assert next(gi) == 1 - raises(ZeroDivisionError, gi.send, 1) - - gi = g() - assert next(gi) == 1 - raises(ZeroDivisionError, gi.throw, RuntimeError) - - gi = g() - assert next(gi) == 1 - import io, sys - sys.stderr = io.StringIO() - gi.close() - assert 'ZeroDivisionError' in sys.stderr.getvalue() - - def test_returning_value_from_delegated_throw(self): - """ - Test returning value from delegated 'throw' - """ - trace = [] - class LunchError(Exception): - pass - d = dict(trace=trace, LunchError=LunchError) - exec('''if 1: - def g1(): - try: - trace.append("Starting g1") - yield "g1 ham" - yield from g2() - yield "g1 eggs" - finally: - trace.append("Finishing g1") - def g2(): - try: - trace.append("Starting g2") - yield "g2 spam" - yield "g2 more spam" From pypy.commits at gmail.com Sun Dec 8 02:16:16 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 07 Dec 2019 23:16:16 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: fix bad merge Message-ID: <5deca340.1c69fb81.4aece.410a@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98256:23a8a6861343 Date: 2019-12-08 09:15 +0200 
http://bitbucket.org/pypy/pypy/changeset/23a8a6861343/ Log: fix bad merge diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -93,16 +93,6 @@ PyErr_BadInternalCall(space) return w_dict.getitem(w_key) - at cpython_api([PyObject, PyObject], PyObject, result_borrowed=True) -def PyDict_GetItemWithError(space, w_dict, w_key): - """Variant of PyDict_GetItem() that does not suppress - exceptions. Return NULL with an exception set if an exception - occurred. Return NULL without an exception set if the key - wasn't present.""" - if not isinstance(w_dict, W_DictMultiObject): - PyErr_BadInternalCall(space) - return w_dict.getitem(w_key) - @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_SetItem(space, w_dict, w_key, w_obj): if not isinstance(w_dict, W_DictMultiObject): From pypy.commits at gmail.com Sun Dec 8 09:35:19 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 08 Dec 2019 06:35:19 -0800 (PST) Subject: [pypy-commit] pypy default: this is the wrong abstraction, the archive name needs to match for uploading Message-ID: <5ded0a27.1c69fb81.73764.b358@mx.google.com> Author: Matti Picus Branch: Changeset: r98257:9decd6d9ff68 Date: 2019-12-08 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/9decd6d9ff68/ Log: this is the wrong abstraction, the archive name needs to match for uploading diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,8 +67,6 @@ name = options.name if not name: name = 'pypy-nightly' - if options.make_portable and 'portable' not in name: - name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Sun Dec 8 09:35:21 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 08 Dec 2019 06:35:21 -0800 (PST) Subject: [pypy-commit] 
pypy py3.6: merge default into py3.6 Message-ID: <5ded0a29.1c69fb81.9242c.ff4e@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98258:73547ce83824 Date: 2019-12-08 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/73547ce83824/ Log: merge default into py3.6 diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -64,8 +64,6 @@ name = options.name if not name: name = 'pypy-nightly' - if options.make_portable and 'portable' not in name: - name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Sun Dec 8 09:35:23 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 08 Dec 2019 06:35:23 -0800 (PST) Subject: [pypy-commit] pypy release-pypy2.7-v7.x: merge default into branch Message-ID: <5ded0a2b.1c69fb81.d5ef9.2360@mx.google.com> Author: Matti Picus Branch: release-pypy2.7-v7.x Changeset: r98259:e6471221abc1 Date: 2019-12-08 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/e6471221abc1/ Log: merge default into branch diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -67,8 +67,6 @@ name = options.name if not name: name = 'pypy-nightly' - if options.make_portable and 'portable' not in name: - name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Sun Dec 8 09:35:25 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 08 Dec 2019 06:35:25 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release Message-ID: <5ded0a2d.1c69fb81.77bcd.46e9@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98260:d9820e22930d Date: 2019-12-08 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/d9820e22930d/ Log: merge py3.6 into release diff --git 
a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -64,8 +64,6 @@ name = options.name if not name: name = 'pypy-nightly' - if options.make_portable and 'portable' not in name: - name += '-portable' assert '/' not in name rename_pypy_c = options.pypy_c override_pypy_c = options.override_pypy_c From pypy.commits at gmail.com Mon Dec 9 08:22:07 2019 From: pypy.commits at gmail.com (arigo) Date: Mon, 09 Dec 2019 05:22:07 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Issue #3111 Message-ID: <5dee4a7f.1c69fb81.3e1ce.0276@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r98261:1a1d7d250c82 Date: 2019-12-09 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/1a1d7d250c82/ Log: Issue #3111 Back-port the tentative bpo-38091 from cpython: Import deadlock detection causes deadlock diff --git a/lib-python/3/importlib/_bootstrap.py b/lib-python/3/importlib/_bootstrap.py --- a/lib-python/3/importlib/_bootstrap.py +++ b/lib-python/3/importlib/_bootstrap.py @@ -67,6 +67,7 @@ # Deadlock avoidance for concurrent circular imports. me = _thread.get_ident() tid = self.owner + count = 0 while True: lock = _blocking_on.get(tid) if lock is None: @@ -74,6 +75,14 @@ tid = lock.owner if tid == me: return True + # workaround for https://bugs.python.org/issue38091: + # instead of looping here forever, eventually return False. + # Unsure if this will cause real deadlocks to go undetected, + # but at least it doesn't cause *this* logic here to + # deadlock when there is otherwise no deadlock! 
+ count += 1 + if count >= 100: + return False def acquire(self): """ diff --git a/lib-python/3/test/test_import/__init__.py b/lib-python/3/test/test_import/__init__.py --- a/lib-python/3/test/test_import/__init__.py +++ b/lib-python/3/test/test_import/__init__.py @@ -366,16 +366,22 @@ os.does_not_exist def test_concurrency(self): + def delay_has_deadlock(frame, event, arg): + if event == 'call' and frame.f_code.co_name == 'has_deadlock': + time.sleep(0.05) + sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data')) try: exc = None def run(): + sys.settrace(delay_has_deadlock) event.wait() try: import package except BaseException as e: nonlocal exc exc = e + sys.settrace(None) for i in range(10): event = threading.Event() From pypy.commits at gmail.com Mon Dec 9 11:10:30 2019 From: pypy.commits at gmail.com (Stian Andreassen) Date: Mon, 09 Dec 2019 08:10:30 -0800 (PST) Subject: [pypy-commit] pypy py3.7-tracemalloc: Expose tracemalloc C API Message-ID: <5dee71f6.1c69fb81.83c78.3951@mx.google.com> Author: Stian Andreassen Branch: py3.7-tracemalloc Changeset: r98262:c31bb71da53e Date: 2019-12-09 17:09 +0100 http://bitbucket.org/pypy/pypy/changeset/c31bb71da53e/ Log: Expose tracemalloc C API diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -655,7 +655,7 @@ 'PyMem_RawMalloc', 'PyMem_RawCalloc', 'PyMem_RawRealloc', 'PyMem_RawFree', 'PyMem_Malloc', 'PyMem_Calloc', 'PyMem_Realloc', 'PyMem_Free', 'PyObject_CallFinalizerFromDealloc', - '_PyTraceMalloc_Track', '_PyTraceMalloc_Untrack', + 'PyTraceMalloc_Track', 'PyTraceMalloc_Untrack', 'PyBytes_FromFormat', 'PyBytes_FromFormatV', 'PyType_FromSpec', diff --git a/pypy/module/cpyext/include/pymem.h b/pypy/module/cpyext/include/pymem.h --- a/pypy/module/cpyext/include/pymem.h +++ b/pypy/module/cpyext/include/pymem.h @@ -61,9 +61,9 @@ #define PyMem_DEL PyMem_FREE -/* From CPython 3.6, with a different goal. 
_PyTraceMalloc_Track() +/* From CPython 3.6, with a different goal. PyTraceMalloc_Track() * is equivalent to __pypy__.add_memory_pressure(size); it works with - * or without the GIL. _PyTraceMalloc_Untrack() is an empty stub. + * or without the GIL. PyTraceMalloc_Untrack() is an empty stub. * You can check if these functions are available by using: * * #if defined(PYPY_TRACEMALLOC) || \ @@ -71,11 +71,9 @@ */ #define PYPY_TRACEMALLOC 1 -typedef unsigned int _PyTraceMalloc_domain_t; - -PyAPI_FUNC(int) _PyTraceMalloc_Track(_PyTraceMalloc_domain_t domain, +PyAPI_FUNC(int) PyTraceMalloc_Track(unsigned int domain, uintptr_t ptr, size_t size); -PyAPI_FUNC(int) _PyTraceMalloc_Untrack(_PyTraceMalloc_domain_t domain, +PyAPI_FUNC(int) PyTraceMalloc_Untrack(unsigned int domain, uintptr_t ptr); diff --git a/pypy/module/cpyext/src/pymem.c b/pypy/module/cpyext/src/pymem.c --- a/pypy/module/cpyext/src/pymem.c +++ b/pypy/module/cpyext/src/pymem.c @@ -94,7 +94,7 @@ free(ptr); } -int _PyTraceMalloc_Track(_PyTraceMalloc_domain_t domain, +int PyTraceMalloc_Track(unsigned int domain, uintptr_t ptr, size_t size) { /* to avoid acquiring/releasing the GIL too often, only do it @@ -133,7 +133,7 @@ /* Should we return -2 or 0? In theory it should be -2, because we're not using the info to really track the allocations. But I'm sure someone is too clever somewhere and stops calling - _PyTraceMalloc_Track() if it returns -2. On the other hand, + PyTraceMalloc_Track() if it returns -2. On the other hand, returning 0 might lead to expectations that importing 'tracemalloc' works on Python 3. Oh well, in that case we'll just crash with ImportError during 'import tracemalloc'. 
@@ -141,7 +141,7 @@ return 0; } -int _PyTraceMalloc_Untrack(_PyTraceMalloc_domain_t domain, +int PyTraceMalloc_Untrack(unsigned int domain, uintptr_t ptr) { /* nothing to do */ diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -391,7 +391,7 @@ module = self.import_extension('foo', [ ("foo", "METH_O", """ - _PyTraceMalloc_Track(0, 0, PyLong_AsLong(args) - sizeof(long)); + PyTraceMalloc_Track(0, 0, PyLong_AsLong(args) - sizeof(long)); Py_INCREF(Py_None); return Py_None; """)]) From pypy.commits at gmail.com Tue Dec 10 11:50:04 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 10 Dec 2019 08:50:04 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v7.3.0rc1 for changeset e6471221abc1 Message-ID: <5defccbc.1c69fb81.7632e.6e0e@mx.google.com> Author: Matti Picus Branch: Changeset: r98265:38dd5be2287d Date: 2019-12-10 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/38dd5be2287d/ Log: Added tag release-pypy2.7-v7.3.0rc1 for changeset e6471221abc1 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -57,3 +57,4 @@ 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0rc2 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 +e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 From pypy.commits at gmail.com Tue Dec 10 11:50:06 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 10 Dec 2019 08:50:06 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release Message-ID: <5defccbe.1c69fb81.513c0.6224@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98266:533398cfd64e Date: 2019-12-10 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/533398cfd64e/ Log: merge py3.6 into release diff --git a/lib-python/3/importlib/_bootstrap.py 
b/lib-python/3/importlib/_bootstrap.py --- a/lib-python/3/importlib/_bootstrap.py +++ b/lib-python/3/importlib/_bootstrap.py @@ -67,6 +67,7 @@ # Deadlock avoidance for concurrent circular imports. me = _thread.get_ident() tid = self.owner + count = 0 while True: lock = _blocking_on.get(tid) if lock is None: @@ -74,6 +75,14 @@ tid = lock.owner if tid == me: return True + # workaround for https://bugs.python.org/issue38091: + # instead of looping here forever, eventually return False. + # Unsure if this will cause real deadlocks to go undetected, + # but at least it doesn't cause *this* logic here to + # deadlock when there is otherwise no deadlock! + count += 1 + if count >= 100: + return False def acquire(self): """ diff --git a/lib-python/3/test/test_import/__init__.py b/lib-python/3/test/test_import/__init__.py --- a/lib-python/3/test/test_import/__init__.py +++ b/lib-python/3/test/test_import/__init__.py @@ -366,16 +366,22 @@ os.does_not_exist def test_concurrency(self): + def delay_has_deadlock(frame, event, arg): + if event == 'call' and frame.f_code.co_name == 'has_deadlock': + time.sleep(0.05) + sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data')) try: exc = None def run(): + sys.settrace(delay_has_deadlock) event.wait() try: import package except BaseException as e: nonlocal exc exc = e + sys.settrace(None) for i in range(10): event = threading.Event() From pypy.commits at gmail.com Tue Dec 10 11:50:08 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 10 Dec 2019 08:50:08 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy3.6-v7.3.0rc1 for changeset 533398cfd64e Message-ID: <5defccc0.1c69fb81.974ee.590e@mx.google.com> Author: Matti Picus Branch: Changeset: r98267:86951c9b1898 Date: 2019-12-10 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/86951c9b1898/ Log: Added tag release-pypy3.6-v7.3.0rc1 for changeset 533398cfd64e diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -58,3 +58,4 @@ 
4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 +533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 From pypy.commits at gmail.com Tue Dec 10 11:50:10 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 10 Dec 2019 08:50:10 -0800 (PST) Subject: [pypy-commit] pypy default: tweak release note and repackaging script Message-ID: <5defccc2.1c69fb81.3281e.92ee@mx.google.com> Author: Matti Picus Branch: Changeset: r98268:29db3c15c1cd Date: 2019-12-10 18:48 +0200 http://bitbucket.org/pypy/pypy/changeset/29db3c15c1cd/ Log: tweak release note and repackaging script diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -177,6 +177,8 @@ * Adds encoding, decoding codepages on win32 * Remove socket error attributes from ``_ssl`` (`issue 3119`_) * Add missing ``os.getgrouplist`` (part of `issue 2375`_) +* Back-port the tentative fix from cpython: "Import deadlock detection causes + deadlock" (part of `issue 3111`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -210,6 +212,7 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3111`: https://bitbucket.com/pypy/pypy/issues/3111 .. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. 
_`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,8 +2,8 @@ pmaj=2 # python main version: 2 or 3 pmin=7 # python minor version maj=7 -min=2 -rev=0rc2 +min=3 +rev=0rc1 case $pmaj in "2") exe=pypy;; From pypy.commits at gmail.com Wed Dec 11 03:41:11 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 11 Dec 2019 00:41:11 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: add pypy3.6 7.3.0rc1 hashes Message-ID: <5df0aba7.1c69fb81.990ab.6748@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r959:957dc5e9e470 Date: 2019-12-11 10:29 +0200 http://bitbucket.org/pypy/pypy.org/changeset/957dc5e9e470/ Log: add pypy3.6 7.3.0rc1 hashes diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -395,6 +395,17 @@ 51d8b0dee35c593072ac63dafcde3bf02deb0f0fe3c40585dc29a2d520882197 pypy2.7-v7.3.0rc1-src.zip 6d1f9766bca3445e7275494be7b72d196fd6f9de0c2bde0d509cfaad930bee67 pypy2.7-v7.3.0rc1-win32.zip +pypy3.6-7.3.0 sha256:: + + dc7c2a34920e13a2968f822291d1a85faec99f7c8708da15828ae3f4b142b284 pypy3.6-v7.3.0rc1-aarch64.tar.bz2 + 16d7ee8b6e031863fd958024d9d38dcb114484d4673db5f0ada60bedb5c2ed2c pypy3.6-v7.3.0rc1-linux32.tar.bz2 + 4b4d63d60746a8812a5a6524b5242425a41dbe2bcdb59435893d1212048a1f18 pypy3.6-v7.3.0rc1-linux64.tar.bz2 + 3ae4c7fa6c66c402d71f4e720969b0e89cf7550745cb585981ad5be6385758ca pypy3.6-v7.3.0rc1-osx64.tar.bz2 + df80be7d215aa967d40d26c59b825fff2c5ff2bd1764e1300a4dd15d5c1b482f pypy3.6-v7.3.0rc1-s390x.tar.bz2 + f386f658b53c5010a93b722d87a3e536fd61bff8da0a401515f8640082443530 pypy3.6-v7.3.0rc1-src.tar.bz2 + 0943983740ed0ed29c5097c5a95b1d269357377477d7e2a10e597c728a25485b pypy3.6-v7.3.0rc1-src.zip + a099a46c5efca7927cf304dd36b98e9c61b9da7657ce555275bb1b9e33f7a095 pypy3.6-v7.3.0rc1-win32.zip + pypy2.7-7.2.0 sha256:: 
57b0be053c6a5f069e23b843f38863cf7920f5eef7bc89f2e086e5c3a28a2ba9 pypy2.7-v7.2.0-aarch64.tar.bz2 @@ -462,15 +473,3 @@ 4858e7e8a0007bc3b381bd392208b28d30889a4e5a88a3c28e3d9dc4f25b654e pypy3.6-v7.1.0-src.zip 77a0576a3d518210467f0df2d0d9a1892c664566dc02f25d974c2dbc6b4749e7 pypy3.6-v7.1.0-win32.zip -pypy2.7-7.0.0 sha256:: - - 446fc208dd77a0048368da830564e6e4180bcd786e524b5369c61785af5c903a pypy2.7-v7.0.0-linux32.tar.bz2 - 971b1909f9fe960c4c643a6940d3f8a60d9a7a2937119535ab0cfaf83498ecd7 pypy2.7-v7.0.0-linux64.tar.bz2 - e7ecb029d9c7a59388838fc4820a50a2f5bee6536010031060e3dfa882730dc8 pypy2.7-v7.0.0-osx64.tar.bz2 - 2ce390d93fa57ba912066a8b6439588bd9cf6aa9cef44d892b8e3e6dba64615e pypy2.7-v7.0.0-s390x.tar.bz2 - 04477a41194240cd71e485c3f41dec35a787d1b3bc030f9aa59e5e81bcf4118b pypy2.7-v7.0.0-win32.zip - 165ffdf49a04c3ebdc966f76e67dd1767ad699657215dd83ca6996ab8ed87f52 pypy2.7-v7.0.0-ppc64.tar.bz2 - cfb0e2e9b1434e94ea559548c7486c8e7b4319a397309e8ed3783d9beadf1c6c pypy2.7-v7.0.0-ppc64le.tar.bz2 - f51d8bbfc4e73a8a01820b7871a45d13c59f1399822cdf8a19388c69eb20c18c pypy2.7-v7.0.0-src.tar.bz2 - 77c8c02cf412a5f8182ffe8845877cffa506e5a5ce3a7cd835483fdc1202afd4 pypy2.7-v7.0.0-src.zip - From pypy.commits at gmail.com Wed Dec 11 05:05:08 2019 From: pypy.commits at gmail.com (arigo) Date: Wed, 11 Dec 2019 02:05:08 -0800 (PST) Subject: [pypy-commit] pypy kill-asmgcc: in-progress: remove asmgcc completely (modern versions of gcc show test Message-ID: <5df0bf54.1c69fb81.bd6cb.7413@mx.google.com> Author: Armin Rigo Branch: kill-asmgcc Changeset: r98269:b23b24b53574 Date: 2019-12-11 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/b23b24b53574/ Log: in-progress: remove asmgcc completely (modern versions of gcc show test failures) diff too long, truncating to 2000 out of 12498 lines diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -14,7 +14,7 @@ conf = get_pypy_config() 
conf.translation.gc = "boehm" with py.test.raises(ConfigError): - conf.translation.gcrootfinder = 'asmgcc' + conf.translation.gcrootfinder = 'shadowstack' def test_frameworkgc(): for name in ["minimark", "semispace"]: diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,16 +1,7 @@ Choose the method used to find the roots in the GC. This only -applies to our framework GCs. You have a choice of two -alternatives: +applies to our framework GCs. - ``--gcrootfinder=shadowstack``: use a so-called "shadow stack", which is an explicitly maintained custom stack of - root pointers. This is the most portable solution. - -- ``--gcrootfinder=asmgcc``: use assembler hackery to find the - roots directly from the normal stack. This is a bit faster, - but platform specific. It works so far with GCC or MSVC, - on i386 and x86-64. It is tested only on Linux - so other platforms (as well as MSVC) may need - various fixes before they can be used. Note asmgcc will be deprecated - at some future date, and does not work with clang. - + root pointers. This is the most portable solution, and also + the only one available now. 
diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -43,8 +43,7 @@ from rpython.rlib import rgil rgil.acquire() - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) @@ -69,7 +68,6 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - rffi.stackcounter.stacks_counter -= 1 rgil.release() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1026,8 +1026,7 @@ else: gilstate = pystate.PyGILState_IGNORE - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C retval = fatal_value boxed_args = () tb = None @@ -1104,7 +1103,6 @@ return fatal_value assert lltype.typeOf(retval) == restype - rffi.stackcounter.stacks_counter -= 1 _restore_gil_state(pygilstate_release, gilstate, gil_release, _gil_auto, tid) return retval diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -30,7 +30,7 @@ # called from the rffi-generated wrapper). The gc_thread_run() # operation will automatically notice that the current thread id was # not seen before, and (in shadowstack) it will allocate and use a -# fresh new stack. Again, this has no effect in asmgcc. +# fresh new stack. # # * Only then does bootstrap() really run. The first thing it does # is grab the start-up information (app-level callable and args) @@ -43,7 +43,7 @@ # thread. # # * Just before a thread finishes, gc_thread_die() is called to free -# its shadow stack. This has no effect in asmgcc. 
+# its shadow stack. class Bootstrapper(object): diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -13,13 +13,6 @@ config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') -if compiler.name == 'msvc' or sys.platform == 'darwin': - def test_no_asmgcrot_on_msvc(): - config = get_combined_translation_config() - config.translation.gcrootfinder = "asmgcc" - py.test.raises(ConfigError, set_opt_level, config, 'jit') - - def test_get_translation_config(): from rpython.translator.interactive import Translation from rpython.config import config diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -18,10 +18,6 @@ DEFL_GC = "incminimark" # XXX DEFL_ROOTFINDER_WITHJIT = "shadowstack" -## if sys.platform.startswith("linux"): -## _mach = os.popen('uname -m', 'r').read().strip() -## if _mach.startswith('x86') or _mach in ['i386', 'i486', 'i586', 'i686']: -## DEFL_ROOTFINDER_WITHJIT = "asmgcc" # only for Linux on x86 / x86-64 IS_64_BITS = sys.maxint > 2147483647 @@ -100,13 +96,11 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ["n/a", "shadowstack"], "shadowstack", cmdline="--gcrootfinder", requires={ "shadowstack": [("translation.gctransformer", "framework")], - "asmgcc": [("translation.gctransformer", "framework"), - ("translation.backend", "c")], }), # other noticeable options @@ -402,10 +396,6 @@ # if we have specified strange inconsistent settings. 
config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X and on Win32 - if config.translation.gcrootfinder == "asmgcc": - if sys.platform == "darwin" or sys.platform =="win32": - raise ConfigError("'asmgcc' not supported on this platform") # ---------------------------------------------------------------- diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -438,51 +438,8 @@ @staticmethod @rgc.no_collect - def _reacquire_gil_asmgcc(css, old_rpy_fastgil): - # Before doing an external call, 'rpy_fastgil' is initialized to - # be equal to css. This function is called if we find out after - # the call that it is no longer equal to css. See description - # in translator/c/src/thread_pthread.c. - - # XXX some duplicated logic here, but note that rgil.acquire() - # does more than just RPyGilAcquire() - if old_rpy_fastgil == 0: - # this case occurs if some other thread stole the GIL but - # released it again. What occurred here is that we changed - # 'rpy_fastgil' from 0 to 1, thus successfully reaquiring the - # GIL. - pass - - elif old_rpy_fastgil == 1: - # 'rpy_fastgil' was (and still is) locked by someone else. - # We need to wait for the regular mutex. - from rpython.rlib import rgil - rgil.acquire() - else: - # stole the GIL from a different thread that is also - # currently in an external call from the jit. Attach - # the 'old_rpy_fastgil' into the chained list. 
- from rpython.memory.gctransform import asmgcroot - oth = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, old_rpy_fastgil) - next = asmgcroot.gcrootanchor.next - oth.next = next - oth.prev = asmgcroot.gcrootanchor - asmgcroot.gcrootanchor.next = oth - next.prev = oth - - # similar to trackgcroot.py:pypy_asm_stackwalk, second part: - # detach the 'css' from the chained list - from rpython.memory.gctransform import asmgcroot - old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) - prev = old.prev - next = old.next - prev.next = next - next.prev = prev - - @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): - # Simplified version of _reacquire_gil_asmgcc(): in shadowstack mode, + # This used to be more complex for asmgcc. In shadowstack mode, # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) @@ -499,13 +456,10 @@ self._reacquire_gil_shadowstack) self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) else: - reacqgil_func = llhelper(self._REACQGIL2_FUNC, - self._reacquire_gil_asmgcc) - self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) + raise AssertionError("!is_shadow_stack") def _is_asmgcc(self): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - return bool(gcrootmap) and not gcrootmap.is_shadow_stack + return False # legacy def debug_bridge(descr_number, rawstart, codeendpos): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -21,7 +21,6 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler -from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo # 
____________________________________________________________ @@ -117,7 +116,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(jitframe.JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth', + 'jf_frame_info', 'jf_gcmap', 'jf_savedata', 'jf_forward']: setattr(descrs, name, cpu.fielddescrof(jitframe.JITFRAME, name)) descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, @@ -244,15 +243,6 @@ # ____________________________________________________________ # All code below is for the hybrid or minimark GC -class GcRootMap_asmgcc(object): - is_shadow_stack = False - - def __init__(self, gcdescr): - pass - - def register_asm_addr(self, start, mark): - pass - class GcRootMap_shadowstack(object): is_shadow_stack = True diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -49,7 +49,6 @@ rgc.register_custom_trace_hook(JITFRAME, lambda_jitframe_trace) frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info - frame.jf_extra_stack_depth = 0 return frame def jitframe_resolve(frame): @@ -71,8 +70,6 @@ ('jf_force_descr', llmemory.GCREF), # a map of GC pointers ('jf_gcmap', lltype.Ptr(GCMAP)), - # how much we decrease stack pointer. 
Used around calls and malloc slowpath - ('jf_extra_stack_depth', lltype.Signed), # For the front-end: a GCREF for the savedata ('jf_savedata', llmemory.GCREF), # For GUARD_(NO)_EXCEPTION and GUARD_NOT_FORCED: the exception we @@ -103,7 +100,6 @@ LENGTHOFS = llmemory.arraylengthoffset(JITFRAME.jf_frame) SIGN_SIZE = llmemory.sizeof(lltype.Signed) UNSIGN_SIZE = llmemory.sizeof(lltype.Unsigned) -STACK_DEPTH_OFS = getofs('jf_extra_stack_depth') def jitframe_trace(gc, obj_addr, callback, arg): gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr')) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -584,8 +584,6 @@ length = self.emit_getfield(ConstInt(frame_info), descr=descrs.jfi_frame_depth, raw=True) - self.emit_setfield(frame, self.c_zero, - descr=descrs.jf_extra_stack_depth) self.emit_setfield(frame, self.c_null, descr=descrs.jf_savedata) self.emit_setfield(frame, self.c_null, diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -110,7 +110,7 @@ class config_(object): class translation(object): gc = self.gc - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False class FakeTranslator(object): diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -507,7 +507,6 @@ ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), ('jf_descr', llmemory.GCREF), ('jf_force_descr', llmemory.GCREF), - ('jf_extra_stack_depth', lltype.Signed), ('jf_guard_exc', llmemory.GCREF), ('jf_gcmap', lltype.Ptr(jitframe.GCMAP)), 
('jf_gc_trace_state', lltype.Signed), @@ -594,7 +593,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + 'jf_frame_info', 'jf_gcmap']: setattr(descrs, name, cpu.fielddescrof(JITFRAME, name)) descrs.jfi_frame_depth = cpu.fielddescrof(jitframe.JITFRAMEINFO, 'jfi_frame_depth') diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -170,7 +170,6 @@ jf_descr = framedescrs.jf_descr jf_guard_exc = framedescrs.jf_guard_exc jf_forward = framedescrs.jf_forward - jf_extra_stack_depth = framedescrs.jf_extra_stack_depth signedframedescr = self.cpu.signedframedescr floatframedescr = self.cpu.floatframedescr casmdescr.compiled_loop_token = clt @@ -386,7 +385,7 @@ class config_(object): class translation(object): gc = 'minimark' - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False gcdescr = get_description(config_) @@ -1102,7 +1101,6 @@ p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) - %(setfield('p1', 0, jf_extra_stack_depth))s %(setfield('p1', 'NULL', jf_savedata))s %(setfield('p1', 'NULL', jf_force_descr))s %(setfield('p1', 'NULL', jf_descr))s diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -176,9 +176,6 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - 
py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -331,9 +331,6 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -35,9 +35,7 @@ PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, - # and it can be left here for when it is needed. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and it can be left here for when it is needed. THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 @@ -45,12 +43,10 @@ PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, - # and is moved into this frame location. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and is moved into this frame location. 
THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD -assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 +assert PASS_ON_MY_FRAME >= 12 # return address, followed by FRAME_FIXED_SIZE words DEFAULT_FRAME_BYTES = (1 + FRAME_FIXED_SIZE) * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -137,11 +137,6 @@ self.expand_byte_mask_addr = float_constants + 64 self.element_ones = [float_constants + 80 + 16*i for i in range(4)] - def set_extra_stack_depth(self, mc, value): - if self._is_asmgcc(): - extra_ofs = self.cpu.get_ofs_of_frame_field('jf_extra_stack_depth') - mc.MOV_bi(extra_ofs, value) - def build_frame_realloc_slowpath(self): mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) @@ -161,14 +156,20 @@ mc.MOV_sr(0, ebp.value) # align - self.set_extra_stack_depth(mc, align * WORD) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. 
+ # + #self.set_extra_stack_depth(mc, align * WORD) + self._store_and_reset_exception(mc, None, ebx, ecx) mc.CALL(imm(self.cpu.realloc_frame)) mc.MOV_rr(ebp.value, eax.value) self._restore_exception(mc, None, ebx, ecx) mc.ADD_ri(esp.value, (align - 1) * WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -196,12 +197,12 @@ # the caller already did push_gcmap(store=True) if IS_X86_64: mc.SUB(esp, imm(WORD)) # alignment - self.set_extra_stack_depth(mc, 2 * WORD) + #self.set_extra_stack_depth(mc, 2 * WORD) # the arguments are already in the correct registers else: # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 8 * WORD) + #self.set_extra_stack_depth(mc, 8 * WORD) # store the arguments at the correct place in the stack for i in range(4): mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) @@ -211,7 +212,7 @@ mc.ADD(esp, imm(WORD)) else: mc.ADD(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [eax], supports_floats, callee_only) mc.RET() @@ -275,11 +276,11 @@ # (already in edx) # length mc.MOV_rr(esi.value, ecx.value) # tid mc.MOV_rs(edi.value, WORD * 3) # load the itemsize - self.set_extra_stack_depth(mc, 16) + #self.set_extra_stack_depth(mc, 16) mc.CALL(imm(follow_jump(addr))) self._reload_frame_if_necessary(mc) mc.ADD_ri(esp.value, 16 - WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) # mc.TEST_rr(eax.value, eax.value) # common case: not taken @@ -1018,8 +1019,6 @@ from rpython.rlib.rvmprof.rvmprof import cintf # edx = address of pypy_threadlocal_s self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(edx.value, ~1) # eax = (our local vmprof_tl_stack).next 
self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax @@ -2236,25 +2235,6 @@ def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) - if self._is_asmgcc(): - # We need to remove the bit "already seen during the - # previous minor collection" instead of passing this - # value directly. - if IS_X86_64: - tmploc = esi # already the correct place - if argloc is tmploc: - # this case is theoretical only so far: in practice, - # argloc is always eax, never esi - self.mc.MOV_rr(edi.value, esi.value) - argloc = edi - else: - tmploc = eax - if tmploc is argloc: - tmploc = edx - self.mc.MOV(tmploc, threadlocal_loc) - self.mc.AND_ri(tmploc.value, ~1) - threadlocal_loc = tmploc - # self.simple_call(addr, [argloc, threadlocal_loc]) def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): @@ -2672,8 +2652,6 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(resloc.value, ~1) self.load_from_mem(resloc, addr_add_const(resloc, offset), imm(size), imm(sign)) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -61,13 +61,6 @@ self.arglocs = arglocs + [fnloc] self.start_frame_size = self.mc._frame_size - def select_call_release_gil_mode(self): - AbstractCallBuilder.select_call_release_gil_mode(self) - if self.asm._is_asmgcc(): - from rpython.memory.gctransform import asmgcroot - self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS - assert self.stack_max >= 3 - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -103,9 +96,14 @@ # value eax, if necessary assert not self.is_call_release_gil current_esp = self.get_current_esp() - self.change_extra_stack_depth = (current_esp != 0) - if 
self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, -current_esp) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. + # + #self.change_extra_stack_depth = (current_esp != 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, -current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) @@ -119,13 +117,14 @@ # top at this point, so reuse it instead of loading it again ssreg = ebx self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) - if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap assert self.is_call_release_gil + assert not self.asm._is_asmgcc() # # Save this thread's shadowstack pointer into 'ebx', # for later comparison @@ -135,38 +134,12 @@ rst = gcrootmap.get_root_stack_top_addr() self.mc.MOV(ebx, heap(rst)) # - if not self.asm._is_asmgcc(): - # shadowstack: change 'rpy_fastgil' to 0 (it should be - # non-zero right now). - self.change_extra_stack_depth = False - # ^^ note that set_extra_stack_depth() in this case is a no-op - css_value = imm(0) - else: - from rpython.memory.gctransform import asmgcroot - # build a 'css' structure on the stack: 2 words for the linkage, - # and 5/7 words as described for asmgcroot.ASM_FRAMEDATA, for a - # total size of JIT_USE_WORDS. This structure is found at - # [ESP+css]. 
- css = -self.get_current_esp() + ( - WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS)) - assert css >= 2 * WORD - # Save ebp - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_sr(index_of_ebp, ebp.value) # MOV [css.ebp], EBP - # Save the "return address": we pretend that it's css - self.mc.LEA_rs(eax.value, css) # LEA eax, [css] - frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_sr(frame_ptr, eax.value) # MOV [css.frame], eax - # Set up jf_extra_stack_depth to pretend that the return address - # was at css, and so our stack frame is supposedly shorter by - # (PASS_ON_MY_FRAME-JIT_USE_WORDS+1) words - delta = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS + 1 - self.change_extra_stack_depth = True - self.asm.set_extra_stack_depth(self.mc, -delta * WORD) - css_value = eax + # shadowstack: change 'rpy_fastgil' to 0 (it should be + # non-zero right now). + #self.change_extra_stack_depth = False # # <--here--> would come a memory fence, if the CPU needed one. 
- self.mc.MOV(heap(fastgil), css_value) + self.mc.MOV(heap(fastgil), imm(0)) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more @@ -184,8 +157,6 @@ self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, THREADLOCAL_OFS - self.get_current_esp()) - if self.asm._is_asmgcc(): - self.mc.AND_ri(self.tlofs_reg.value, ~1) return self.tlofs_reg def save_stack_position(self): @@ -318,13 +289,6 @@ cb = self.callbuilder if not cb.result_value_saved_early: cb.save_result_value(save_edx=False) - if assembler._is_asmgcc(): - if IS_X86_32: - css_value = edx - old_value = ecx - mc.MOV_sr(4, old_value.value) - mc.MOV_sr(0, css_value.value) - # on X86_64, they are already in the right registers mc.CALL(imm(follow_jump(assembler.reacqgil_addr))) if not cb.result_value_saved_early: cb.restore_result_value(save_edx=False) @@ -333,29 +297,10 @@ from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not - # (to acquiring the GIL, remove the asmgcc head from - # the chained list, etc.) + # (to acquiring the GIL) mc = self.mc restore_edx = False - if not self.asm._is_asmgcc(): - css = 0 - css_value = imm(0) - old_value = ecx - else: - from rpython.memory.gctransform import asmgcroot - css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) - if IS_X86_32: - assert css >= 16 - if self.restype == 'L': # long long result: eax/edx - if not self.result_value_saved_early: - mc.MOV_sr(12, edx.value) - restore_edx = True - css_value = edx # note: duplicated in ReacqGilSlowPath - old_value = ecx # - elif IS_X86_64: - css_value = edi - old_value = esi - mc.LEA_rs(css_value.value, css) + old_value = ecx # # Use XCHG as an atomic test-and-set-lock. It also implicitly # does a memory barrier. 
@@ -365,11 +310,12 @@ else: mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) - mc.CMP(old_value, css_value) + mc.CMP(old_value, imm(0)) # gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap - if bool(gcrootmap) and gcrootmap.is_shadow_stack: + if bool(gcrootmap): from rpython.jit.backend.x86.assembler import heap + assert gcrootmap.is_shadow_stack # # When doing a call_release_gil with shadowstack, there # is the risk that the 'rpy_fastgil' was free but the @@ -406,14 +352,8 @@ if not we_are_translated(): # for testing: now we can accesss mc.SUB(ebp, imm(1)) # ebp again # - # Now that we required the GIL, we can reload a possibly modified ebp - if self.asm._is_asmgcc(): - # special-case: reload ebp from the css - from rpython.memory.gctransform import asmgcroot - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] - #else: - # for shadowstack, done for us by _reload_frame_if_necessary() + # Now that we required the GIL, we will reload a possibly modified ebp: + # this done for us by _reload_frame_if_necessary() def save_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -829,10 +829,7 @@ self.xrm.before_call(save_all_regs=save_all_regs) if gc_level == SAVE_GCREF_REGS: gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap - # we save all the GCREF registers for shadowstack and asmgcc for now - # --- for asmgcc too: we can't say "register x is a gc ref" - # without distinguishing call sites, which we don't do any - # more for now. 
+ # we save all the GCREF registers for shadowstack if gcrootmap: # and gcrootmap.is_shadow_stack: save_all_regs = SAVE_GCREF_REGS self.rm.before_call(save_all_regs=save_all_regs) @@ -940,15 +937,6 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): - # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' - # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. - # We must make sure that edi and esi do not contain GC pointers. - if IS_X86_32 and self.assembler._is_asmgcc(): - for box, loc in self.rm.reg_bindings.items(): - if (loc == edi or loc == esi) and box.type == REF: - self.rm.force_spill_var(box) - assert box not in self.rm.reg_bindings - # args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments v_func = args[1] diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py deleted file mode 100644 --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ /dev/null @@ -1,9 +0,0 @@ -import py -from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests -from rpython.translator.platform import platform as compiler - -if compiler.name == 'msvc': - py.test.skip('asmgcc buggy on msvc') - -class TestAsmGcc(CompileFrameworkTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py --- a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py +++ b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py @@ -1,11 +1,5 @@ from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests -from rpython.translator.platform import platform as compiler class TestShadowStack(ReleaseGILTests): gcrootfinder = "shadowstack" - - -if compiler.name != 'msvc': - class TestAsmGcc(ReleaseGILTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py 
b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py --- a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py @@ -1,19 +1,12 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC -from rpython.translator.platform import platform as compiler -if compiler.name == 'msvc': - _MSVC = True -else: - _MSVC = False class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest): def _get_TranslationContext(self): t = TranslationContext() t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' - if not _MSVC: - t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True return t diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1156,8 +1156,7 @@ 'CALL_ASSEMBLER/*d/rfin', # call already compiled assembler 'CALL_MAY_FORCE/*d/rfin', 'CALL_LOOPINVARIANT/*d/rfin', - 'CALL_RELEASE_GIL/*d/fin', - # release the GIL and "close the stack" for asmgcc + 'CALL_RELEASE_GIL/*d/fin', # release the GIL around the call 'CALL_PURE/*d/rfin', # removed before it's passed to the backend 'CHECK_MEMORY_ERROR/1/n', # after a CALL: NULL => propagate MemoryError 'CALL_MALLOC_NURSERY/1/r', # nursery malloc, const number of bytes, zeroed diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py deleted file mode 100644 --- a/rpython/memory/gctransform/asmgcroot.py +++ /dev/null @@ -1,870 +0,0 @@ -from rpython.flowspace.model import (Constant, Variable, Block, Link, - copygraph, SpaceOperation, checkgraph) -from rpython.rlib.debug import ll_assert -from rpython.rlib.nonconst 
import NonConstant -from rpython.rlib import rgil -from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import ( - BaseFrameworkGCTransformer, BaseRootWalker) -from rpython.rtyper.llannotation import SomeAddress -from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import varoftype -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys - - -# -# This transformer avoids the use of a shadow stack in a completely -# platform-specific way, by directing genc to insert asm() special -# instructions in the C source, which are recognized by GCC. -# The .s file produced by GCC is then parsed by trackgcroot.py. -# - -IS_64_BITS = sys.maxint > 2147483647 - -class AsmGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer): - _asmgcc_save_restore_arguments = None - - def push_roots(self, hop, keep_current_args=False): - livevars = self.get_livevars_for_roots(hop, keep_current_args) - self.num_pushs += len(livevars) - return livevars - - def pop_roots(self, hop, livevars): - if not livevars: - return - # mark the values as gc roots - for var in livevars: - v_adr = gen_cast(hop.llops, llmemory.Address, var) - v_newaddr = hop.genop("direct_call", [c_asm_gcroot, v_adr], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) - - def build_root_walker(self): - return AsmStackRootWalker(self) - - def mark_call_cannotcollect(self, hop, name): - hop.genop("direct_call", [c_asm_nocollect, name]) - - def gct_direct_call(self, hop): - # just a sanity check: if we find a fnptr with the hint on the - # _callable, then we'd also find the hint by looking only at the - # graph. We'll actually change this graph only later, in - # start_transforming_graph(). 
- fnptr = hop.spaceop.args[0].value - try: - close_stack = fnptr._obj._callable._gctransformer_hint_close_stack_ - except AttributeError: - pass - else: - assert fnptr._obj.graph.func is fnptr._obj._callable - BaseFrameworkGCTransformer.gct_direct_call(self, hop) - - def start_transforming_graph(self, graph): - try: - close_stack = graph.func._gctransformer_hint_close_stack_ - except AttributeError: - close_stack = False - if close_stack: - self._transform_hint_close_stack(graph) - - def _transform_hint_close_stack(self, graph): - # We cannot easily pass variable amount of arguments of the call - # across the call to the pypy_asm_stackwalk helper. So we store - # them away and restore them. More precisely, we need to - # replace 'graph' with code that saves the arguments, and make - # a new graph that starts with restoring the arguments. - if self._asmgcc_save_restore_arguments is None: - self._asmgcc_save_restore_arguments = {} - sradict = self._asmgcc_save_restore_arguments - sra = [] # list of pointers to raw-malloced containers for args - seen = {} - ARGS = [v.concretetype for v in graph.getargs()] - for TYPE in ARGS: - if isinstance(TYPE, lltype.Ptr): - TYPE = llmemory.Address - num = seen.get(TYPE, 0) - seen[TYPE] = num + 1 - key = (TYPE, num) - if key not in sradict: - CONTAINER = lltype.FixedSizeArray(TYPE, 1) - p = lltype.malloc(CONTAINER, flavor='raw', zero=True, - immortal=True) - sradict[key] = Constant(p, lltype.Ptr(CONTAINER)) - sra.append(sradict[key]) - # - # make a copy of the graph that will reload the values - graph2 = copygraph(graph) - del graph2.func # otherwise, start_transforming_graph() will - # again transform graph2, and we get an - # infinite loop - # - # edit the original graph to only store the value of the arguments - block = Block(graph.startblock.inputargs) - c_item0 = Constant('item0', lltype.Void) - assert len(block.inputargs) == len(sra) - for v_arg, c_p in zip(block.inputargs, sra): - if isinstance(v_arg.concretetype, 
lltype.Ptr): - v_adr = varoftype(llmemory.Address) - block.operations.append( - SpaceOperation("cast_ptr_to_adr", [v_arg], v_adr)) - v_arg = v_adr - v_void = varoftype(lltype.Void) - block.operations.append( - SpaceOperation("bare_setfield", [c_p, c_item0, v_arg], v_void)) - # - # call asm_stackwalk(graph2) - RESULT = graph.getreturnvar().concretetype - FUNC2 = lltype.FuncType([], RESULT) - fnptr2 = lltype.functionptr(FUNC2, - graph.name + '_reload', - graph=graph2) - c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2)) - HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2), - ASM_FRAMEDATA_HEAD_PTR], RESULT) - v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk") - block.operations.append( - SpaceOperation("cast_pointer", [c_asm_stackwalk], v_asm_stackwalk)) - v_result = varoftype(RESULT) - block.operations.append( - SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2, - c_gcrootanchor, - Constant(None, lltype.Void)], - v_result)) - block.closeblock(Link([v_result], graph.returnblock)) - graph.startblock = block - # - # edit the copy of the graph to reload the values - block2 = graph2.startblock - block1 = Block([]) - reloadedvars = [] - for v, c_p in zip(block2.inputargs, sra): - v = v.copy() - if isinstance(v.concretetype, lltype.Ptr): - w = varoftype(llmemory.Address) - else: - w = v - block1.operations.append(SpaceOperation('getfield', - [c_p, c_item0], w)) - if w is not v: - block1.operations.append(SpaceOperation('cast_adr_to_ptr', - [w], v)) - reloadedvars.append(v) - block1.closeblock(Link(reloadedvars, block2)) - graph2.startblock = block1 - # - checkgraph(graph) - checkgraph(graph2) - - -class AsmStackRootWalker(BaseRootWalker): - - def __init__(self, gctransformer): - BaseRootWalker.__init__(self, gctransformer) - - def _asm_callback(): - self.walk_stack_from() - self._asm_callback = _asm_callback - self._shape_decompressor = ShapeDecompressor() - self._with_jit = hasattr(gctransformer.translator, '_jit2gc') - if self._with_jit: - jit2gc = 
gctransformer.translator._jit2gc - self.frame_tid = jit2gc['frame_tid'] - self.gctransformer = gctransformer - # - # unless overridden in need_thread_support(): - self.belongs_to_current_thread = lambda framedata: True - - def need_stacklet_support(self, gctransformer, getfn): - from rpython.annotator import model as annmodel - from rpython.rlib import _stacklet_asmgcc - # stacklet support: BIG HACK for rlib.rstacklet - _stacklet_asmgcc._asmstackrootwalker = self # as a global! argh - _stacklet_asmgcc.complete_destrptr(gctransformer) - # - def gc_detach_callback_pieces(): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - result = llmemory.NULL - framedata = anchor.address[1] - while framedata != anchor: - next = framedata.address[1] - if self.belongs_to_current_thread(framedata): - # detach it - prev = framedata.address[0] - prev.address[1] = next - next.address[0] = prev - # update the global stack counter - rffi.stackcounter.stacks_counter -= 1 - # reattach framedata into the singly-linked list 'result' - framedata.address[0] = rffi.cast(llmemory.Address, -1) - framedata.address[1] = result - result = framedata - framedata = next - return result - # - def gc_reattach_callback_pieces(pieces): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while pieces != llmemory.NULL: - framedata = pieces - pieces = pieces.address[1] - # attach 'framedata' into the normal doubly-linked list - following = anchor.address[1] - following.address[0] = framedata - framedata.address[1] = following - anchor.address[1] = framedata - framedata.address[0] = anchor - # update the global stack counter - rffi.stackcounter.stacks_counter += 1 - # - s_addr = SomeAddress() - s_None = annmodel.s_None - self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, - [], s_addr) - self.gc_reattach_callback_pieces_ptr=getfn(gc_reattach_callback_pieces, - [s_addr], s_None) - - def need_thread_support(self, gctransformer, getfn): - # Threads supported "out of the box" by the rest of the 
code. - # The whole code in this function is only there to support - # fork()ing in a multithreaded process :-( - # For this, we need to handle gc_thread_start and gc_thread_die - # to record the mapping {thread_id: stack_start}, and - # gc_thread_before_fork and gc_thread_after_fork to get rid of - # all ASM_FRAMEDATA structures that do no belong to the current - # thread after a fork(). - from rpython.rlib import rthread - from rpython.memory.support import AddressDict - from rpython.memory.support import copy_without_null_values - from rpython.annotator import model as annmodel - gcdata = self.gcdata - - def get_aid(): - """Return the thread identifier, cast to an (opaque) address.""" - return llmemory.cast_int_to_adr(rthread.get_ident()) - - def thread_start(): - value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed)) - gcdata.aid2stack.setitem(get_aid(), value) - thread_start._always_inline_ = True - - def thread_setup(): - gcdata.aid2stack = AddressDict() - gcdata.dead_threads_count = 0 - # to also register the main thread's stack - thread_start() - thread_setup._always_inline_ = True - - def thread_die(): - gcdata.aid2stack.setitem(get_aid(), llmemory.NULL) - # from time to time, rehash the dictionary to remove - # old NULL entries - gcdata.dead_threads_count += 1 - if (gcdata.dead_threads_count & 511) == 0: - copy = copy_without_null_values(gcdata.aid2stack) - gcdata.aid2stack.delete() - gcdata.aid2stack = copy - - def belongs_to_current_thread(framedata): - # xxx obscure: the answer is Yes if, as a pointer, framedata - # lies between the start of the current stack and the top of it. 
- stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL) - ll_assert(stack_start != llmemory.NULL, - "current thread not found in gcdata.aid2stack!") - stack_stop = llmemory.cast_int_to_adr( - llop.stack_current(lltype.Signed)) - return (stack_start <= framedata <= stack_stop or - stack_start >= framedata >= stack_stop) - self.belongs_to_current_thread = belongs_to_current_thread - - def thread_before_fork(): - # before fork(): collect all ASM_FRAMEDATA structures that do - # not belong to the current thread, and move them out of the - # way, i.e. out of the main circular doubly linked list. - detached_pieces = llmemory.NULL - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - while initialframedata != anchor: # while we have not looped back - if not belongs_to_current_thread(initialframedata): - # Unlink it - prev = initialframedata.address[0] - next = initialframedata.address[1] - prev.address[1] = next - next.address[0] = prev - # Link it to the singly linked list 'detached_pieces' - initialframedata.address[0] = detached_pieces - detached_pieces = initialframedata - rffi.stackcounter.stacks_counter -= 1 - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - return detached_pieces - - def thread_after_fork(result_of_fork, detached_pieces): - if result_of_fork == 0: - # We are in the child process. Assumes that only the - # current thread survived. All the detached_pieces - # are pointers in other stacks, so have likely been - # freed already by the multithreaded library. - # Nothing more for us to do. - pass - else: - # We are still in the parent process. The fork() may - # have succeeded or not, but that's irrelevant here. - # We need to reattach the detached_pieces now, to the - # circular doubly linked list at 'gcrootanchor'. The - # order is not important. 
- anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while detached_pieces != llmemory.NULL: - reattach = detached_pieces - detached_pieces = detached_pieces.address[0] - a_next = anchor.address[1] - reattach.address[0] = anchor - reattach.address[1] = a_next - anchor.address[1] = reattach - a_next.address[0] = reattach - rffi.stackcounter.stacks_counter += 1 - - self.thread_setup = thread_setup - self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None, - inline=True) - self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) - self.thread_before_fork_ptr = getfn(thread_before_fork, [], - SomeAddress()) - self.thread_after_fork_ptr = getfn(thread_after_fork, - [annmodel.SomeInteger(), - SomeAddress()], - annmodel.s_None) - # - # check that the order of the need_*() is correct for us: if we - # need both threads and stacklets, need_thread_support() must be - # called first, to initialize self.belongs_to_current_thread. - assert not hasattr(self, 'gc_detach_callback_pieces_ptr') - - def postprocess_graph(self, gct, graph, any_inlining): - pass - - def walk_stack_roots(self, collect_stack_root, is_minor=False): - gcdata = self.gcdata - gcdata._gc_collect_stack_root = collect_stack_root - gcdata._gc_collect_is_minor = is_minor - pypy_asm_stackwalk(llhelper(ASM_CALLBACK_PTR, self._asm_callback), - gcrootanchor) - - def walk_stack_from(self): - curframe = lltype.malloc(WALKFRAME, flavor='raw') - otherframe = lltype.malloc(WALKFRAME, flavor='raw') - - # Walk over all the pieces of stack. They are in a circular linked - # list of structures of 7 words, the 2 first words being prev/next. 
- # The anchor of this linked list is: - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - stackscount = 0 - while initialframedata != anchor: # while we have not looped back - self.walk_frames(curframe, otherframe, initialframedata) - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - stackscount += 1 - # - # for the JIT: rpy_fastgil may contain an extra framedata - rpy_fastgil = rgil.gil_fetch_fastgil().signed[0] - if rpy_fastgil != 1: - ll_assert(rpy_fastgil != 0, "walk_stack_from doesn't have the GIL") - initialframedata = rffi.cast(llmemory.Address, rpy_fastgil) - # - # very rare issue: initialframedata.address[0] is uninitialized - # in this case, but "retaddr = callee.frame_address.address[0]" - # reads it. If it happens to be exactly a valid return address - # inside the C code, then bad things occur. - initialframedata.address[0] = llmemory.NULL - # - self.walk_frames(curframe, otherframe, initialframedata) - stackscount += 1 - # - expected = rffi.stackcounter.stacks_counter - if NonConstant(0): - rffi.stackcounter.stacks_counter += 42 # hack to force it - ll_assert(not (stackscount < expected), "non-closed stacks around") - ll_assert(not (stackscount > expected), "stacks counter corruption?") - lltype.free(otherframe, flavor='raw') - lltype.free(curframe, flavor='raw') - - def walk_frames(self, curframe, otherframe, initialframedata): - self.fill_initial_frame(curframe, initialframedata) - # Loop over all the frames in the stack - while self.walk_to_parent_frame(curframe, otherframe): - swap = curframe - curframe = otherframe # caller becomes callee - otherframe = swap - - def fill_initial_frame(self, curframe, initialframedata): - # Read the information provided by initialframedata - initialframedata += 2*sizeofaddr #skip the prev/next words at the start - reg = 0 - while reg < CALLEE_SAVED_REGS: - # NB. 
'initialframedata' stores the actual values of the - # registers %ebx etc., and if these values are modified - # they are reloaded by pypy_asm_stackwalk(). By contrast, - # 'regs_stored_at' merely points to the actual values - # from the 'initialframedata'. - curframe.regs_stored_at[reg] = initialframedata + reg*sizeofaddr - reg += 1 - curframe.frame_address = initialframedata.address[CALLEE_SAVED_REGS] - - def walk_to_parent_frame(self, callee, caller): - """Starting from 'callee', walk the next older frame on the stack - and fill 'caller' accordingly. Also invokes the collect_stack_root() - callback from the GC code for each GC root found in 'caller'. - """ - # - # The gcmap table is a list of entries, two machine words each: - # void *SafePointAddress; - # int Shape; - # - # A "safe point" is the return address of a call. - # The "shape" of a safe point is a list of integers - # that represent "locations". A "location" can be - # either in the stack or in a register. See - # getlocation() for the decoding of this integer. - # The locations stored in a "shape" are as follows: - # - # * The "location" of the return address. This is just - # after the end of the frame of 'callee'; it is the - # first word of the frame of 'caller' (see picture - # below). - # - # * Four "locations" that specify where the function saves - # each of the four callee-saved registers (%ebx, %esi, - # %edi, %ebp). - # - # * The number of live GC roots around the call. - # - # * For each GC root, an integer that specify where the - # GC pointer is stored. This is a "location" too. - # - # XXX the details are completely specific to X86!!! - # a picture of the stack may help: - # ^ ^ ^ - # | ... | to older frames - # +--------------+ - # | ret addr | <------ caller_frame (addr of retaddr) - # | ... | - # | caller frame | - # | ... | - # +--------------+ - # | ret addr | <------ callee_frame (addr of retaddr) - # | ... | - # | callee frame | - # | ... 
| lower addresses - # +--------------+ v v v - # - - retaddr = callee.frame_address.address[0] - # - # try to locate the caller function based on retaddr. - # set up self._shape_decompressor. - # - ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] - self.locate_caller_based_on_retaddr(retaddr, ebp_in_caller) - # - # found! Enumerate the GC roots in the caller frame - # - collect_stack_root = self.gcdata._gc_collect_stack_root - gc = self.gc - while True: - location = self._shape_decompressor.next() - if location == 0: - break - addr = self.getlocation(callee, ebp_in_caller, location) - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - # - # small hack: the JIT reserves THREADLOCAL_OFS's last bit for - # us. We use it to store an "already traced past this frame" - # flag. - if self._with_jit and self.gcdata._gc_collect_is_minor: - if self.mark_jit_frame_can_stop(callee): - return False - # - # track where the caller_frame saved the registers from its own - # caller - # - reg = CALLEE_SAVED_REGS - 1 - while reg >= 0: - location = self._shape_decompressor.next() - addr = self.getlocation(callee, ebp_in_caller, location) - caller.regs_stored_at[reg] = addr - reg -= 1 - - location = self._shape_decompressor.next() - caller.frame_address = self.getlocation(callee, ebp_in_caller, - location) - # we get a NULL marker to mean "I'm the frame - # of the entry point, stop walking" - return caller.frame_address != llmemory.NULL - - def locate_caller_based_on_retaddr(self, retaddr, ebp_in_caller): - gcmapstart = llop.gc_asmgcroot_static(llmemory.Address, 0) - gcmapend = llop.gc_asmgcroot_static(llmemory.Address, 1) - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if not self._shape_decompressor.sorted: - # the item may have been not found because the main array was - # not sorted. Sort it and try again. 
- win32_follow_gcmap_jmp(gcmapstart, gcmapend) - sort_gcmap(gcmapstart, gcmapend) - self._shape_decompressor.sorted = True - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if self._with_jit: - # item not found. We assume that it's a JIT-generated - # location -- but we check for consistency that ebp points - # to a JITFRAME object. - from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - - tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) - if (rffi.cast(lltype.Signed, tid) == - rffi.cast(lltype.Signed, self.frame_tid)): - # fish the depth - extra_stack_depth = (ebp_in_caller + STACK_DEPTH_OFS).signed[0] - ll_assert((extra_stack_depth & (rffi.sizeof(lltype.Signed) - 1)) - == 0, "asmgcc: misaligned extra_stack_depth") - extra_stack_depth //= rffi.sizeof(lltype.Signed) - self._shape_decompressor.setjitframe(extra_stack_depth) - return - llop.debug_fatalerror(lltype.Void, "cannot find gc roots!") - - def getlocation(self, callee, ebp_in_caller, location): - """Get the location in the 'caller' frame of a variable, based - on the integer 'location' that describes it. All locations are - computed based on information saved by the 'callee'. 
- """ - ll_assert(location >= 0, "negative location") - kind = location & LOC_MASK - offset = location & ~ LOC_MASK - if IS_64_BITS: - offset <<= 1 - if kind == LOC_REG: # register - if location == LOC_NOWHERE: - return llmemory.NULL - reg = (location >> 2) - 1 - ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") - return callee.regs_stored_at[reg] - elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + sizeofaddr - return esp_in_caller + offset - elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) - return ebp_in_caller + offset - else: # kind == LOC_EBP_MINUS: at -N(%ebp) - return ebp_in_caller - offset - - def mark_jit_frame_can_stop(self, callee): - location = self._shape_decompressor.get_threadlocal_loc() - if location == LOC_NOWHERE: - return False - addr = self.getlocation(callee, llmemory.NULL, location) - # - x = addr.signed[0] - if x & 1: - return True # this JIT stack frame is already marked! - else: - addr.signed[0] = x | 1 # otherwise, mark it but don't stop - return False - - -LOC_REG = 0 -LOC_ESP_PLUS = 1 -LOC_EBP_PLUS = 2 -LOC_EBP_MINUS = 3 -LOC_MASK = 0x03 -LOC_NOWHERE = LOC_REG | 0 - -# ____________________________________________________________ - -sizeofaddr = llmemory.sizeof(llmemory.Address) -arrayitemsize = 2 * sizeofaddr - - -def binary_search(start, end, addr1): - """Search for an element in a sorted array. - - The interval from the start address (included) to the end address - (excluded) is assumed to be a sorted arrays of pairs (addr1, addr2). - This searches for the item with a given addr1 and returns its - address. If not found exactly, it tries to return the address - of the item left of addr1 (i.e. such that result.address[0] < addr1). 
- """ - count = (end - start) // arrayitemsize - while count > 1: - middleindex = count // 2 - middle = start + middleindex * arrayitemsize - if addr1 < middle.address[0]: - count = middleindex - else: - start = middle - count -= middleindex - return start - -def search_in_gcmap(gcmapstart, gcmapend, retaddr): - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item # found - # 'retaddr' not exactly found. Check that 'item' is the start of a - # compressed range that includes 'retaddr'. - if retaddr > item.address[0] and item.signed[1] < 0: - return item # ok - else: - return llmemory.NULL # failed - -def search_in_gcmap2(gcmapstart, gcmapend, retaddr): - # same as 'search_in_gcmap', but without range checking support - # (item.signed[1] is an address in this case, not a signed at all!) - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item.address[1] # found - else: - return llmemory.NULL # failed - -def sort_gcmap(gcmapstart, gcmapend): - count = (gcmapend - gcmapstart) // arrayitemsize - qsort(gcmapstart, - rffi.cast(rffi.SIZE_T, count), - rffi.cast(rffi.SIZE_T, arrayitemsize), - c_compare_gcmap_entries) - -def replace_dead_entries_with_nulls(start, end): - # replace the dead entries (null value) with a null key. - count = (end - start) // arrayitemsize - 1 - while count >= 0: - item = start + count * arrayitemsize - if item.address[1] == llmemory.NULL: - item.address[0] = llmemory.NULL - count -= 1 - -if sys.platform == 'win32': - def win32_follow_gcmap_jmp(start, end): - # The initial gcmap table contains addresses to a JMP - # instruction that jumps indirectly to the real code. - # Replace them with the target addresses. 
- assert rffi.SIGNEDP is rffi.LONGP, "win64 support missing" - while start < end: - code = rffi.cast(rffi.CCHARP, start.address[0])[0] - if code == '\xe9': # jmp - rel32 = rffi.cast(rffi.SIGNEDP, start.address[0]+1)[0] - target = start.address[0] + (rel32 + 5) - start.address[0] = target - start += arrayitemsize -else: - def win32_follow_gcmap_jmp(start, end): - pass - -# ____________________________________________________________ - -class ShapeDecompressor: - _alloc_flavor_ = "raw" - - sorted = False - - def setpos(self, pos): - if pos < 0: - pos = ~ pos # can ignore this "range" marker here - gccallshapes = llop.gc_asmgcroot_static(llmemory.Address, 2) - self.addr = gccallshapes + pos - self.jit_index = -1 - - def setjitframe(self, extra_stack_depth): - self.jit_index = 0 - self.extra_stack_depth = extra_stack_depth - - def next(self): - index = self.jit_index - if index < 0: - # case "outside the jit" - addr = self.addr - value = 0 - while True: - b = ord(addr.char[0]) - addr += 1 - value += b - if b < 0x80: - break - value = (value - 0x80) << 7 - self.addr = addr - return value - else: - # case "in the jit" - from rpython.jit.backend.x86.arch import FRAME_FIXED_SIZE - from rpython.jit.backend.x86.arch import PASS_ON_MY_FRAME - self.jit_index = index + 1 - if index == 0: - # the jitframe is an object in EBP - return LOC_REG | ((INDEX_OF_EBP + 1) << 2) - if index == 1: - return 0 - # the remaining returned values should be: - # saved %rbp - # saved %r15 or on 32bit: - # saved %r14 saved %ebp - # saved %r13 saved %edi - # saved %r12 saved %esi - # saved %rbx saved %ebx - # return addr return addr - stack_depth = PASS_ON_MY_FRAME + self.extra_stack_depth - if IS_64_BITS: - if index == 2: # rbp - return LOC_ESP_PLUS | (stack_depth << 2) - if index == 3: # r15 - return LOC_ESP_PLUS | ((stack_depth + 5) << 2) - if index == 4: # r14 - return LOC_ESP_PLUS | ((stack_depth + 4) << 2) - if index == 5: # r13 - return LOC_ESP_PLUS | ((stack_depth + 3) << 2) - if index == 6: 
# r12 - return LOC_ESP_PLUS | ((stack_depth + 2) << 2) - if index == 7: # rbx - return LOC_ESP_PLUS | ((stack_depth + 1) << 2) - if index == 8: # return addr - return (LOC_ESP_PLUS | - ((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2)) - else: - if index == 2: # ebp - return LOC_ESP_PLUS | (stack_depth << 2) - if index == 3: # edi - return LOC_ESP_PLUS | ((stack_depth + 3) << 2) - if index == 4: # esi - return LOC_ESP_PLUS | ((stack_depth + 2) << 2) - if index == 5: # ebx - return LOC_ESP_PLUS | ((stack_depth + 1) << 2) - if index == 6: # return addr - return (LOC_ESP_PLUS | - ((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2)) - llop.debug_fatalerror(lltype.Void, "asmgcroot: invalid index") - return 0 # annotator fix - - def get_threadlocal_loc(self): - index = self.jit_index - if index < 0: - return LOC_NOWHERE # case "outside the jit" - else: - # case "in the jit" - from rpython.jit.backend.x86.arch import THREADLOCAL_OFS, WORD - return (LOC_ESP_PLUS | - ((THREADLOCAL_OFS // WORD + self.extra_stack_depth) << 2)) - - -# ____________________________________________________________ - -# -# The special pypy_asm_stackwalk(), implemented directly in -# assembler, fills information about the current stack top in an -# ASM_FRAMEDATA array and invokes an RPython callback with it. 
-# An ASM_FRAMEDATA is an array of 5 values that describe everything -# we need to know about a stack frame: -# -# - the value that %ebx had when the current function started -# - the value that %esi had when the current function started -# - the value that %edi had when the current function started -# - the value that %ebp had when the current function started -# - frame address (actually the addr of the retaddr of the current function; -# that's the last word of the frame in memory) -# -# On 64 bits, it is an array of 7 values instead of 5: -# -# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address -# - -if IS_64_BITS: - CALLEE_SAVED_REGS = 6 - INDEX_OF_EBP = 5 - FRAME_PTR = CALLEE_SAVED_REGS -else: - CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers - INDEX_OF_EBP = 3 - FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array - -JIT_USE_WORDS = 2 + FRAME_PTR + 1 - -ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) - -# used internally by walk_stack_from() -WALKFRAME = lltype.Struct('WALKFRAME', - ('regs_stored_at', # address of where the registers have been saved - lltype.FixedSizeArray(llmemory.Address, CALLEE_SAVED_REGS)), - ('frame_address', - llmemory.Address), - ) - -# We have a circular doubly-linked list of all the ASM_FRAMEDATAs currently -# alive. 
The list's starting point is given by 'gcrootanchor', which is not -# a full ASM_FRAMEDATA but only contains the prev/next pointers: -ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference()) -ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD', - ('prev', ASM_FRAMEDATA_HEAD_PTR), - ('next', ASM_FRAMEDATA_HEAD_PTR) - )) -gcrootanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, immortal=True) -gcrootanchor.prev = gcrootanchor -gcrootanchor.next = gcrootanchor -c_gcrootanchor = Constant(gcrootanchor, ASM_FRAMEDATA_HEAD_PTR) - -eci = ExternalCompilationInfo(compile_extra=['-DPYPY_USE_ASMGCC'], - post_include_bits=[""" -static int pypy_compare_gcmap_entries(const void *addr1, const void *addr2) -{ - char *key1 = * (char * const *) addr1; - char *key2 = * (char * const *) addr2; - if (key1 < key2) - return -1; - else if (key1 == key2) - return 0; - else - return 1; -} -"""]) - -pypy_asm_stackwalk = rffi.llexternal('pypy_asm_stackwalk', - [ASM_CALLBACK_PTR, - ASM_FRAMEDATA_HEAD_PTR], - lltype.Signed, - sandboxsafe=True, - _nowrapper=True, - random_effects_on_gcobjs=True, - compilation_info=eci) -c_asm_stackwalk = Constant(pypy_asm_stackwalk, - lltype.typeOf(pypy_asm_stackwalk)) - -pypy_asm_gcroot = rffi.llexternal('pypy_asm_gcroot', - [llmemory.Address], - llmemory.Address, - sandboxsafe=True, - _nowrapper=True) -c_asm_gcroot = Constant(pypy_asm_gcroot, lltype.typeOf(pypy_asm_gcroot)) - -pypy_asm_nocollect = rffi.llexternal('pypy_asm_gc_nocollect', - [rffi.CCHARP], lltype.Void, - sandboxsafe=True, - _nowrapper=True) -c_asm_nocollect = Constant(pypy_asm_nocollect, lltype.typeOf(pypy_asm_nocollect)) - -QSORT_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address, - llmemory.Address], rffi.INT)) -c_compare_gcmap_entries = rffi.llexternal('pypy_compare_gcmap_entries', - [llmemory.Address, llmemory.Address], - rffi.INT, compilation_info=eci, - _nowrapper=True, sandboxsafe=True) -qsort = rffi.llexternal('qsort', - [llmemory.Address, - rffi.SIZE_T, - 
rffi.SIZE_T, - QSORT_CALLBACK_PTR], - lltype.Void, - sandboxsafe=True, - random_effects_on_gcobjs=False, # but has a callback - _nowrapper=True) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1003,21 +1003,6 @@ # for stacklet hop.genop("direct_call", [self.root_walker.gc_modified_shadowstack_ptr]) - def gct_gc_detach_callback_pieces(self, hop): - op = hop.spaceop - assert len(op.args) == 0 - hop.genop("direct_call", - [self.root_walker.gc_detach_callback_pieces_ptr], - resultvar=op.result) - - def gct_gc_reattach_callback_pieces(self, hop): - op = hop.spaceop - assert len(op.args) == 1 - hop.genop("direct_call", - [self.root_walker.gc_reattach_callback_pieces_ptr, - op.args[0]], - resultvar=op.result) - def gct_do_malloc_fixedsize(self, hop): # used by the JIT (see rpython.jit.backend.llsupport.gc) op = hop.spaceop @@ -1244,8 +1229,10 @@ def gct_gc_thread_start(self, hop): assert self.translator.config.translation.thread + # There is no 'thread_start_ptr' any more for now, so the following + # line is always false. if hasattr(self.root_walker, 'thread_start_ptr'): - # only with asmgcc. Note that this is actually called after + # Note that this is actually called after # the first gc_thread_run() in the new thread. 
hop.genop("direct_call", [self.root_walker.thread_start_ptr]) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -217,9 +217,6 @@ self.var_last_needed_in = None self.curr_block = None - def start_transforming_graph(self, graph): - pass # for asmgcc.py - def transform_graph(self, graph): if graph in self.minimal_transform: if self.minimalgctransformer: @@ -229,7 +226,6 @@ if graph in self.seen_graphs: return self.seen_graphs.add(graph) - self.start_transforming_graph(graph) self.links_to_split = {} # link -> vars to pop_alive across the link diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py deleted file mode 100644 --- a/rpython/rlib/_stacklet_asmgcc.py +++ /dev/null @@ -1,325 +0,0 @@ -from rpython.rlib.debug import ll_assert -from rpython.rlib import rgc -from rpython.rlib.objectmodel import specialize -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.rlib import _rffi_stacklet as _c - - -_asmstackrootwalker = None # BIG HACK: monkey-patched by asmgcroot.py -_stackletrootwalker = None - -def get_stackletrootwalker(): - # XXX this is too complicated now; we don't need a StackletRootWalker - # instance to store global state. We could rewrite it all in one big - # function. We don't care enough for now. 
- - # lazily called, to make the following imports lazy - global _stackletrootwalker - if _stackletrootwalker is not None: - return _stackletrootwalker - - from rpython.memory.gctransform.asmgcroot import ( - WALKFRAME, CALLEE_SAVED_REGS, INDEX_OF_EBP, sizeofaddr) - - assert _asmstackrootwalker is not None, "should have been monkey-patched" - basewalker = _asmstackrootwalker - - class StackletRootWalker(object): - _alloc_flavor_ = "raw" - - def setup(self, obj): - # initialization: read the SUSPSTACK object - p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK)) - if not p.handle: - return False - self.context = llmemory.cast_ptr_to_adr(p.handle) - self.next_callback_piece = p.callback_pieces - anchor = p.anchor - del p - self.curframe = lltype.malloc(WALKFRAME, flavor='raw') - self.otherframe = lltype.malloc(WALKFRAME, flavor='raw') - self.fill_initial_frame(self.curframe, anchor) - return True - - def fill_initial_frame(self, curframe, initialframedata): - # Copy&paste :-( - initialframedata += 2*sizeofaddr - reg = 0 - while reg < CALLEE_SAVED_REGS: - curframe.regs_stored_at[reg] = initialframedata+reg*sizeofaddr - reg += 1 - retaddraddr = initialframedata + CALLEE_SAVED_REGS * sizeofaddr - retaddraddr = self.translateptr(retaddraddr) - curframe.frame_address = retaddraddr.address[0] - - def fetch_next_stack_piece(self): - if self.next_callback_piece == llmemory.NULL: - lltype.free(self.curframe, flavor='raw') - lltype.free(self.otherframe, flavor='raw') - self.context = llmemory.NULL - return False - else: - anchor = self.next_callback_piece - nextaddr = anchor + sizeofaddr - nextaddr = self.translateptr(nextaddr) - self.next_callback_piece = nextaddr.address[0] - self.fill_initial_frame(self.curframe, anchor) - return True - - @specialize.arg(3) - def customtrace(self, gc, obj, callback, arg): - # - # Pointers to the stack can be "translated" or not: - # - # * Non-translated pointers point to where the data would be - # if the stack was installed and 
running. - # - # * Translated pointers correspond to where the data - # is now really in memory. - # - # Note that 'curframe' contains non-translated pointers, and - # of course the stack itself is full of non-translated pointers. - # - if not self.setup(obj): - return - - while True: - callee = self.curframe - retaddraddr = self.translateptr(callee.frame_address) - retaddr = retaddraddr.address[0] - ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP] - ebp_in_caller = self.translateptr(ebp_in_caller) - ebp_in_caller = ebp_in_caller.address[0] - basewalker.locate_caller_based_on_retaddr(retaddr, - ebp_in_caller) - - # see asmgcroot for similarity: - while True: - location = basewalker._shape_decompressor.next() - if location == 0: - break - addr = basewalker.getlocation(callee, ebp_in_caller, - location) - # yield the translated addr of the next GCREF in the stack - addr = self.translateptr(addr) - gc._trace_callback(callback, arg, addr) - - caller = self.otherframe - reg = CALLEE_SAVED_REGS - 1 - while reg >= 0: - location = basewalker._shape_decompressor.next() - addr = basewalker.getlocation(callee, ebp_in_caller, - location) - caller.regs_stored_at[reg] = addr # non-translated - reg -= 1 - - location = basewalker._shape_decompressor.next() - caller.frame_address = basewalker.getlocation(callee, - ebp_in_caller, - location) - # ^^^ non-translated - if caller.frame_address == llmemory.NULL: - # completely done with this piece of stack - if not self.fetch_next_stack_piece(): - return - continue - # - self.otherframe = callee - self.curframe = caller - # loop back - - def translateptr(self, addr): - return _c._translate_pointer(self.context, addr) - - _stackletrootwalker = StackletRootWalker() - return _stackletrootwalker -get_stackletrootwalker._annspecialcase_ = 'specialize:memo' - -def complete_destrptr(gctransformer): - translator = gctransformer.translator - mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) - args_s = 
[lltype_to_annotation(lltype.Ptr(SUSPSTACK))] - s_result = annmodel.s_None - destrptr = mixlevelannotator.delayedfunction(suspstack_destructor, - args_s, s_result) - mixlevelannotator.finish() - lltype.attachRuntimeTypeInfo(SUSPSTACK, destrptr=destrptr) - - -def customtrace(gc, obj, callback, arg): - stackletrootwalker = get_stackletrootwalker() - stackletrootwalker.customtrace(gc, obj, callback, arg) -lambda_customtrace = lambda: customtrace - -def suspstack_destructor(suspstack): - h = suspstack.handle - if h: - _c.destroy(h) - - -SUSPSTACK = lltype.GcStruct('SuspStack', - ('handle', _c.handle), - ('anchor', llmemory.Address), - ('callback_pieces', llmemory.Address), - rtti=True) -NULL_SUSPSTACK = lltype.nullptr(SUSPSTACK) - -ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference()) -ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD', - ('prev', ASM_FRAMEDATA_HEAD_PTR), - ('next', ASM_FRAMEDATA_HEAD_PTR) - )) -alternateanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, - immortal=True) -alternateanchor.prev = alternateanchor -alternateanchor.next = alternateanchor - -FUNCNOARG_P = lltype.Ptr(lltype.FuncType([], _c.handle)) -pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk', - [FUNCNOARG_P, - ASM_FRAMEDATA_HEAD_PTR], - lltype.Signed, sandboxsafe=True, - _nowrapper=True) - - -def _new_callback(): - # Here, we just closed the stack. Get the stack anchor, store - # it in the gcrootfinder.suspstack.anchor, and create a new - # stacklet with stacklet_new(). If this call fails, then we - # are just returning NULL. - _stack_just_closed() - # - return _c.new(gcrootfinder.newthrd, llhelper(_c.run_fn, _new_runfn), - llmemory.NULL) - -def _stack_just_closed(): - # Immediately unlink the new stackanchor from the doubly-linked - # chained list. When returning from pypy_asm_stackwalk2, the - # assembler code will try to unlink it again, which should be - # a no-op given that the doubly-linked list is empty. 
- stackanchor = llmemory.cast_ptr_to_adr(alternateanchor.next) - gcrootfinder.suspstack.anchor = stackanchor - alternateanchor.prev = alternateanchor - alternateanchor.next = alternateanchor - -def _new_runfn(h, _): - # Here, we are in a fresh new stacklet. - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py - # - # There is a fresh suspstack object waiting on the gcrootfinder, - # so populate it with data that represents the parent suspended - # stacklet and detach the suspstack object from gcrootfinder. - suspstack = gcrootfinder.attach_handle_on_suspstack(h) - # - # Call the main function provided by the (RPython) user. - suspstack = gcrootfinder.runfn(suspstack, gcrootfinder.arg) - # - # Here, suspstack points to the target stacklet to which we want - # to jump to next. Read the 'handle' and forget about the - # suspstack object. - return _consume_suspstack(suspstack) - -def _consume_suspstack(suspstack): - h = suspstack.handle - ll_assert(bool(h), "_consume_suspstack: null handle") - suspstack.handle = _c.null_handle - return h - -def _switch_callback(): - # Here, we just closed the stack. Get the stack anchor, store - # it in the gcrootfinder.suspstack.anchor, and switch to this - # suspstack with stacklet_switch(). If this call fails, then we - # are just returning NULL. - oldanchor = gcrootfinder.suspstack.anchor - _stack_just_closed() - h = _consume_suspstack(gcrootfinder.suspstack) - # - # gcrootfinder.suspstack.anchor is left with the anchor of the - # previous place (i.e. before the call to switch()). 
- h2 = _c.switch(h) - # - if not h2: # MemoryError: restore - gcrootfinder.suspstack.anchor = oldanchor From pypy.commits at gmail.com Wed Dec 11 09:32:40 2019 From: pypy.commits at gmail.com (arigo) Date: Wed, 11 Dec 2019 06:32:40 -0800 (PST) Subject: [pypy-commit] pypy kill-asmgcc: Document branch Message-ID: <5df0fe08.1c69fb81.3619b.b7d5@mx.google.com> Author: Armin Rigo Branch: kill-asmgcc Changeset: r98270:04030975247c Date: 2019-12-11 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/04030975247c/ Log: Document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -8,3 +8,8 @@ .. branch: backport-decode_timeval_ns-py3.7 Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. From pypy.commits at gmail.com Thu Dec 12 07:55:16 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 12 Dec 2019 04:55:16 -0800 (PST) Subject: [pypy-commit] pypy default: adapt patch from portable-pypy (thanks squeakypl) Message-ID: <5df238b4.1c69fb81.802f4.eb68@mx.google.com> Author: Matti Picus Branch: Changeset: r98273:a5cd0e93d2fb Date: 2019-12-12 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/a5cd0e93d2fb/ Log: adapt patch from portable-pypy (thanks squeakypl) diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -1,4 +1,5 @@ import sys +import os import time import thread as _thread import weakref @@ -1205,6 +1206,10 @@ return stats def set_default_verify_paths(self): + if not os.environ.get('SSL_CERT_FILE') and not os.environ.get('SSL_CERT_DIR'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not 
lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1548,20 +1553,69 @@ lib.RAND_add(buf, len(buf), entropy) def get_default_verify_paths(): + ''' + Find a certificate store and associated values + Returns something like + `('SSL_CERT_FILE', '/usr/lib/ssl/cert.pem', 'SSL_CERT_DIR', '/usr/lib/ssl/certs')` + on Ubuntu and windows10 + + `('SSL_CERT_FILE', '/usr/local/cert.pem', 'SSL_CERT_DIR', '/usr/local/certs')` + on CentOS + + `('SSL_CERT_FILE', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem', + 'SSL_CERT_DIR', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/certs')` + on Darwin + + For portable builds (based on CentOS, but could be running on any glibc + linux) we need to check other locations. The list of places to try was taken + from golang in Dec 2018: + https://golang.org/src/crypto/x509/root_unix.go (for the directories), + https://golang.org/src/crypto/x509/root_linux.go (for the files) + ''' + certFiles = [ + "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Gentoo etc. 
+ "/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", # OpenSUSE + "/etc/pki/tls/cacert.pem", # OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", # CentOS/RHEL 7 + "/etc/ssl/cert.pem", # Alpine Linux + ] + certDirectories = [ + "/etc/ssl/certs", # SLES10/SLES11 + "/system/etc/security/cacerts", # Android + "/usr/local/share/certs", # FreeBSD + "/etc/pki/tls/certs", # Fedora/RHEL + "/etc/openssl/certs", # NetBSD + "/var/ssl/certs", # AIX + ] + + # optimization: reuse the values from a local varaible + if getattr(get_default_verify_paths, 'retval', None): + return get_default_verify_paths.retval + + # This should never fail, it should always return SSL_CERT_FILE and SSL_CERT_DIR ofile_env = _str_from_buf(lib.X509_get_default_cert_file_env()) - if ofile_env is None: - return None + odir_env = _str_from_buf(lib.X509_get_default_cert_dir_env()) + + # Platform depenedent ofile = _str_from_buf(lib.X509_get_default_cert_file()) - if ofile is None: - return None - odir_env = _str_from_buf(lib.X509_get_default_cert_dir_env()) - if odir_env is None: - return None odir = _str_from_buf(lib.X509_get_default_cert_dir()) - if odir is None: - return odir - return (ofile_env, ofile, odir_env, odir); + + if os.path.exists(ofile) and os.path.exists(odir): + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + + # OpenSSL didn't supply the goods. 
Try some other options + for f in certFiles: + if os.path.exists(f): + ofile = f + for f in certDirectories: + if os.path.exists(f): + odir = f + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + @ffi.callback("int(SSL*,unsigned char **,unsigned char *,const unsigned char *,unsigned int,void *)") def select_alpn_callback(ssl, out, outlen, client_protocols, client_protocols_len, args): From pypy.commits at gmail.com Thu Dec 12 07:55:21 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 12 Dec 2019 04:55:21 -0800 (PST) Subject: [pypy-commit] pypy py3.6: test, implement PyOS_FSPath Message-ID: <5df238b9.1c69fb81.d0210.f05a@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98275:adc2921362d4 Date: 2019-12-12 14:54 +0200 http://bitbucket.org/pypy/pypy/changeset/adc2921362d4/ Log: test, implement PyOS_FSPath diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -62,3 +62,26 @@ w_str = space.repr(w_obj) space.call_method(w_p, "write", w_str) return 0 + + at cpython_api([PyObject], PyObject) +def PyOS_FSPath(space, w_path): + """ + Return the file system representation for path. If the object is a str or + bytes object, then its reference count is incremented. If the object + implements the os.PathLike interface, then __fspath__() is returned as long + as it is a str or bytes object. Otherwise TypeError is raised and NULL is + returned. 
+ """ + if (space.isinstance_w(w_path, space.w_unicode) or + space.isinstance_w(w_path, space.w_bytes)): + return w_path + if not space.lookup(w_path, '__fspath__'): + raise oefmt(space.w_TypeError, + "expected str, bytes or os.PathLike object, not %T", w_path) + w_ret = space.call_method(w_path, '__fspath__') + if (space.isinstance_w(w_ret, space.w_unicode) or + space.isinstance_w(w_ret, space.w_bytes)): + return w_ret + raise oefmt(space.w_TypeError, + "expected %T.__fspath__() to return str or bytes, not %T", w_path, w_ret) + diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -1,6 +1,7 @@ import pytest from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.object import Py_PRINT_RAW +from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi from rpython.tool.udir import udir @@ -70,3 +71,31 @@ out, err = capfd.readouterr() out = out.replace('\r\n', '\n') assert out == "test\n'test\\n'" + + def test_fspath(self, space, api): + w_obj = space.newtext("test") + w_ret = api.PyOS_FSPath(w_obj) + assert space.eq_w(w_ret, w_obj) + + w_obj = space.newint(3) + with pytest.raises(OperationError): + w_ret = api.PyOS_FSPath(w_obj) + + + w_p1 = space.appexec([], '''(): + class Pathlike(): + def __fspath__(self): + return 'test' + return Pathlike()''') + + w_p2 = space.appexec([], '''(): + class UnPathlike(): + def __fspath__(self): + return 42 + return UnPathlike()''') + + w_ret = api.PyOS_FSPath(w_p1) + assert space.eq_w(w_ret, space.newtext('test')) + + with pytest.raises(OperationError): + w_ret = api.PyOS_FSPath(w_p2) From pypy.commits at gmail.com Thu Dec 12 07:55:19 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 12 Dec 2019 04:55:19 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5df238b7.1c69fb81.49139.d68a@mx.google.com> 
Author: Matti Picus Branch: py3.6 Changeset: r98274:d7d279c89e1c Date: 2019-12-12 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/d7d279c89e1c/ Log: merge default into py3.6 diff too long, truncating to 2000 out of 12653 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -57,3 +57,5 @@ 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0rc2 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 +e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 +533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -1,4 +1,5 @@ import sys +import os import time import _thread import weakref @@ -1238,6 +1239,10 @@ return stats def set_default_verify_paths(self): + if not os.environ.get('SSL_CERT_FILE') and not os.environ.get('SSL_CERT_DIR'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1581,20 +1586,69 @@ lib.RAND_add(buf, len(buf), entropy) def get_default_verify_paths(): + ''' + Find a certificate store and associated values + Returns something like + `('SSL_CERT_FILE', '/usr/lib/ssl/cert.pem', 'SSL_CERT_DIR', '/usr/lib/ssl/certs')` + on Ubuntu and windows10 + + `('SSL_CERT_FILE', '/usr/local/cert.pem', 'SSL_CERT_DIR', '/usr/local/certs')` + on CentOS + + `('SSL_CERT_FILE', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem', + 'SSL_CERT_DIR', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/certs')` + on Darwin + + For portable builds (based on CentOS, but could be running on any glibc + linux) we need to check other locations. 
The list of places to try was taken + from golang in Dec 2018: + https://golang.org/src/crypto/x509/root_unix.go (for the directories), + https://golang.org/src/crypto/x509/root_linux.go (for the files) + ''' + certFiles = [ + "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Gentoo etc. + "/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", # OpenSUSE + "/etc/pki/tls/cacert.pem", # OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", # CentOS/RHEL 7 + "/etc/ssl/cert.pem", # Alpine Linux + ] + certDirectories = [ + "/etc/ssl/certs", # SLES10/SLES11 + "/system/etc/security/cacerts", # Android + "/usr/local/share/certs", # FreeBSD + "/etc/pki/tls/certs", # Fedora/RHEL + "/etc/openssl/certs", # NetBSD + "/var/ssl/certs", # AIX + ] + + # optimization: reuse the values from a local varaible + if getattr(get_default_verify_paths, 'retval', None): + return get_default_verify_paths.retval + + # This should never fail, it should always return SSL_CERT_FILE and SSL_CERT_DIR ofile_env = _cstr_decode_fs(lib.X509_get_default_cert_file_env()) - if ofile_env is None: - return None + odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) + + # Platform depenedent ofile = _cstr_decode_fs(lib.X509_get_default_cert_file()) - if ofile is None: - return None - odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) - if odir_env is None: - return None odir = _cstr_decode_fs(lib.X509_get_default_cert_dir()) - if odir is None: - return odir - return (ofile_env, ofile, odir_env, odir); + + if os.path.exists(ofile) and os.path.exists(odir): + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + + # OpenSSL didn't supply the goods. 
Try some other options + for f in certFiles: + if os.path.exists(f): + ofile = f + for f in certDirectories: + if os.path.exists(f): + odir = f + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + @ffi.callback("int(SSL*,unsigned char **,unsigned char *,const unsigned char *,unsigned int,void *)") def select_alpn_callback(ssl, out, outlen, client_protocols, client_protocols_len, args): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -14,7 +14,7 @@ conf = get_pypy_config() conf.translation.gc = "boehm" with py.test.raises(ConfigError): - conf.translation.gcrootfinder = 'asmgcc' + conf.translation.gcrootfinder = 'shadowstack' def test_frameworkgc(): for name in ["minimark", "semispace"]: diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,16 +1,7 @@ Choose the method used to find the roots in the GC. This only -applies to our framework GCs. You have a choice of two -alternatives: +applies to our framework GCs. - ``--gcrootfinder=shadowstack``: use a so-called "shadow stack", which is an explicitly maintained custom stack of - root pointers. This is the most portable solution. - -- ``--gcrootfinder=asmgcc``: use assembler hackery to find the - roots directly from the normal stack. This is a bit faster, - but platform specific. It works so far with GCC or MSVC, - on i386 and x86-64. It is tested only on Linux - so other platforms (as well as MSVC) may need - various fixes before they can be used. Note asmgcc will be deprecated - at some future date, and does not work with clang. - + root pointers. This is the most portable solution, and also + the only one available now. 
diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -177,6 +177,8 @@ * Adds encoding, decoding codepages on win32 * Remove socket error attributes from ``_ssl`` (`issue 3119`_) * Add missing ``os.getgrouplist`` (part of `issue 2375`_) +* Back-port the tentative fix from cpython: "Import deadlock detection causes + deadlock" (part of `issue 3111`_) Python 3.6 C-API ~~~~~~~~~~~~~~~~ @@ -210,6 +212,7 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3111`: https://bitbucket.com/pypy/pypy/issues/3111 .. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -8,3 +8,8 @@ .. branch: backport-decode_timeval_ns-py3.7 Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. 
diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -43,8 +43,7 @@ from rpython.rlib import rgil rgil.acquire() - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) @@ -69,7 +68,6 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - rffi.stackcounter.stacks_counter -= 1 rgil.release() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1046,8 +1046,7 @@ else: gilstate = pystate.PyGILState_IGNORE - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C retval = fatal_value boxed_args = () tb = None @@ -1124,7 +1123,6 @@ return fatal_value assert lltype.typeOf(retval) == restype - rffi.stackcounter.stacks_counter -= 1 _restore_gil_state(pygilstate_release, gilstate, gil_release, _gil_auto, tid) return retval diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -30,7 +30,7 @@ # called from the rffi-generated wrapper). The gc_thread_run() # operation will automatically notice that the current thread id was # not seen before, and (in shadowstack) it will allocate and use a -# fresh new stack. Again, this has no effect in asmgcc. +# fresh new stack. # # * Only then does bootstrap() really run. The first thing it does # is grab the start-up information (app-level callable and args) @@ -43,7 +43,7 @@ # thread. # # * Just before a thread finishes, gc_thread_die() is called to free -# its shadow stack. This has no effect in asmgcc. 
+# its shadow stack. class Bootstrapper(object): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,8 +2,8 @@ pmaj=2 # python main version: 2 or 3 pmin=7 # python minor version maj=7 -min=2 -rev=0rc2 +min=3 +rev=0rc1 case $pmaj in "2") exe=pypy;; diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -13,13 +13,6 @@ config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') -if compiler.name == 'msvc' or sys.platform == 'darwin': - def test_no_asmgcrot_on_msvc(): - config = get_combined_translation_config() - config.translation.gcrootfinder = "asmgcc" - py.test.raises(ConfigError, set_opt_level, config, 'jit') - - def test_get_translation_config(): from rpython.translator.interactive import Translation from rpython.config import config diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -18,10 +18,6 @@ DEFL_GC = "incminimark" # XXX DEFL_ROOTFINDER_WITHJIT = "shadowstack" -## if sys.platform.startswith("linux"): -## _mach = os.popen('uname -m', 'r').read().strip() -## if _mach.startswith('x86') or _mach in ['i386', 'i486', 'i586', 'i686']: -## DEFL_ROOTFINDER_WITHJIT = "asmgcc" # only for Linux on x86 / x86-64 IS_64_BITS = sys.maxint > 2147483647 @@ -100,13 +96,11 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ["n/a", "shadowstack"], "shadowstack", cmdline="--gcrootfinder", requires={ "shadowstack": [("translation.gctransformer", "framework")], - "asmgcc": [("translation.gctransformer", "framework"), - 
("translation.backend", "c")], }), # other noticeable options @@ -402,10 +396,6 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X and on Win32 - if config.translation.gcrootfinder == "asmgcc": - if sys.platform == "darwin" or sys.platform =="win32": - raise ConfigError("'asmgcc' not supported on this platform") # ---------------------------------------------------------------- diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -438,51 +438,8 @@ @staticmethod @rgc.no_collect - def _reacquire_gil_asmgcc(css, old_rpy_fastgil): - # Before doing an external call, 'rpy_fastgil' is initialized to - # be equal to css. This function is called if we find out after - # the call that it is no longer equal to css. See description - # in translator/c/src/thread_pthread.c. - - # XXX some duplicated logic here, but note that rgil.acquire() - # does more than just RPyGilAcquire() - if old_rpy_fastgil == 0: - # this case occurs if some other thread stole the GIL but - # released it again. What occurred here is that we changed - # 'rpy_fastgil' from 0 to 1, thus successfully reaquiring the - # GIL. - pass - - elif old_rpy_fastgil == 1: - # 'rpy_fastgil' was (and still is) locked by someone else. - # We need to wait for the regular mutex. - from rpython.rlib import rgil - rgil.acquire() - else: - # stole the GIL from a different thread that is also - # currently in an external call from the jit. Attach - # the 'old_rpy_fastgil' into the chained list. 
- from rpython.memory.gctransform import asmgcroot - oth = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, old_rpy_fastgil) - next = asmgcroot.gcrootanchor.next - oth.next = next - oth.prev = asmgcroot.gcrootanchor - asmgcroot.gcrootanchor.next = oth - next.prev = oth - - # similar to trackgcroot.py:pypy_asm_stackwalk, second part: - # detach the 'css' from the chained list - from rpython.memory.gctransform import asmgcroot - old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) - prev = old.prev - next = old.next - prev.next = next - next.prev = prev - - @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): - # Simplified version of _reacquire_gil_asmgcc(): in shadowstack mode, + # This used to be more complex for asmgcc. In shadowstack mode, # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) @@ -499,13 +456,10 @@ self._reacquire_gil_shadowstack) self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) else: - reacqgil_func = llhelper(self._REACQGIL2_FUNC, - self._reacquire_gil_asmgcc) - self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) + raise AssertionError("!is_shadow_stack") def _is_asmgcc(self): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - return bool(gcrootmap) and not gcrootmap.is_shadow_stack + return False # legacy def debug_bridge(descr_number, rawstart, codeendpos): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -21,7 +21,6 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler -from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo # 
____________________________________________________________ @@ -117,7 +116,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(jitframe.JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth', + 'jf_frame_info', 'jf_gcmap', 'jf_savedata', 'jf_forward']: setattr(descrs, name, cpu.fielddescrof(jitframe.JITFRAME, name)) descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, @@ -244,15 +243,6 @@ # ____________________________________________________________ # All code below is for the hybrid or minimark GC -class GcRootMap_asmgcc(object): - is_shadow_stack = False - - def __init__(self, gcdescr): - pass - - def register_asm_addr(self, start, mark): - pass - class GcRootMap_shadowstack(object): is_shadow_stack = True diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -49,7 +49,6 @@ rgc.register_custom_trace_hook(JITFRAME, lambda_jitframe_trace) frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info - frame.jf_extra_stack_depth = 0 return frame def jitframe_resolve(frame): @@ -71,8 +70,6 @@ ('jf_force_descr', llmemory.GCREF), # a map of GC pointers ('jf_gcmap', lltype.Ptr(GCMAP)), - # how much we decrease stack pointer. 
Used around calls and malloc slowpath - ('jf_extra_stack_depth', lltype.Signed), # For the front-end: a GCREF for the savedata ('jf_savedata', llmemory.GCREF), # For GUARD_(NO)_EXCEPTION and GUARD_NOT_FORCED: the exception we @@ -103,7 +100,6 @@ LENGTHOFS = llmemory.arraylengthoffset(JITFRAME.jf_frame) SIGN_SIZE = llmemory.sizeof(lltype.Signed) UNSIGN_SIZE = llmemory.sizeof(lltype.Unsigned) -STACK_DEPTH_OFS = getofs('jf_extra_stack_depth') def jitframe_trace(gc, obj_addr, callback, arg): gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr')) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -584,8 +584,6 @@ length = self.emit_getfield(ConstInt(frame_info), descr=descrs.jfi_frame_depth, raw=True) - self.emit_setfield(frame, self.c_zero, - descr=descrs.jf_extra_stack_depth) self.emit_setfield(frame, self.c_null, descr=descrs.jf_savedata) self.emit_setfield(frame, self.c_null, diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -110,7 +110,7 @@ class config_(object): class translation(object): gc = self.gc - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False class FakeTranslator(object): diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -507,7 +507,6 @@ ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), ('jf_descr', llmemory.GCREF), ('jf_force_descr', llmemory.GCREF), - ('jf_extra_stack_depth', lltype.Signed), ('jf_guard_exc', llmemory.GCREF), ('jf_gcmap', lltype.Ptr(jitframe.GCMAP)), 
('jf_gc_trace_state', lltype.Signed), @@ -594,7 +593,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + 'jf_frame_info', 'jf_gcmap']: setattr(descrs, name, cpu.fielddescrof(JITFRAME, name)) descrs.jfi_frame_depth = cpu.fielddescrof(jitframe.JITFRAMEINFO, 'jfi_frame_depth') diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -170,7 +170,6 @@ jf_descr = framedescrs.jf_descr jf_guard_exc = framedescrs.jf_guard_exc jf_forward = framedescrs.jf_forward - jf_extra_stack_depth = framedescrs.jf_extra_stack_depth signedframedescr = self.cpu.signedframedescr floatframedescr = self.cpu.floatframedescr casmdescr.compiled_loop_token = clt @@ -386,7 +385,7 @@ class config_(object): class translation(object): gc = 'minimark' - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False gcdescr = get_description(config_) @@ -1102,7 +1101,6 @@ p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) - %(setfield('p1', 0, jf_extra_stack_depth))s %(setfield('p1', 'NULL', jf_savedata))s %(setfield('p1', 'NULL', jf_force_descr))s %(setfield('p1', 'NULL', jf_descr))s diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -176,9 +176,6 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - 
py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -331,9 +331,6 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -35,9 +35,7 @@ PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, - # and it can be left here for when it is needed. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and it can be left here for when it is needed. THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 @@ -45,12 +43,10 @@ PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, - # and is moved into this frame location. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and is moved into this frame location. 
THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD -assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 +assert PASS_ON_MY_FRAME >= 12 # return address, followed by FRAME_FIXED_SIZE words DEFAULT_FRAME_BYTES = (1 + FRAME_FIXED_SIZE) * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -137,11 +137,6 @@ self.expand_byte_mask_addr = float_constants + 64 self.element_ones = [float_constants + 80 + 16*i for i in range(4)] - def set_extra_stack_depth(self, mc, value): - if self._is_asmgcc(): - extra_ofs = self.cpu.get_ofs_of_frame_field('jf_extra_stack_depth') - mc.MOV_bi(extra_ofs, value) - def build_frame_realloc_slowpath(self): mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) @@ -161,14 +156,20 @@ mc.MOV_sr(0, ebp.value) # align - self.set_extra_stack_depth(mc, align * WORD) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. 
+ # + #self.set_extra_stack_depth(mc, align * WORD) + self._store_and_reset_exception(mc, None, ebx, ecx) mc.CALL(imm(self.cpu.realloc_frame)) mc.MOV_rr(ebp.value, eax.value) self._restore_exception(mc, None, ebx, ecx) mc.ADD_ri(esp.value, (align - 1) * WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -196,12 +197,12 @@ # the caller already did push_gcmap(store=True) if IS_X86_64: mc.SUB(esp, imm(WORD)) # alignment - self.set_extra_stack_depth(mc, 2 * WORD) + #self.set_extra_stack_depth(mc, 2 * WORD) # the arguments are already in the correct registers else: # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 8 * WORD) + #self.set_extra_stack_depth(mc, 8 * WORD) # store the arguments at the correct place in the stack for i in range(4): mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) @@ -211,7 +212,7 @@ mc.ADD(esp, imm(WORD)) else: mc.ADD(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [eax], supports_floats, callee_only) mc.RET() @@ -275,11 +276,11 @@ # (already in edx) # length mc.MOV_rr(esi.value, ecx.value) # tid mc.MOV_rs(edi.value, WORD * 3) # load the itemsize - self.set_extra_stack_depth(mc, 16) + #self.set_extra_stack_depth(mc, 16) mc.CALL(imm(follow_jump(addr))) self._reload_frame_if_necessary(mc) mc.ADD_ri(esp.value, 16 - WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) # mc.TEST_rr(eax.value, eax.value) # common case: not taken @@ -1018,8 +1019,6 @@ from rpython.rlib.rvmprof.rvmprof import cintf # edx = address of pypy_threadlocal_s self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(edx.value, ~1) # eax = (our local vmprof_tl_stack).next 
self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax @@ -2236,25 +2235,6 @@ def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) - if self._is_asmgcc(): - # We need to remove the bit "already seen during the - # previous minor collection" instead of passing this - # value directly. - if IS_X86_64: - tmploc = esi # already the correct place - if argloc is tmploc: - # this case is theoretical only so far: in practice, - # argloc is always eax, never esi - self.mc.MOV_rr(edi.value, esi.value) - argloc = edi - else: - tmploc = eax - if tmploc is argloc: - tmploc = edx - self.mc.MOV(tmploc, threadlocal_loc) - self.mc.AND_ri(tmploc.value, ~1) - threadlocal_loc = tmploc - # self.simple_call(addr, [argloc, threadlocal_loc]) def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): @@ -2672,8 +2652,6 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(resloc.value, ~1) self.load_from_mem(resloc, addr_add_const(resloc, offset), imm(size), imm(sign)) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -61,13 +61,6 @@ self.arglocs = arglocs + [fnloc] self.start_frame_size = self.mc._frame_size - def select_call_release_gil_mode(self): - AbstractCallBuilder.select_call_release_gil_mode(self) - if self.asm._is_asmgcc(): - from rpython.memory.gctransform import asmgcroot - self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS - assert self.stack_max >= 3 - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -103,9 +96,14 @@ # value eax, if necessary assert not self.is_call_release_gil current_esp = self.get_current_esp() - self.change_extra_stack_depth = (current_esp != 0) - if 
self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, -current_esp) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. + # + #self.change_extra_stack_depth = (current_esp != 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, -current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) @@ -119,13 +117,14 @@ # top at this point, so reuse it instead of loading it again ssreg = ebx self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) - if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap assert self.is_call_release_gil + assert not self.asm._is_asmgcc() # # Save this thread's shadowstack pointer into 'ebx', # for later comparison @@ -135,38 +134,12 @@ rst = gcrootmap.get_root_stack_top_addr() self.mc.MOV(ebx, heap(rst)) # - if not self.asm._is_asmgcc(): - # shadowstack: change 'rpy_fastgil' to 0 (it should be - # non-zero right now). - self.change_extra_stack_depth = False - # ^^ note that set_extra_stack_depth() in this case is a no-op - css_value = imm(0) - else: - from rpython.memory.gctransform import asmgcroot - # build a 'css' structure on the stack: 2 words for the linkage, - # and 5/7 words as described for asmgcroot.ASM_FRAMEDATA, for a - # total size of JIT_USE_WORDS. This structure is found at - # [ESP+css]. 
- css = -self.get_current_esp() + ( - WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS)) - assert css >= 2 * WORD - # Save ebp - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_sr(index_of_ebp, ebp.value) # MOV [css.ebp], EBP - # Save the "return address": we pretend that it's css - self.mc.LEA_rs(eax.value, css) # LEA eax, [css] - frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_sr(frame_ptr, eax.value) # MOV [css.frame], eax - # Set up jf_extra_stack_depth to pretend that the return address - # was at css, and so our stack frame is supposedly shorter by - # (PASS_ON_MY_FRAME-JIT_USE_WORDS+1) words - delta = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS + 1 - self.change_extra_stack_depth = True - self.asm.set_extra_stack_depth(self.mc, -delta * WORD) - css_value = eax + # shadowstack: change 'rpy_fastgil' to 0 (it should be + # non-zero right now). + #self.change_extra_stack_depth = False # # <--here--> would come a memory fence, if the CPU needed one. 
- self.mc.MOV(heap(fastgil), css_value) + self.mc.MOV(heap(fastgil), imm(0)) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more @@ -184,8 +157,6 @@ self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, THREADLOCAL_OFS - self.get_current_esp()) - if self.asm._is_asmgcc(): - self.mc.AND_ri(self.tlofs_reg.value, ~1) return self.tlofs_reg def save_stack_position(self): @@ -318,13 +289,6 @@ cb = self.callbuilder if not cb.result_value_saved_early: cb.save_result_value(save_edx=False) - if assembler._is_asmgcc(): - if IS_X86_32: - css_value = edx - old_value = ecx - mc.MOV_sr(4, old_value.value) - mc.MOV_sr(0, css_value.value) - # on X86_64, they are already in the right registers mc.CALL(imm(follow_jump(assembler.reacqgil_addr))) if not cb.result_value_saved_early: cb.restore_result_value(save_edx=False) @@ -333,29 +297,10 @@ from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not - # (to acquiring the GIL, remove the asmgcc head from - # the chained list, etc.) + # (to acquiring the GIL) mc = self.mc restore_edx = False - if not self.asm._is_asmgcc(): - css = 0 - css_value = imm(0) - old_value = ecx - else: - from rpython.memory.gctransform import asmgcroot - css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) - if IS_X86_32: - assert css >= 16 - if self.restype == 'L': # long long result: eax/edx - if not self.result_value_saved_early: - mc.MOV_sr(12, edx.value) - restore_edx = True - css_value = edx # note: duplicated in ReacqGilSlowPath - old_value = ecx # - elif IS_X86_64: - css_value = edi - old_value = esi - mc.LEA_rs(css_value.value, css) + old_value = ecx # # Use XCHG as an atomic test-and-set-lock. It also implicitly # does a memory barrier. 
@@ -365,11 +310,12 @@ else: mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) - mc.CMP(old_value, css_value) + mc.CMP(old_value, imm(0)) # gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap - if bool(gcrootmap) and gcrootmap.is_shadow_stack: + if bool(gcrootmap): from rpython.jit.backend.x86.assembler import heap + assert gcrootmap.is_shadow_stack # # When doing a call_release_gil with shadowstack, there # is the risk that the 'rpy_fastgil' was free but the @@ -406,14 +352,8 @@ if not we_are_translated(): # for testing: now we can accesss mc.SUB(ebp, imm(1)) # ebp again # - # Now that we required the GIL, we can reload a possibly modified ebp - if self.asm._is_asmgcc(): - # special-case: reload ebp from the css - from rpython.memory.gctransform import asmgcroot - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] - #else: - # for shadowstack, done for us by _reload_frame_if_necessary() + # Now that we required the GIL, we will reload a possibly modified ebp: + # this done for us by _reload_frame_if_necessary() def save_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -829,10 +829,7 @@ self.xrm.before_call(save_all_regs=save_all_regs) if gc_level == SAVE_GCREF_REGS: gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap - # we save all the GCREF registers for shadowstack and asmgcc for now - # --- for asmgcc too: we can't say "register x is a gc ref" - # without distinguishing call sites, which we don't do any - # more for now. 
+ # we save all the GCREF registers for shadowstack if gcrootmap: # and gcrootmap.is_shadow_stack: save_all_regs = SAVE_GCREF_REGS self.rm.before_call(save_all_regs=save_all_regs) @@ -940,15 +937,6 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): - # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' - # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. - # We must make sure that edi and esi do not contain GC pointers. - if IS_X86_32 and self.assembler._is_asmgcc(): - for box, loc in self.rm.reg_bindings.items(): - if (loc == edi or loc == esi) and box.type == REF: - self.rm.force_spill_var(box) - assert box not in self.rm.reg_bindings - # args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments v_func = args[1] diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py deleted file mode 100644 --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ /dev/null @@ -1,9 +0,0 @@ -import py -from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests -from rpython.translator.platform import platform as compiler - -if compiler.name == 'msvc': - py.test.skip('asmgcc buggy on msvc') - -class TestAsmGcc(CompileFrameworkTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py --- a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py +++ b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py @@ -1,11 +1,5 @@ from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests -from rpython.translator.platform import platform as compiler class TestShadowStack(ReleaseGILTests): gcrootfinder = "shadowstack" - - -if compiler.name != 'msvc': - class TestAsmGcc(ReleaseGILTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py 
b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py --- a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py @@ -1,19 +1,12 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC -from rpython.translator.platform import platform as compiler -if compiler.name == 'msvc': - _MSVC = True -else: - _MSVC = False class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest): def _get_TranslationContext(self): t = TranslationContext() t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' - if not _MSVC: - t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True return t diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1156,8 +1156,7 @@ 'CALL_ASSEMBLER/*d/rfin', # call already compiled assembler 'CALL_MAY_FORCE/*d/rfin', 'CALL_LOOPINVARIANT/*d/rfin', - 'CALL_RELEASE_GIL/*d/fin', - # release the GIL and "close the stack" for asmgcc + 'CALL_RELEASE_GIL/*d/fin', # release the GIL around the call 'CALL_PURE/*d/rfin', # removed before it's passed to the backend 'CHECK_MEMORY_ERROR/1/n', # after a CALL: NULL => propagate MemoryError 'CALL_MALLOC_NURSERY/1/r', # nursery malloc, const number of bytes, zeroed diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py deleted file mode 100644 --- a/rpython/memory/gctransform/asmgcroot.py +++ /dev/null @@ -1,870 +0,0 @@ -from rpython.flowspace.model import (Constant, Variable, Block, Link, - copygraph, SpaceOperation, checkgraph) -from rpython.rlib.debug import ll_assert -from rpython.rlib.nonconst 
import NonConstant -from rpython.rlib import rgil -from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import ( - BaseFrameworkGCTransformer, BaseRootWalker) -from rpython.rtyper.llannotation import SomeAddress -from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import varoftype -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys - - -# -# This transformer avoids the use of a shadow stack in a completely -# platform-specific way, by directing genc to insert asm() special -# instructions in the C source, which are recognized by GCC. -# The .s file produced by GCC is then parsed by trackgcroot.py. -# - -IS_64_BITS = sys.maxint > 2147483647 - -class AsmGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer): - _asmgcc_save_restore_arguments = None - - def push_roots(self, hop, keep_current_args=False): - livevars = self.get_livevars_for_roots(hop, keep_current_args) - self.num_pushs += len(livevars) - return livevars - - def pop_roots(self, hop, livevars): - if not livevars: - return - # mark the values as gc roots - for var in livevars: - v_adr = gen_cast(hop.llops, llmemory.Address, var) - v_newaddr = hop.genop("direct_call", [c_asm_gcroot, v_adr], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) - - def build_root_walker(self): - return AsmStackRootWalker(self) - - def mark_call_cannotcollect(self, hop, name): - hop.genop("direct_call", [c_asm_nocollect, name]) - - def gct_direct_call(self, hop): - # just a sanity check: if we find a fnptr with the hint on the - # _callable, then we'd also find the hint by looking only at the - # graph. We'll actually change this graph only later, in - # start_transforming_graph(). 
- fnptr = hop.spaceop.args[0].value - try: - close_stack = fnptr._obj._callable._gctransformer_hint_close_stack_ - except AttributeError: - pass - else: - assert fnptr._obj.graph.func is fnptr._obj._callable - BaseFrameworkGCTransformer.gct_direct_call(self, hop) - - def start_transforming_graph(self, graph): - try: - close_stack = graph.func._gctransformer_hint_close_stack_ - except AttributeError: - close_stack = False - if close_stack: - self._transform_hint_close_stack(graph) - - def _transform_hint_close_stack(self, graph): - # We cannot easily pass variable amount of arguments of the call - # across the call to the pypy_asm_stackwalk helper. So we store - # them away and restore them. More precisely, we need to - # replace 'graph' with code that saves the arguments, and make - # a new graph that starts with restoring the arguments. - if self._asmgcc_save_restore_arguments is None: - self._asmgcc_save_restore_arguments = {} - sradict = self._asmgcc_save_restore_arguments - sra = [] # list of pointers to raw-malloced containers for args - seen = {} - ARGS = [v.concretetype for v in graph.getargs()] - for TYPE in ARGS: - if isinstance(TYPE, lltype.Ptr): - TYPE = llmemory.Address - num = seen.get(TYPE, 0) - seen[TYPE] = num + 1 - key = (TYPE, num) - if key not in sradict: - CONTAINER = lltype.FixedSizeArray(TYPE, 1) - p = lltype.malloc(CONTAINER, flavor='raw', zero=True, - immortal=True) - sradict[key] = Constant(p, lltype.Ptr(CONTAINER)) - sra.append(sradict[key]) - # - # make a copy of the graph that will reload the values - graph2 = copygraph(graph) - del graph2.func # otherwise, start_transforming_graph() will - # again transform graph2, and we get an - # infinite loop - # - # edit the original graph to only store the value of the arguments - block = Block(graph.startblock.inputargs) - c_item0 = Constant('item0', lltype.Void) - assert len(block.inputargs) == len(sra) - for v_arg, c_p in zip(block.inputargs, sra): - if isinstance(v_arg.concretetype, 
lltype.Ptr): - v_adr = varoftype(llmemory.Address) - block.operations.append( - SpaceOperation("cast_ptr_to_adr", [v_arg], v_adr)) - v_arg = v_adr - v_void = varoftype(lltype.Void) - block.operations.append( - SpaceOperation("bare_setfield", [c_p, c_item0, v_arg], v_void)) - # - # call asm_stackwalk(graph2) - RESULT = graph.getreturnvar().concretetype - FUNC2 = lltype.FuncType([], RESULT) - fnptr2 = lltype.functionptr(FUNC2, - graph.name + '_reload', - graph=graph2) - c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2)) - HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2), - ASM_FRAMEDATA_HEAD_PTR], RESULT) - v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk") - block.operations.append( - SpaceOperation("cast_pointer", [c_asm_stackwalk], v_asm_stackwalk)) - v_result = varoftype(RESULT) - block.operations.append( - SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2, - c_gcrootanchor, - Constant(None, lltype.Void)], - v_result)) - block.closeblock(Link([v_result], graph.returnblock)) - graph.startblock = block - # - # edit the copy of the graph to reload the values - block2 = graph2.startblock - block1 = Block([]) - reloadedvars = [] - for v, c_p in zip(block2.inputargs, sra): - v = v.copy() - if isinstance(v.concretetype, lltype.Ptr): - w = varoftype(llmemory.Address) - else: - w = v - block1.operations.append(SpaceOperation('getfield', - [c_p, c_item0], w)) - if w is not v: - block1.operations.append(SpaceOperation('cast_adr_to_ptr', - [w], v)) - reloadedvars.append(v) - block1.closeblock(Link(reloadedvars, block2)) - graph2.startblock = block1 - # - checkgraph(graph) - checkgraph(graph2) - - -class AsmStackRootWalker(BaseRootWalker): - - def __init__(self, gctransformer): - BaseRootWalker.__init__(self, gctransformer) - - def _asm_callback(): - self.walk_stack_from() - self._asm_callback = _asm_callback - self._shape_decompressor = ShapeDecompressor() - self._with_jit = hasattr(gctransformer.translator, '_jit2gc') - if self._with_jit: - jit2gc = 
gctransformer.translator._jit2gc - self.frame_tid = jit2gc['frame_tid'] - self.gctransformer = gctransformer - # - # unless overridden in need_thread_support(): - self.belongs_to_current_thread = lambda framedata: True - - def need_stacklet_support(self, gctransformer, getfn): - from rpython.annotator import model as annmodel - from rpython.rlib import _stacklet_asmgcc - # stacklet support: BIG HACK for rlib.rstacklet - _stacklet_asmgcc._asmstackrootwalker = self # as a global! argh - _stacklet_asmgcc.complete_destrptr(gctransformer) - # - def gc_detach_callback_pieces(): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - result = llmemory.NULL - framedata = anchor.address[1] - while framedata != anchor: - next = framedata.address[1] - if self.belongs_to_current_thread(framedata): - # detach it - prev = framedata.address[0] - prev.address[1] = next - next.address[0] = prev - # update the global stack counter - rffi.stackcounter.stacks_counter -= 1 - # reattach framedata into the singly-linked list 'result' - framedata.address[0] = rffi.cast(llmemory.Address, -1) - framedata.address[1] = result - result = framedata - framedata = next - return result - # - def gc_reattach_callback_pieces(pieces): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while pieces != llmemory.NULL: - framedata = pieces - pieces = pieces.address[1] - # attach 'framedata' into the normal doubly-linked list - following = anchor.address[1] - following.address[0] = framedata - framedata.address[1] = following - anchor.address[1] = framedata - framedata.address[0] = anchor - # update the global stack counter - rffi.stackcounter.stacks_counter += 1 - # - s_addr = SomeAddress() - s_None = annmodel.s_None - self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, - [], s_addr) - self.gc_reattach_callback_pieces_ptr=getfn(gc_reattach_callback_pieces, - [s_addr], s_None) - - def need_thread_support(self, gctransformer, getfn): - # Threads supported "out of the box" by the rest of the 
code. - # The whole code in this function is only there to support - # fork()ing in a multithreaded process :-( - # For this, we need to handle gc_thread_start and gc_thread_die - # to record the mapping {thread_id: stack_start}, and - # gc_thread_before_fork and gc_thread_after_fork to get rid of - # all ASM_FRAMEDATA structures that do no belong to the current - # thread after a fork(). - from rpython.rlib import rthread - from rpython.memory.support import AddressDict - from rpython.memory.support import copy_without_null_values - from rpython.annotator import model as annmodel - gcdata = self.gcdata - - def get_aid(): - """Return the thread identifier, cast to an (opaque) address.""" - return llmemory.cast_int_to_adr(rthread.get_ident()) - - def thread_start(): - value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed)) - gcdata.aid2stack.setitem(get_aid(), value) - thread_start._always_inline_ = True - - def thread_setup(): - gcdata.aid2stack = AddressDict() - gcdata.dead_threads_count = 0 - # to also register the main thread's stack - thread_start() - thread_setup._always_inline_ = True - - def thread_die(): - gcdata.aid2stack.setitem(get_aid(), llmemory.NULL) - # from time to time, rehash the dictionary to remove - # old NULL entries - gcdata.dead_threads_count += 1 - if (gcdata.dead_threads_count & 511) == 0: - copy = copy_without_null_values(gcdata.aid2stack) - gcdata.aid2stack.delete() - gcdata.aid2stack = copy - - def belongs_to_current_thread(framedata): - # xxx obscure: the answer is Yes if, as a pointer, framedata - # lies between the start of the current stack and the top of it. 
- stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL) - ll_assert(stack_start != llmemory.NULL, - "current thread not found in gcdata.aid2stack!") - stack_stop = llmemory.cast_int_to_adr( - llop.stack_current(lltype.Signed)) - return (stack_start <= framedata <= stack_stop or - stack_start >= framedata >= stack_stop) - self.belongs_to_current_thread = belongs_to_current_thread - - def thread_before_fork(): - # before fork(): collect all ASM_FRAMEDATA structures that do - # not belong to the current thread, and move them out of the - # way, i.e. out of the main circular doubly linked list. - detached_pieces = llmemory.NULL - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - while initialframedata != anchor: # while we have not looped back - if not belongs_to_current_thread(initialframedata): - # Unlink it - prev = initialframedata.address[0] - next = initialframedata.address[1] - prev.address[1] = next - next.address[0] = prev - # Link it to the singly linked list 'detached_pieces' - initialframedata.address[0] = detached_pieces - detached_pieces = initialframedata - rffi.stackcounter.stacks_counter -= 1 - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - return detached_pieces - - def thread_after_fork(result_of_fork, detached_pieces): - if result_of_fork == 0: - # We are in the child process. Assumes that only the - # current thread survived. All the detached_pieces - # are pointers in other stacks, so have likely been - # freed already by the multithreaded library. - # Nothing more for us to do. - pass - else: - # We are still in the parent process. The fork() may - # have succeeded or not, but that's irrelevant here. - # We need to reattach the detached_pieces now, to the - # circular doubly linked list at 'gcrootanchor'. The - # order is not important. 
- anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while detached_pieces != llmemory.NULL: - reattach = detached_pieces - detached_pieces = detached_pieces.address[0] - a_next = anchor.address[1] - reattach.address[0] = anchor - reattach.address[1] = a_next - anchor.address[1] = reattach - a_next.address[0] = reattach - rffi.stackcounter.stacks_counter += 1 - - self.thread_setup = thread_setup - self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None, - inline=True) - self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) - self.thread_before_fork_ptr = getfn(thread_before_fork, [], - SomeAddress()) - self.thread_after_fork_ptr = getfn(thread_after_fork, - [annmodel.SomeInteger(), - SomeAddress()], - annmodel.s_None) - # - # check that the order of the need_*() is correct for us: if we - # need both threads and stacklets, need_thread_support() must be - # called first, to initialize self.belongs_to_current_thread. - assert not hasattr(self, 'gc_detach_callback_pieces_ptr') - - def postprocess_graph(self, gct, graph, any_inlining): - pass - - def walk_stack_roots(self, collect_stack_root, is_minor=False): - gcdata = self.gcdata - gcdata._gc_collect_stack_root = collect_stack_root - gcdata._gc_collect_is_minor = is_minor - pypy_asm_stackwalk(llhelper(ASM_CALLBACK_PTR, self._asm_callback), - gcrootanchor) - - def walk_stack_from(self): - curframe = lltype.malloc(WALKFRAME, flavor='raw') - otherframe = lltype.malloc(WALKFRAME, flavor='raw') - - # Walk over all the pieces of stack. They are in a circular linked - # list of structures of 7 words, the 2 first words being prev/next. 
- # The anchor of this linked list is: - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - stackscount = 0 - while initialframedata != anchor: # while we have not looped back - self.walk_frames(curframe, otherframe, initialframedata) - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - stackscount += 1 - # - # for the JIT: rpy_fastgil may contain an extra framedata - rpy_fastgil = rgil.gil_fetch_fastgil().signed[0] - if rpy_fastgil != 1: - ll_assert(rpy_fastgil != 0, "walk_stack_from doesn't have the GIL") - initialframedata = rffi.cast(llmemory.Address, rpy_fastgil) - # - # very rare issue: initialframedata.address[0] is uninitialized - # in this case, but "retaddr = callee.frame_address.address[0]" - # reads it. If it happens to be exactly a valid return address - # inside the C code, then bad things occur. - initialframedata.address[0] = llmemory.NULL - # - self.walk_frames(curframe, otherframe, initialframedata) - stackscount += 1 - # - expected = rffi.stackcounter.stacks_counter - if NonConstant(0): - rffi.stackcounter.stacks_counter += 42 # hack to force it - ll_assert(not (stackscount < expected), "non-closed stacks around") - ll_assert(not (stackscount > expected), "stacks counter corruption?") - lltype.free(otherframe, flavor='raw') - lltype.free(curframe, flavor='raw') - - def walk_frames(self, curframe, otherframe, initialframedata): - self.fill_initial_frame(curframe, initialframedata) - # Loop over all the frames in the stack - while self.walk_to_parent_frame(curframe, otherframe): - swap = curframe - curframe = otherframe # caller becomes callee - otherframe = swap - - def fill_initial_frame(self, curframe, initialframedata): - # Read the information provided by initialframedata - initialframedata += 2*sizeofaddr #skip the prev/next words at the start - reg = 0 - while reg < CALLEE_SAVED_REGS: - # NB. 
'initialframedata' stores the actual values of the - # registers %ebx etc., and if these values are modified - # they are reloaded by pypy_asm_stackwalk(). By contrast, - # 'regs_stored_at' merely points to the actual values - # from the 'initialframedata'. - curframe.regs_stored_at[reg] = initialframedata + reg*sizeofaddr - reg += 1 - curframe.frame_address = initialframedata.address[CALLEE_SAVED_REGS] - - def walk_to_parent_frame(self, callee, caller): - """Starting from 'callee', walk the next older frame on the stack - and fill 'caller' accordingly. Also invokes the collect_stack_root() - callback from the GC code for each GC root found in 'caller'. - """ - # - # The gcmap table is a list of entries, two machine words each: - # void *SafePointAddress; - # int Shape; - # - # A "safe point" is the return address of a call. - # The "shape" of a safe point is a list of integers - # that represent "locations". A "location" can be - # either in the stack or in a register. See - # getlocation() for the decoding of this integer. - # The locations stored in a "shape" are as follows: - # - # * The "location" of the return address. This is just - # after the end of the frame of 'callee'; it is the - # first word of the frame of 'caller' (see picture - # below). - # - # * Four "locations" that specify where the function saves - # each of the four callee-saved registers (%ebx, %esi, - # %edi, %ebp). - # - # * The number of live GC roots around the call. - # - # * For each GC root, an integer that specify where the - # GC pointer is stored. This is a "location" too. - # - # XXX the details are completely specific to X86!!! - # a picture of the stack may help: - # ^ ^ ^ - # | ... | to older frames - # +--------------+ - # | ret addr | <------ caller_frame (addr of retaddr) - # | ... | - # | caller frame | - # | ... | - # +--------------+ - # | ret addr | <------ callee_frame (addr of retaddr) - # | ... | - # | callee frame | - # | ... 
| lower addresses - # +--------------+ v v v - # - - retaddr = callee.frame_address.address[0] - # - # try to locate the caller function based on retaddr. - # set up self._shape_decompressor. - # - ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] - self.locate_caller_based_on_retaddr(retaddr, ebp_in_caller) - # - # found! Enumerate the GC roots in the caller frame - # - collect_stack_root = self.gcdata._gc_collect_stack_root - gc = self.gc - while True: - location = self._shape_decompressor.next() - if location == 0: - break - addr = self.getlocation(callee, ebp_in_caller, location) - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - # - # small hack: the JIT reserves THREADLOCAL_OFS's last bit for - # us. We use it to store an "already traced past this frame" - # flag. - if self._with_jit and self.gcdata._gc_collect_is_minor: - if self.mark_jit_frame_can_stop(callee): - return False - # - # track where the caller_frame saved the registers from its own - # caller - # - reg = CALLEE_SAVED_REGS - 1 - while reg >= 0: - location = self._shape_decompressor.next() - addr = self.getlocation(callee, ebp_in_caller, location) - caller.regs_stored_at[reg] = addr - reg -= 1 - - location = self._shape_decompressor.next() - caller.frame_address = self.getlocation(callee, ebp_in_caller, - location) - # we get a NULL marker to mean "I'm the frame - # of the entry point, stop walking" - return caller.frame_address != llmemory.NULL - - def locate_caller_based_on_retaddr(self, retaddr, ebp_in_caller): - gcmapstart = llop.gc_asmgcroot_static(llmemory.Address, 0) - gcmapend = llop.gc_asmgcroot_static(llmemory.Address, 1) - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if not self._shape_decompressor.sorted: - # the item may have been not found because the main array was - # not sorted. Sort it and try again. 
- win32_follow_gcmap_jmp(gcmapstart, gcmapend) - sort_gcmap(gcmapstart, gcmapend) - self._shape_decompressor.sorted = True - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if self._with_jit: - # item not found. We assume that it's a JIT-generated - # location -- but we check for consistency that ebp points - # to a JITFRAME object. - from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - - tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) - if (rffi.cast(lltype.Signed, tid) == - rffi.cast(lltype.Signed, self.frame_tid)): - # fish the depth - extra_stack_depth = (ebp_in_caller + STACK_DEPTH_OFS).signed[0] - ll_assert((extra_stack_depth & (rffi.sizeof(lltype.Signed) - 1)) - == 0, "asmgcc: misaligned extra_stack_depth") - extra_stack_depth //= rffi.sizeof(lltype.Signed) - self._shape_decompressor.setjitframe(extra_stack_depth) - return - llop.debug_fatalerror(lltype.Void, "cannot find gc roots!") - - def getlocation(self, callee, ebp_in_caller, location): - """Get the location in the 'caller' frame of a variable, based - on the integer 'location' that describes it. All locations are - computed based on information saved by the 'callee'. 
- """ - ll_assert(location >= 0, "negative location") - kind = location & LOC_MASK - offset = location & ~ LOC_MASK - if IS_64_BITS: - offset <<= 1 - if kind == LOC_REG: # register - if location == LOC_NOWHERE: - return llmemory.NULL - reg = (location >> 2) - 1 - ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") - return callee.regs_stored_at[reg] - elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + sizeofaddr - return esp_in_caller + offset - elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) - return ebp_in_caller + offset - else: # kind == LOC_EBP_MINUS: at -N(%ebp) - return ebp_in_caller - offset - - def mark_jit_frame_can_stop(self, callee): - location = self._shape_decompressor.get_threadlocal_loc() - if location == LOC_NOWHERE: - return False - addr = self.getlocation(callee, llmemory.NULL, location) - # - x = addr.signed[0] - if x & 1: - return True # this JIT stack frame is already marked! - else: - addr.signed[0] = x | 1 # otherwise, mark it but don't stop - return False - - -LOC_REG = 0 -LOC_ESP_PLUS = 1 -LOC_EBP_PLUS = 2 -LOC_EBP_MINUS = 3 -LOC_MASK = 0x03 -LOC_NOWHERE = LOC_REG | 0 - -# ____________________________________________________________ - -sizeofaddr = llmemory.sizeof(llmemory.Address) -arrayitemsize = 2 * sizeofaddr - - -def binary_search(start, end, addr1): - """Search for an element in a sorted array. - - The interval from the start address (included) to the end address - (excluded) is assumed to be a sorted arrays of pairs (addr1, addr2). - This searches for the item with a given addr1 and returns its - address. If not found exactly, it tries to return the address - of the item left of addr1 (i.e. such that result.address[0] < addr1). 
- """ - count = (end - start) // arrayitemsize - while count > 1: - middleindex = count // 2 - middle = start + middleindex * arrayitemsize - if addr1 < middle.address[0]: - count = middleindex - else: - start = middle - count -= middleindex - return start - -def search_in_gcmap(gcmapstart, gcmapend, retaddr): - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item # found - # 'retaddr' not exactly found. Check that 'item' is the start of a - # compressed range that includes 'retaddr'. - if retaddr > item.address[0] and item.signed[1] < 0: - return item # ok - else: - return llmemory.NULL # failed - -def search_in_gcmap2(gcmapstart, gcmapend, retaddr): - # same as 'search_in_gcmap', but without range checking support - # (item.signed[1] is an address in this case, not a signed at all!) - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item.address[1] # found - else: - return llmemory.NULL # failed - -def sort_gcmap(gcmapstart, gcmapend): - count = (gcmapend - gcmapstart) // arrayitemsize - qsort(gcmapstart, - rffi.cast(rffi.SIZE_T, count), - rffi.cast(rffi.SIZE_T, arrayitemsize), - c_compare_gcmap_entries) - -def replace_dead_entries_with_nulls(start, end): - # replace the dead entries (null value) with a null key. - count = (end - start) // arrayitemsize - 1 - while count >= 0: - item = start + count * arrayitemsize - if item.address[1] == llmemory.NULL: - item.address[0] = llmemory.NULL - count -= 1 - -if sys.platform == 'win32': - def win32_follow_gcmap_jmp(start, end): - # The initial gcmap table contains addresses to a JMP - # instruction that jumps indirectly to the real code. - # Replace them with the target addresses. 
- assert rffi.SIGNEDP is rffi.LONGP, "win64 support missing" - while start < end: - code = rffi.cast(rffi.CCHARP, start.address[0])[0] - if code == '\xe9': # jmp - rel32 = rffi.cast(rffi.SIGNEDP, start.address[0]+1)[0] - target = start.address[0] + (rel32 + 5) - start.address[0] = target - start += arrayitemsize -else: - def win32_follow_gcmap_jmp(start, end): - pass - -# ____________________________________________________________ - -class ShapeDecompressor: - _alloc_flavor_ = "raw" - - sorted = False - - def setpos(self, pos): - if pos < 0: - pos = ~ pos # can ignore this "range" marker here - gccallshapes = llop.gc_asmgcroot_static(llmemory.Address, 2) - self.addr = gccallshapes + pos - self.jit_index = -1 - - def setjitframe(self, extra_stack_depth): - self.jit_index = 0 - self.extra_stack_depth = extra_stack_depth - - def next(self): - index = self.jit_index - if index < 0: - # case "outside the jit" - addr = self.addr - value = 0 - while True: - b = ord(addr.char[0]) - addr += 1 - value += b - if b < 0x80: - break - value = (value - 0x80) << 7 - self.addr = addr - return value - else: - # case "in the jit" - from rpython.jit.backend.x86.arch import FRAME_FIXED_SIZE - from rpython.jit.backend.x86.arch import PASS_ON_MY_FRAME - self.jit_index = index + 1 - if index == 0: - # the jitframe is an object in EBP - return LOC_REG | ((INDEX_OF_EBP + 1) << 2) - if index == 1: - return 0 - # the remaining returned values should be: - # saved %rbp - # saved %r15 or on 32bit: - # saved %r14 saved %ebp - # saved %r13 saved %edi - # saved %r12 saved %esi - # saved %rbx saved %ebx - # return addr return addr - stack_depth = PASS_ON_MY_FRAME + self.extra_stack_depth - if IS_64_BITS: - if index == 2: # rbp - return LOC_ESP_PLUS | (stack_depth << 2) - if index == 3: # r15 - return LOC_ESP_PLUS | ((stack_depth + 5) << 2) - if index == 4: # r14 - return LOC_ESP_PLUS | ((stack_depth + 4) << 2) - if index == 5: # r13 - return LOC_ESP_PLUS | ((stack_depth + 3) << 2) - if index == 6: 
# r12 - return LOC_ESP_PLUS | ((stack_depth + 2) << 2) - if index == 7: # rbx - return LOC_ESP_PLUS | ((stack_depth + 1) << 2) - if index == 8: # return addr - return (LOC_ESP_PLUS | - ((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2)) - else: - if index == 2: # ebp - return LOC_ESP_PLUS | (stack_depth << 2) - if index == 3: # edi - return LOC_ESP_PLUS | ((stack_depth + 3) << 2) - if index == 4: # esi - return LOC_ESP_PLUS | ((stack_depth + 2) << 2) - if index == 5: # ebx - return LOC_ESP_PLUS | ((stack_depth + 1) << 2) - if index == 6: # return addr - return (LOC_ESP_PLUS | - ((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2)) - llop.debug_fatalerror(lltype.Void, "asmgcroot: invalid index") - return 0 # annotator fix - - def get_threadlocal_loc(self): - index = self.jit_index - if index < 0: - return LOC_NOWHERE # case "outside the jit" - else: - # case "in the jit" - from rpython.jit.backend.x86.arch import THREADLOCAL_OFS, WORD - return (LOC_ESP_PLUS | - ((THREADLOCAL_OFS // WORD + self.extra_stack_depth) << 2)) - - -# ____________________________________________________________ - -# -# The special pypy_asm_stackwalk(), implemented directly in -# assembler, fills information about the current stack top in an -# ASM_FRAMEDATA array and invokes an RPython callback with it. 
-# An ASM_FRAMEDATA is an array of 5 values that describe everything -# we need to know about a stack frame: -# -# - the value that %ebx had when the current function started -# - the value that %esi had when the current function started -# - the value that %edi had when the current function started -# - the value that %ebp had when the current function started -# - frame address (actually the addr of the retaddr of the current function; -# that's the last word of the frame in memory) -# -# On 64 bits, it is an array of 7 values instead of 5: -# -# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address -# - -if IS_64_BITS: - CALLEE_SAVED_REGS = 6 - INDEX_OF_EBP = 5 - FRAME_PTR = CALLEE_SAVED_REGS -else: - CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers - INDEX_OF_EBP = 3 - FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array - -JIT_USE_WORDS = 2 + FRAME_PTR + 1 - -ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) - -# used internally by walk_stack_from() -WALKFRAME = lltype.Struct('WALKFRAME', - ('regs_stored_at', # address of where the registers have been saved - lltype.FixedSizeArray(llmemory.Address, CALLEE_SAVED_REGS)), - ('frame_address', - llmemory.Address), - ) - -# We have a circular doubly-linked list of all the ASM_FRAMEDATAs currently -# alive. 
The list's starting point is given by 'gcrootanchor', which is not -# a full ASM_FRAMEDATA but only contains the prev/next pointers: -ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference()) -ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD', - ('prev', ASM_FRAMEDATA_HEAD_PTR), - ('next', ASM_FRAMEDATA_HEAD_PTR) - )) -gcrootanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, immortal=True) -gcrootanchor.prev = gcrootanchor -gcrootanchor.next = gcrootanchor -c_gcrootanchor = Constant(gcrootanchor, ASM_FRAMEDATA_HEAD_PTR) - -eci = ExternalCompilationInfo(compile_extra=['-DPYPY_USE_ASMGCC'], - post_include_bits=[""" -static int pypy_compare_gcmap_entries(const void *addr1, const void *addr2) -{ - char *key1 = * (char * const *) addr1; - char *key2 = * (char * const *) addr2; - if (key1 < key2) - return -1; - else if (key1 == key2) - return 0; - else - return 1; -} -"""]) - -pypy_asm_stackwalk = rffi.llexternal('pypy_asm_stackwalk', - [ASM_CALLBACK_PTR, - ASM_FRAMEDATA_HEAD_PTR], - lltype.Signed, - sandboxsafe=True, - _nowrapper=True, - random_effects_on_gcobjs=True, - compilation_info=eci) -c_asm_stackwalk = Constant(pypy_asm_stackwalk, - lltype.typeOf(pypy_asm_stackwalk)) - -pypy_asm_gcroot = rffi.llexternal('pypy_asm_gcroot', - [llmemory.Address], - llmemory.Address, - sandboxsafe=True, - _nowrapper=True) -c_asm_gcroot = Constant(pypy_asm_gcroot, lltype.typeOf(pypy_asm_gcroot)) - -pypy_asm_nocollect = rffi.llexternal('pypy_asm_gc_nocollect', - [rffi.CCHARP], lltype.Void, - sandboxsafe=True, - _nowrapper=True) -c_asm_nocollect = Constant(pypy_asm_nocollect, lltype.typeOf(pypy_asm_nocollect)) - -QSORT_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address, - llmemory.Address], rffi.INT)) -c_compare_gcmap_entries = rffi.llexternal('pypy_compare_gcmap_entries', - [llmemory.Address, llmemory.Address], - rffi.INT, compilation_info=eci, - _nowrapper=True, sandboxsafe=True) -qsort = rffi.llexternal('qsort', - [llmemory.Address, - rffi.SIZE_T, - 
rffi.SIZE_T, - QSORT_CALLBACK_PTR], - lltype.Void, - sandboxsafe=True, - random_effects_on_gcobjs=False, # but has a callback - _nowrapper=True) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1003,21 +1003,6 @@ # for stacklet hop.genop("direct_call", [self.root_walker.gc_modified_shadowstack_ptr]) - def gct_gc_detach_callback_pieces(self, hop): - op = hop.spaceop - assert len(op.args) == 0 - hop.genop("direct_call", - [self.root_walker.gc_detach_callback_pieces_ptr], - resultvar=op.result) - - def gct_gc_reattach_callback_pieces(self, hop): - op = hop.spaceop - assert len(op.args) == 1 - hop.genop("direct_call", - [self.root_walker.gc_reattach_callback_pieces_ptr, - op.args[0]], - resultvar=op.result) - def gct_do_malloc_fixedsize(self, hop): # used by the JIT (see rpython.jit.backend.llsupport.gc) op = hop.spaceop @@ -1244,8 +1229,10 @@ def gct_gc_thread_start(self, hop): assert self.translator.config.translation.thread + # There is no 'thread_start_ptr' any more for now, so the following + # line is always false. if hasattr(self.root_walker, 'thread_start_ptr'): - # only with asmgcc. Note that this is actually called after + # Note that this is actually called after # the first gc_thread_run() in the new thread. 
hop.genop("direct_call", [self.root_walker.thread_start_ptr]) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -217,9 +217,6 @@ self.var_last_needed_in = None self.curr_block = None - def start_transforming_graph(self, graph): - pass # for asmgcc.py - def transform_graph(self, graph): if graph in self.minimal_transform: if self.minimalgctransformer: @@ -229,7 +226,6 @@ if graph in self.seen_graphs: return self.seen_graphs.add(graph) - self.start_transforming_graph(graph) self.links_to_split = {} # link -> vars to pop_alive across the link diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py deleted file mode 100644 --- a/rpython/rlib/_stacklet_asmgcc.py +++ /dev/null @@ -1,325 +0,0 @@ -from rpython.rlib.debug import ll_assert -from rpython.rlib import rgc -from rpython.rlib.objectmodel import specialize -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.rlib import _rffi_stacklet as _c - - -_asmstackrootwalker = None # BIG HACK: monkey-patched by asmgcroot.py -_stackletrootwalker = None - -def get_stackletrootwalker(): - # XXX this is too complicated now; we don't need a StackletRootWalker - # instance to store global state. We could rewrite it all in one big - # function. We don't care enough for now. 
- - # lazily called, to make the following imports lazy - global _stackletrootwalker - if _stackletrootwalker is not None: - return _stackletrootwalker - - from rpython.memory.gctransform.asmgcroot import ( - WALKFRAME, CALLEE_SAVED_REGS, INDEX_OF_EBP, sizeofaddr) - - assert _asmstackrootwalker is not None, "should have been monkey-patched" - basewalker = _asmstackrootwalker - - class StackletRootWalker(object): - _alloc_flavor_ = "raw" - - def setup(self, obj): - # initialization: read the SUSPSTACK object - p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK)) - if not p.handle: - return False - self.context = llmemory.cast_ptr_to_adr(p.handle) - self.next_callback_piece = p.callback_pieces - anchor = p.anchor - del p - self.curframe = lltype.malloc(WALKFRAME, flavor='raw') - self.otherframe = lltype.malloc(WALKFRAME, flavor='raw') - self.fill_initial_frame(self.curframe, anchor) - return True - - def fill_initial_frame(self, curframe, initialframedata): - # Copy&paste :-( - initialframedata += 2*sizeofaddr - reg = 0 - while reg < CALLEE_SAVED_REGS: - curframe.regs_stored_at[reg] = initialframedata+reg*sizeofaddr - reg += 1 - retaddraddr = initialframedata + CALLEE_SAVED_REGS * sizeofaddr - retaddraddr = self.translateptr(retaddraddr) - curframe.frame_address = retaddraddr.address[0] - - def fetch_next_stack_piece(self): - if self.next_callback_piece == llmemory.NULL: - lltype.free(self.curframe, flavor='raw') - lltype.free(self.otherframe, flavor='raw') - self.context = llmemory.NULL - return False - else: - anchor = self.next_callback_piece - nextaddr = anchor + sizeofaddr - nextaddr = self.translateptr(nextaddr) - self.next_callback_piece = nextaddr.address[0] - self.fill_initial_frame(self.curframe, anchor) - return True - - @specialize.arg(3) - def customtrace(self, gc, obj, callback, arg): - # - # Pointers to the stack can be "translated" or not: - # - # * Non-translated pointers point to where the data would be - # if the stack was installed and 
running. - # - # * Translated pointers correspond to where the data - # is now really in memory. - # - # Note that 'curframe' contains non-translated pointers, and - # of course the stack itself is full of non-translated pointers. - # - if not self.setup(obj): - return - - while True: - callee = self.curframe From pypy.commits at gmail.com Thu Dec 12 08:00:40 2019 From: pypy.commits at gmail.com (rlamy) Date: Thu, 12 Dec 2019 05:00:40 -0800 (PST) Subject: [pypy-commit] pypy py3.6-exc-info-2: Close branch py3.6-exc-info-2 Message-ID: <5df239f8.1c69fb81.7e5a.ed72@mx.google.com> Author: Ronan Lamy Branch: py3.6-exc-info-2 Changeset: r98276:abfb925c6fc6 Date: 2019-12-12 12:59 +0000 http://bitbucket.org/pypy/pypy/changeset/abfb925c6fc6/ Log: Close branch py3.6-exc-info-2 From pypy.commits at gmail.com Thu Dec 12 08:01:43 2019 From: pypy.commits at gmail.com (rlamy) Date: Thu, 12 Dec 2019 05:01:43 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Merged in py3.6-exc-info-2 (pull request #686) Message-ID: <5df23a37.1c69fb81.b575.d6c4@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98277:357a713081c3 Date: 2019-12-12 12:59 +0000 http://bitbucket.org/pypy/pypy/changeset/357a713081c3/ Log: Merged in py3.6-exc-info-2 (pull request #686) Fix handling of sys.exc_info() in generators diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -34,7 +34,6 @@ # time it is the exception caught by the topmost 'except ... as e:' # app-level block. self.sys_exc_operror = None - self.previous_operror_stack = [] self.w_tracefunc = None self.is_tracing = 0 self.compiler = space.createcompiler() @@ -248,15 +247,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! 
""" - result = self.sys_exc_operror - if result is None: - i = len(self.previous_operror_stack) - 1 - while i >= 0: - result = self.previous_operror_stack[i] - if result is not None: - break - i -= 1 - return result + return self.sys_exc_operror def set_sys_exc_info(self, operror): self.sys_exc_operror = operror @@ -277,26 +268,6 @@ operror = OperationError(w_type, w_value, tb) self.set_sys_exc_info(operror) - def enter_error_stack_item(self, saved_operr): - # 'sys_exc_operror' should be logically considered as the last - # item on the stack, so pushing a new item has the following effect: - self.previous_operror_stack.append(self.sys_exc_operror) - self.sys_exc_operror = saved_operr - - def leave_error_stack_item(self): - result = self.sys_exc_operror - self.sys_exc_operror = self.previous_operror_stack.pop() - return result - - def fetch_and_clear_error_stack_state(self): - result = self.sys_exc_operror, self.previous_operror_stack - self.sys_exc_operror = None - self.previous_operror_stack = [] - return result - - def restore_error_stack_state(self, saved): - self.sys_exc_operror, self.previous_operror_stack = saved - @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -119,7 +119,10 @@ "can't send non-None value to a just-started %s", self.KIND) ec = space.getexecutioncontext() - ec.enter_error_stack_item(self.saved_operr) + current_exc_info = ec.sys_exc_info() + if self.saved_operr is not None: + ec.set_sys_exc_info(self.saved_operr) + self.saved_operr = None self.running = True try: w_result = frame.execute_frame(w_arg_or_err) @@ -140,7 +143,9 @@ # note: this is not perfectly correct: see # test_exc_info_in_generator_4. But it's simpler and # bug-to-bug compatible with CPython 3.5 and 3.6. 
- self.saved_operr = ec.leave_error_stack_item() + if frame._any_except_or_finally_handler(): + self.saved_operr = ec.sys_exc_info() + ec.set_sys_exc_info(current_exc_info) return w_result def get_delegate(self): diff --git a/pypy/interpreter/test/apptest_generator.py b/pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/apptest_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,5 +1,7 @@ from pytest import raises, skip +import sys + def test_generator(): def f(): yield 1 @@ -462,7 +464,6 @@ assert closed == [True] def test_exc_info_in_generator(): - import sys def g(): try: raise ValueError @@ -533,8 +534,35 @@ try: raise IndexError except IndexError: - assert next(gen) is 1 - assert next(gen) is 2 + assert next(gen) == 1 + assert next(gen) == 2 + +def test_except_gen_except(): + def gen(): + try: + assert sys.exc_info()[0] is None + yield + # we are called from "except ValueError:", TypeError must + # inherit ValueError in its context + raise TypeError() + except TypeError as exc: + assert sys.exc_info()[0] is TypeError + assert type(exc.__context__) is ValueError + # here we are still called from the "except ValueError:" + assert sys.exc_info()[0] is ValueError + yield + assert sys.exc_info()[0] is None + yield "done" + + g = gen() + next(g) + try: + raise ValueError + except Exception: + next(g) + + assert next(g) == "done" + assert sys.exc_info() == (None, None, None) def test_multiple_invalid_sends(): def mygen(): @@ -793,13 +821,9 @@ yield from map(operator.truediv, [2, 3], [4, 0]) gen = f() assert next(gen) == 0.5 - try: + with raises(ZeroDivisionError) as excinfo: next(gen) - except ZeroDivisionError as e: - assert e.__context__ is not None - assert isinstance(e.__context__, ValueError) - else: - assert False, "should have raised" + assert isinstance(excinfo.value.__context__, ValueError) def test_past_generator_stop(): diff --git a/pypy/module/_continuation/interp_continuation.py 
b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -46,9 +46,9 @@ # global_state.origin = self self.sthread = sthread - saved_error_state = pre_switch(sthread) + saved_exception = pre_switch(sthread) h = sthread.new(new_stacklet_callback) - post_switch(sthread, h, saved_error_state) + post_switch(sthread, h, saved_exception) def switch(self, w_to): sthread = self.sthread @@ -84,9 +84,9 @@ # double switch: the final destination is to.h global_state.destination = to # - saved_error_state = pre_switch(sthread) + saved_exception = pre_switch(sthread) h = sthread.switch(global_state.destination.h) - return post_switch(sthread, h, saved_error_state) + return post_switch(sthread, h, saved_exception) @unwrap_spec(w_value = WrappedDefault(None), w_to = WrappedDefault(None)) @@ -257,9 +257,11 @@ return self.h def pre_switch(sthread): - return sthread.ec.fetch_and_clear_error_stack_state() + saved_exception = sthread.ec.sys_exc_info() + sthread.ec.set_sys_exc_info(None) + return saved_exception -def post_switch(sthread, h, saved_error_state): +def post_switch(sthread, h, saved_exception): origin = global_state.origin self = global_state.destination global_state.origin = None @@ -268,7 +270,7 @@ # current = sthread.ec.topframeref sthread.ec.topframeref = self.bottomframe.f_backref - sthread.ec.restore_error_stack_state(saved_error_state) + sthread.ec.set_sys_exc_info(saved_exception) self.bottomframe.f_backref = origin.bottomframe.f_backref origin.bottomframe.f_backref = current # From pypy.commits at gmail.com Thu Dec 12 08:05:20 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 12 Dec 2019 05:05:20 -0800 (PST) Subject: [pypy-commit] pypy py3.6: document merged branch Message-ID: <5df23b10.1c69fb81.1636c.03c5@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98278:31b34a79660c Date: 2019-12-12 15:04 +0200 
http://bitbucket.org/pypy/pypy/changeset/31b34a79660c/ Log: document merged branch diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -5,3 +5,6 @@ .. this is the revision after release-pypy3.6-v7.3.0 .. startrev: 78b4d0a7cf2e +.. branch: py3.6-exc-info-2 + +Fix handling of sys.exc_info() in generators From pypy.commits at gmail.com Thu Dec 12 11:06:13 2019 From: pypy.commits at gmail.com (rlamy) Date: Thu, 12 Dec 2019 08:06:13 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Return W_IntObject from float.__round__() when possible. Message-ID: <5df26575.1c69fb81.1b019.0c0f@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98279:8e5e71e1a26e Date: 2019-12-12 16:05 +0000 http://bitbucket.org/pypy/pypy/changeset/8e5e71e1a26e/ Log: Return W_IntObject from float.__round__() when possible. This should speed up all calculations involving int(round()), fixing e.g. issue #2634. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -145,6 +145,14 @@ return space.w_NotImplemented return func_with_new_name(_compare, 'descr_' + opname) +def _newint_from_float(space, floatval): + try: + value = ovfcheck_float_to_int(floatval) + except OverflowError: + return newlong_from_float(space, floatval) + else: + return space.newint(value) + class W_FloatObject(W_Root): """This is a implementation of the app-level 'float' type. 
@@ -440,12 +448,7 @@ return W_FloatObject(a) def descr_trunc(self, space): - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return newlong_from_float(space, self.floatval) - else: - return space.newint(value) + return _newint_from_float(space, self.floatval) def descr_neg(self, space): return W_FloatObject(-self.floatval) @@ -935,7 +938,7 @@ if math.fabs(x - rounded) == 0.5: # halfway case: round to even rounded = 2.0 * rfloat.round_away(x / 2.0) - return newlong_from_float(space, rounded) + return _newint_from_float(space, rounded) # interpret 2nd argument as a Py_ssize_t; clip on overflow ndigits = space.getindex_w(w_ndigits, None) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -5,6 +5,7 @@ import py from pypy.objspace.std.floatobject import W_FloatObject, _remove_underscores +from pypy.objspace.std.intobject import W_IntObject class TestW_FloatObject: @@ -127,6 +128,10 @@ for s in invalid: pytest.raises(ValueError, _remove_underscores, s) +def test_avoid_bigints(space): + w_f = space.newfloat(123.456) + assert isinstance(w_f.descr_trunc(space), W_IntObject) + assert isinstance(w_f.descr___round__(space), W_IntObject) class AppTestAppFloatTest: From pypy.commits at gmail.com Thu Dec 12 11:44:45 2019 From: pypy.commits at gmail.com (arigo) Date: Thu, 12 Dec 2019 08:44:45 -0800 (PST) Subject: [pypy-commit] pypy default: Prevent lltype.typeOf(2<<32) from returning SignedLongLong on 32-bit Message-ID: <5df26e7d.1c69fb81.6975f.224a@mx.google.com> Author: Armin Rigo Branch: Changeset: r98280:56cb51f3c081 Date: 2019-12-12 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/56cb51f3c081/ Log: Prevent lltype.typeOf(2<<32) from returning SignedLongLong on 32-bit just because 2<<32 doesn't fit into a regular Signed. If you want to get a SignedLongLong, better be explicit (e.g. 
with r_int64()) diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -285,12 +285,15 @@ cases = [8, 16, 24] if WORD == 8: cases.append(32) + bigvalue = 0xAAAAAAAAAAAA + else: + bigvalue = 0xAAAAAAA for i in cases: - box = InputArgInt(0xAAAAAAAAAAAA) + box = InputArgInt(bigvalue) res = self.execute_operation(rop.INT_AND, [box, ConstInt(2 ** i - 1)], 'int') - assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) + assert res == bigvalue & (2 ** i - 1) def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -815,7 +815,11 @@ if -maxint-1 <= val <= maxint: return Signed elif longlongmask(val) == val: - return SignedLongLong + raise OverflowError("integer %r is out of bounds for Signed " + "(it would fit SignedLongLong, but we " + "won't implicitly return SignedLongLong " + "for typeOf(%r) where type(%r) is long)" + % (val, val, val)) else: raise OverflowError("integer %r is out of bounds" % (val,)) if tp is bool: From pypy.commits at gmail.com Thu Dec 12 12:16:25 2019 From: pypy.commits at gmail.com (arigo) Date: Thu, 12 Dec 2019 09:16:25 -0800 (PST) Subject: [pypy-commit] pypy default: Mention that you should usually not run "py.test lib-python/..." Message-ID: <5df275e9.1c69fb81.b81fe.74fd@mx.google.com> Author: Armin Rigo Branch: Changeset: r98281:b0f74f864c1d Date: 2019-12-12 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/b0f74f864c1d/ Log: Mention that you should usually not run "py.test lib-python/..." diff --git a/pypy/doc/contributing.rst b/pypy/doc/contributing.rst --- a/pypy/doc/contributing.rst +++ b/pypy/doc/contributing.rst @@ -311,16 +311,13 @@ directory or even the top level subdirectory ``pypy``. 
It takes hours and uses huge amounts of RAM and is not recommended. -To run CPython regression tests you can point to the ``lib-python`` -directory:: - - py.test lib-python/2.7/test/test_datetime.py - -This will usually take a long time because this will run -the PyPy Python interpreter on top of CPython. On the plus -side, it's usually still faster than doing a full translation -and running the regression test with the translated PyPy Python -interpreter. +To run CPython regression tests, you should start with a translated PyPy and +run the tests as you would with CPython (see below). You can, however, also +attempt to run the tests before translation, but be aware that it is done with +a hack that doesn't work in all cases and it is usually extremely slow: +``py.test lib-python/2.7/test/test_datetime.py``. Usually, a better idea is to +extract a minimal failing test of at most a few lines, and put it into one of +our own tests in ``pypy/*/test/``. .. _py.test testing tool: http://pytest.org .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage @@ -350,6 +347,11 @@ cpython2 pytest.py -A pypy/module/cpyext/test --python=path/to/pypy3 +To run a test from the standard CPython regression test suite, use the regular +Python way, i.e. 
(replace "pypy" with the exact binary name, if needed):: + + pypy -m test.test_datetime + Tooling & Utilities ^^^^^^^^^^^^^^^^^^^ From pypy.commits at gmail.com Thu Dec 12 17:21:27 2019 From: pypy.commits at gmail.com (arigo) Date: Thu, 12 Dec 2019 14:21:27 -0800 (PST) Subject: [pypy-commit] pypy py3.6: more like 8e5e71e1a26e: avoids making W_LongObjects for result that most often Message-ID: <5df2bd67.1c69fb81.306fb.8e8f@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r98283:22226b5e0778 Date: 2019-12-12 23:21 +0100 http://bitbucket.org/pypy/pypy/changeset/22226b5e0778/ Log: more like 8e5e71e1a26e: avoids making W_LongObjects for result that most often fit in a W_IntObject diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -156,12 +156,12 @@ Return the floor of x as an int. This is the largest integral value <= x. """ - from pypy.objspace.std.longobject import newlong_from_float + from pypy.objspace.std.floatobject import newint_from_float w_descr = space.lookup(w_x, '__floor__') if w_descr is not None: return space.get_and_call_function(w_descr, w_x) x = _get_double(space, w_x) - return newlong_from_float(space, math.floor(x)) + return newint_from_float(space, math.floor(x)) def sqrt(space, w_x): """sqrt(x) @@ -259,11 +259,11 @@ Return the ceiling of x as an int. This is the smallest integral value >= x. 
""" - from pypy.objspace.std.longobject import newlong_from_float + from pypy.objspace.std.floatobject import newint_from_float w_descr = space.lookup(w_x, '__ceil__') if w_descr is not None: return space.get_and_call_function(w_descr, w_x) - return newlong_from_float(space, math1_w(space, math.ceil, w_x)) + return newint_from_float(space, math1_w(space, math.ceil, w_x)) def sinh(space, w_x): """sinh(x) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -145,7 +145,8 @@ return space.w_NotImplemented return func_with_new_name(_compare, 'descr_' + opname) -def _newint_from_float(space, floatval): +def newint_from_float(space, floatval): + """This is also used from module/math/interp_math.py""" try: value = ovfcheck_float_to_int(floatval) except OverflowError: @@ -448,7 +449,7 @@ return W_FloatObject(a) def descr_trunc(self, space): - return _newint_from_float(space, self.floatval) + return newint_from_float(space, self.floatval) def descr_neg(self, space): return W_FloatObject(-self.floatval) @@ -938,7 +939,7 @@ if math.fabs(x - rounded) == 0.5: # halfway case: round to even rounded = 2.0 * rfloat.round_away(x / 2.0) - return _newint_from_float(space, rounded) + return newint_from_float(space, rounded) # interpret 2nd argument as a Py_ssize_t; clip on overflow ndigits = space.getindex_w(w_ndigits, None) From pypy.commits at gmail.com Fri Dec 13 07:43:57 2019 From: pypy.commits at gmail.com (arigo) Date: Fri, 13 Dec 2019 04:43:57 -0800 (PST) Subject: [pypy-commit] pypy default: Skip running test_rx86_64_auto_encoding on 32bits---this makes pointless errors Message-ID: <5df3878d.1c69fb81.1690.2344@mx.google.com> Author: Armin Rigo Branch: Changeset: r98284:34a01c95b95b Date: 2019-12-13 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/34a01c95b95b/ Log: Skip running test_rx86_64_auto_encoding on 32bits---this makes pointless errors now diff --git 
a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -1,7 +1,11 @@ +import sys, py import random from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.test import test_rx86_32_auto_encoding +if sys.maxint <= 2**32: + py.test.skip("skipping this test on x86-32") + class TestRx86_64(test_rx86_32_auto_encoding.TestRx86_32): WORD = 8 From pypy.commits at gmail.com Fri Dec 13 07:49:32 2019 From: pypy.commits at gmail.com (arigo) Date: Fri, 13 Dec 2019 04:49:32 -0800 (PST) Subject: [pypy-commit] pypy default: Fix for pypy.module._file.test.test_large_file on 32bit Message-ID: <5df388dc.1c69fb81.e3e08.5b28@mx.google.com> Author: Armin Rigo Branch: Changeset: r98285:f1ac06bafd45 Date: 2019-12-13 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/f1ac06bafd45/ Log: Fix for pypy.module._file.test.test_large_file on 32bit diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -317,7 +317,8 @@ os.lseek(self.fd, offset, whence) def tell(self): - return os.lseek(self.fd, 0, 1) + result = os.lseek(self.fd, 0, 1) + return r_longlong(result) def read(self, n): assert isinstance(n, int) From pypy.commits at gmail.com Sat Dec 14 16:46:06 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 14 Dec 2019 13:46:06 -0800 (PST) Subject: [pypy-commit] pypy py3.6: handle OpenSSL v1.1, in _ssl, add post_handshake_auth for TLSv1_3 Message-ID: <5df5581e.1c69fb81.20999.8923@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98286:a56889d5df88 Date: 2019-12-14 23:33 +0200 http://bitbucket.org/pypy/pypy/changeset/a56889d5df88/ Log: handle OpenSSL v1.1, in _ssl, add post_handshake_auth for TLSv1_3 diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py 
b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -76,6 +76,7 @@ static const long SSL_OP_SINGLE_DH_USE; static const long SSL_OP_EPHEMERAL_RSA; static const long SSL_OP_MICROSOFT_SESS_ID_BUG; +static const long SSL_OP_ENABLE_MIDDLEBOX_COMPAT; static const long SSL_OP_NETSCAPE_CHALLENGE_BUG; static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG; diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -84,6 +84,9 @@ OP_NO_SSLv2 = lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 +if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): + OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + SSL_CLIENT = 0 @@ -271,6 +274,20 @@ mode |= lib.SSL_MODE_AUTO_RETRY lib.SSL_set_mode(ssl, mode) + if HAS_TLSv1_3: + if sslctx._post_handshake_auth: + if socket_type == SSL_SERVER: + # bpo-37428: OpenSSL does not ignore SSL_VERIFY_POST_HANDSHAKE. + # Set SSL_VERIFY_POST_HANDSHAKE flag only for server sockets and + # only in combination with SSL_VERIFY_PEER flag. 
+ mode = lib.SSL_CTX_get_verify_mode(lib.SSL_get_SSL_CTX(self.ssl)) + if (mode & lib.SSL_VERIFY_PEER): + verify_cb = lib.SSL_get_verify_callback(self.ssl) + mode |= lib.SSL_VERIFY_POST_HANDSHAKE + lib.SSL_set_verify(ssl, mode, verify_cb) + else: + lib.SSL_set_post_handshake_auth(ssl, 1) + if HAS_SNI and self.server_hostname: name = _str_to_ffi_buffer(self.server_hostname) lib.SSL_set_tlsext_host_name(ssl, name) @@ -688,6 +705,15 @@ else: return None + def verify_client_post_handshake(self): + + if not HAS_TLSv1_3: + raise NotImplementedError("Post-handshake auth is not supported by " + "your OpenSSL version.") + err = lib.SSL_verify_client_post_handshake(self.ssl); + if err == 0: + raise pyssl_error(self, err) + def pending(self): count = lib.SSL_pending(self.ssl) if count < 0: @@ -744,6 +770,7 @@ return bool(lib.SSL_session_reused(self.ssl)) + def _fs_decode(name): return name.decode(sys.getfilesystemencoding()) def _fs_converter(name): @@ -799,13 +826,13 @@ if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): aead = lib.SSL_CIPHER_is_aead(cipher) nid = lib.SSL_CIPHER_get_cipher_nid(cipher) - skcipher = OBJ_nid2ln(nid) if nid != NID_undef else None + skcipher = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_digest_nid(cipher); - digest = OBJ_nid2ln(nid) if nid != NID_undef else None + digest = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_kx_nid(cipher); - kx = OBJ_nid2ln(nid) if nid != NID_undef else None - nid = SSL_CIPHER_get_auth_nid(cipher); - auth = OBJ_nid2ln(nid) if nid != NID_undef else None + kx = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None + nid = lib.SSL_CIPHER_get_auth_nid(cipher); + auth = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None ret.update({'aead' : bool(aead), 'symmmetric' : skcipher, 'digest' : digest, @@ -865,9 +892,8 @@ class _SSLContext(object): __slots__ = ('ctx', '_check_hostname', 'servername_callback', 'alpn_protocols', '_alpn_protocols_handle', - 'npn_protocols', 
'set_hostname', + 'npn_protocols', 'set_hostname', '_post_handshake_auth', '_set_hostname_handle', '_npn_protocols_handle') - def __new__(cls, protocol): self = object.__new__(cls) self.ctx = ffi.NULL @@ -944,6 +970,9 @@ if lib.Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST: store = lib.SSL_CTX_get_cert_store(self.ctx) lib.X509_STORE_set_flags(store, lib.X509_V_FLAG_TRUSTED_FIRST) + if HAS_TLSv1_3: + self.post_handshake_auth = 0; + lib.SSL_CTX_set_post_handshake_auth(self.ctx, self.post_handshake_auth) return self @property @@ -1029,6 +1058,7 @@ "CERT_OPTIONAL or CERT_REQUIRED") self._check_hostname = check_hostname + def set_ciphers(self, cipherlist): cipherlistbuf = _str_to_ffi_buffer(cipherlist) ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf) @@ -1362,6 +1392,25 @@ sock = _SSLSocket._new__ssl_socket(self, None, server_side, hostname, incoming, outgoing) return sock + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! 
From pypy.commits at gmail.com Sat Dec 14 16:46:08 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 14 Dec 2019 13:46:08 -0800 (PST) Subject: [pypy-commit] pypy default: handle OpenSSL v1.1, in _ssl, add post_handshake_auth for TLSv1_3 Message-ID: <5df55820.1c69fb81.d588c.1501@mx.google.com> Author: Matti Picus Branch: Changeset: r98287:826708d0c629 Date: 2019-12-14 23:33 +0200 http://bitbucket.org/pypy/pypy/changeset/826708d0c629/ Log: handle OpenSSL v1.1, in _ssl, add post_handshake_auth for TLSv1_3 (grafted from a56889d5df88dc57c7385151b1f54f644c34c2da) diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -76,6 +76,7 @@ static const long SSL_OP_SINGLE_DH_USE; static const long SSL_OP_EPHEMERAL_RSA; static const long SSL_OP_MICROSOFT_SESS_ID_BUG; +static const long SSL_OP_ENABLE_MIDDLEBOX_COMPAT; static const long SSL_OP_NETSCAPE_CHALLENGE_BUG; static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG; diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -83,6 +83,9 @@ OP_NO_SSLv2 = lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 +if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): + OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + SSL_CLIENT = 0 @@ -265,6 +268,20 @@ mode |= lib.SSL_MODE_AUTO_RETRY lib.SSL_set_mode(ssl, mode) + if HAS_TLSv1_3: + if sslctx._post_handshake_auth: + if socket_type == SSL_SERVER: + # bpo-37428: OpenSSL does not ignore SSL_VERIFY_POST_HANDSHAKE. + # Set SSL_VERIFY_POST_HANDSHAKE flag only for server sockets and + # only in combination with SSL_VERIFY_PEER flag. 
+ mode = lib.SSL_CTX_get_verify_mode(lib.SSL_get_SSL_CTX(self.ssl)) + if (mode & lib.SSL_VERIFY_PEER): + verify_cb = lib.SSL_get_verify_callback(self.ssl) + mode |= lib.SSL_VERIFY_POST_HANDSHAKE + lib.SSL_set_verify(ssl, mode, verify_cb) + else: + lib.SSL_set_post_handshake_auth(ssl, 1) + if HAS_SNI and self.server_hostname: name = _str_to_ffi_buffer(self.server_hostname) lib.SSL_set_tlsext_host_name(ssl, name) @@ -652,6 +669,15 @@ else: return None + def verify_client_post_handshake(self): + + if not HAS_TLSv1_3: + raise NotImplementedError("Post-handshake auth is not supported by " + "your OpenSSL version.") + err = lib.SSL_verify_client_post_handshake(self.ssl); + if err == 0: + raise pyssl_error(self, err) + def pending(self): count = lib.SSL_pending(self.ssl) if count < 0: @@ -708,6 +734,7 @@ return bool(lib.SSL_session_reused(self.ssl)) + def _fs_decode(name): return name.decode(sys.getfilesystemencoding()) def _fs_converter(name): @@ -763,13 +790,13 @@ if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): aead = lib.SSL_CIPHER_is_aead(cipher) nid = lib.SSL_CIPHER_get_cipher_nid(cipher) - skcipher = OBJ_nid2ln(nid) if nid != NID_undef else None + skcipher = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_digest_nid(cipher); - digest = OBJ_nid2ln(nid) if nid != NID_undef else None + digest = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_kx_nid(cipher); - kx = OBJ_nid2ln(nid) if nid != NID_undef else None - nid = SSL_CIPHER_get_auth_nid(cipher); - auth = OBJ_nid2ln(nid) if nid != NID_undef else None + kx = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None + nid = lib.SSL_CIPHER_get_auth_nid(cipher); + auth = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None ret.update({'aead' : bool(aead), 'symmmetric' : skcipher, 'digest' : digest, @@ -829,9 +856,8 @@ class _SSLContext(object): __slots__ = ('ctx', '_check_hostname', 'servername_callback', 'alpn_protocols', '_alpn_protocols_handle', - 'npn_protocols', 
'set_hostname', + 'npn_protocols', 'set_hostname', '_post_handshake_auth', '_set_hostname_handle', '_npn_protocols_handle') - def __new__(cls, protocol): self = object.__new__(cls) self.ctx = ffi.NULL @@ -908,6 +934,9 @@ if lib.Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST: store = lib.SSL_CTX_get_cert_store(self.ctx) lib.X509_STORE_set_flags(store, lib.X509_V_FLAG_TRUSTED_FIRST) + if HAS_TLSv1_3: + self.post_handshake_auth = 0; + lib.SSL_CTX_set_post_handshake_auth(self.ctx, self.post_handshake_auth) return self @property @@ -993,6 +1022,7 @@ "CERT_OPTIONAL or CERT_REQUIRED") self._check_hostname = check_hostname + def set_ciphers(self, cipherlist): cipherlistbuf = _str_to_ffi_buffer(cipherlist) ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf) @@ -1329,6 +1359,25 @@ sock = _SSLSocket._new__ssl_socket(self, None, server_side, hostname, incoming, outgoing) return sock + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! 
From pypy.commits at gmail.com Sat Dec 14 23:32:27 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 14 Dec 2019 20:32:27 -0800 (PST) Subject: [pypy-commit] pypy default: fix backport 826708d0c629 Message-ID: <5df5b75b.1c69fb81.8290a.144b@mx.google.com> Author: Matti Picus Branch: Changeset: r98288:994c42529580 Date: 2019-12-15 06:31 +0200 http://bitbucket.org/pypy/pypy/changeset/994c42529580/ Log: fix backport 826708d0c629 diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -113,6 +113,7 @@ PROTOCOL_TLSv1_2 = 5 # PROTOCOL_TLS_CLIENT = 0x10 # PROTOCOL_TLS_SERVER = 0x11 +HAS_TLSv1_3 = bool(lib.Cryptography_HAS_TLSv1_3) _PROTOCOL_NAMES = (name for name in dir(lib) if name.startswith('PROTOCOL_')) From pypy.commits at gmail.com Sun Dec 15 02:13:34 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 14 Dec 2019 23:13:34 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update download.txt for portable x86 packages, mention certifi for SSL certificates Message-ID: <5df5dd1e.1c69fb81.c6f69.3ca7@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r960:1618d33ae327 Date: 2019-12-15 09:13 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1618d33ae327/ Log: update download.txt for portable x86 packages, mention certifi for SSL certificates diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -12,16 +12,16 @@ as stable as the release, but they contain numerous bugfixes and performance improvements. 
-We provide binaries for x86, ARM, PPC and s390x running on different operating systems such as -Linux, Mac OS X and Windows (`what's new in PyPy 7.2.0?`_): +We provide binaries for x86, aarch64, ppc64 and s390x running on different operating systems such as +Linux, Mac OS X and Windows (`what's new in PyPy 7.3.0?`_): -* the Python2.7 compatible release — **PyPy2.7 v7.2.0** +* the Python2.7 compatible release — **PyPy2.7 v7.3.0** -* the Python3.6 compatible release — **PyPy3.6 v7.2.0** +* the Python3.6 compatible release — **PyPy3.6 v7.3.0** * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only) -.. _what's new in PyPy 7.2.0?: http://doc.pypy.org/en/latest/release-v7.2.0.html +.. _what's new in PyPy 7.3.0?: http://doc.pypy.org/en/latest/release-v7.3.0.html .. class:: download_menu @@ -50,21 +50,46 @@ Linux binaries and common distributions --------------------------------------- -Linux binaries are dynamically linked, as is usual, and thus might -not be usable due to the sad story of linux binary compatibility. This means -that **Linux binaries are only usable on the distributions written next to -them** unless you're ready to hack your system by adding symlinks to the -libraries it tries to open. There are better solutions: +Since version 7.3, the linux x86 binaries in the links below ship with versions +of OpenSSL, SQLite3, libffi, expat, and TCL/TK binary libraries linked in. This +make the binaries "portable" so that they should run on any current glibc-based +linux platform. The ideas were adopted from the `portable-pypy`_ package. -* use Squeaky's `portable Linux binaries`_. +This solution to the portability problem means that the versions of the +packaged libraries are frozen to the version shipped, so updating your system +libraries will not affect this installation of PyPy. Also see the note about +SSL certificates below. If you wish to use your system libraries instead, +there are other solutions. 
-* or download PyPy from your release vendor (usually an outdated +For aarch64, s390x, and ppc64, the binaries target a specific operating system. +These binaries are dynamically linked, and thus might -not be usable due to the +sad story of linux binary compatibility. This means -that **Linux binaries are +only usable on the distributions written next to -them** unless you're ready to +hack your system by adding symlinks to the -libraries it tries to open. There +are better solutions: + +* download PyPy from your release vendor (usually an outdated version): `Ubuntu`_ (`PPA`_), `Debian`_, `Homebrew`_, MacPorts, `Fedora`_, `Gentoo`_ and `Arch`_ are known to package PyPy, with various degrees of being up-to-date. +* `recompile the CFFI-based`_ TCL/TK, OpenSSL, or sqlite3 modules, using system + libraries and the scripts in ``pypy/lib_pypy``. This solution will not solve + compatibility issues with libffi, since that is baked into PyPy. + * or translate_ your own PyPy. +.. class:: download_menu + + SSL Certificates + + While the linux binaries ship an OpenSSL library, they do not ship a + certificate store for SSL certificates. If you wish to use SSL module, + you will need a valid certificate store. You can use the `certifi`_ package + and set ``SSL_CERT_FILE`` to ``certifi.where()`` or install your platform + certificates which should be discovered by the ``_ssl`` module. + + .. _`Ubuntu`: http://packages.ubuntu.com/search?keywords=pypy&searchon=names .. _`PPA`: https://launchpad.net/~pypy/+archive/ppa .. _`Debian`: http://packages.debian.org/sid/pypy @@ -72,18 +97,19 @@ .. _`Gentoo`: http://packages.gentoo.org/package/dev-python/pypy .. _`Homebrew`: https://github.com/Homebrew/homebrew-core/blob/master/Formula/pypy.rb .. _`Arch`: https://wiki.archlinux.org/index.php/PyPy -.. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux - +.. 
_`portable-pypy`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux +.. _`recompile the CFFI-based`: https://doc.pypy.org/en/latest/build.html#build-cffi-import-libraries-for-the-stdlib +.. _`certifi`: https://pypi.org/project/certifi/ .. _release: -Python2.7 compatible PyPy 7.2.0 +Python2.7 compatible PyPy 7.3.0 ------------------------------- .. class:: download_menu -* `Linux x86 binary (32bit, built on Ubuntu 16.04)`__ (see ``[1]`` below) -* `Linux x86-64 binary (64bit, built on Ubuntu 14.04)`__ (see ``[1]`` below) +* `Linux x86 binary (32bit, built on CenOS6)`__ +* `Linux x86-64 binary (64bit, built on CentOS6)`__ * `Mac OS X binary (64bit)`__ * FreeBSD x86 and x86_64: see FreshPorts_ * `Windows binary (32bit)`__ (you might need the VC runtime library @@ -97,29 +123,29 @@ mirror_, but please use only if you have troubles accessing the links above -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-linux32.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-linux64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-win32.zip -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-aarch64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-ppc64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-ppc64le.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-s390x.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-src.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-src.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-linux32.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-linux64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-osx64.tar.bz2 +.. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-win32.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-aarch64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-ppc64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-ppc64le.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-s390x.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-src.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-src.zip .. _`vcredist.x86.exe`: https://www.microsoft.com/en-us/download/details.aspx?id=52685 .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://buildbot.pypy.org/mirror/ .. _FreshPorts: http://www.freshports.org/lang/pypy -Python 3.6 compatible PyPy3.6 v7.2.0 +Python 3.6 compatible PyPy3.6 v7.3.0 ------------------------------------ .. class:: download_menu -* `Linux x86-64 binary (64bit, built on Ubuntu 16.04)`__ (see ``[1]`` below) -* `Linux x86 binary (32bit, built on Ubuntu 14.04)`__ (see ``[1]`` below) +* `Linux x86-64 binary (64bit, built on CentOS6)`__ +* `Linux x86 binary (32bit, built on CentOS6)`__ * `Mac OS X binary (64bit)`__ (High Sierra >= 10.13, not for Sierra and below) * `Windows binary (32bit)`__ (you might need the VC runtime library installer `vcredist.x86.exe`_.) @@ -131,16 +157,16 @@ * `All our downloads,`__ including previous versions. We also have a mirror_, but please use only if you have troubles accessing the links above -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-linux64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-linux32.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-win32.zip -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-aarch64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-ppc64.tar.bz2 -.. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-ppc64le.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-s390x.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-src.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-src.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-linux64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-linux32.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-osx64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-win32.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-aarch64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-ppc64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-ppc64le.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-s390x.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-src.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-src.zip .. __: https://bitbucket.org/pypy/pypy/downloads @@ -153,8 +179,7 @@ ``[1]:`` stating it again: the Linux binaries are provided for the distributions listed here. **If your distribution is not exactly this one, it won't work,** you will probably see: ``pypy: error while loading shared -libraries: ...``. Unless you want to hack a lot, try out the -`portable Linux binaries`_. +libraries: ...``. PyPy-STM 2.5.1 ------------------------------ @@ -250,11 +275,11 @@ Alternatively, get one of the following smaller packages for the source at the same revision as the above binaries: - * `pypy2.7-v7.2.0-src.tar.bz2`__ (sources, PyPy 2 only) - * `pypy3.6-v7.2.0-src.tar.bz2`__ (sources, PyPy 3 only) + * `pypy2.7-v7.3.0-src.tar.bz2`__ (sources, PyPy 2 only) + * `pypy3.6-v7.3.0-src.tar.bz2`__ (sources, PyPy 3 only) - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.2.0-src.tar.bz2 - .. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.2.0-src.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/downloads/pypy2.7-v7.3.0-src.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/downloads/pypy3.6-v7.3.0-src.tar.bz2 2. Make sure you **installed the dependencies.** See the list here__. @@ -317,14 +342,6 @@ .../pypy/tool/build_cffi_imports.py`` if you want to be able to import the cffi-based modules. -* On Linux, translating with ``asmgcroot``, is delicate. - It requires using gcc with no particularly - fancy options. It does not work e.g. with clang, or if you pass uncommon - options with the ``CFLAGS`` environment variable. If you insist on - passing these options or using clang, then you can compile PyPy with - the default `shadow stack`_ option instead (for a small performance price in - non-JITted code). - * Like other JITs, PyPy doesn't work out of the box on some Linux distributions that trade full POSIX compliance for extra security features. E.g. with PAX, you have to run PyPy with ``paxctl -cm``. 
From pypy.commits at gmail.com Sun Dec 15 07:27:34 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 04:27:34 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into branch Message-ID: <5df626b6.1c69fb81.a209a.8940@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98292:4ede1313d652 Date: 2019-12-15 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/4ede1313d652/ Log: merge default into branch diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -85,7 +85,9 @@ OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): - OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + # OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + # XXX should be conditionally compiled into lib + OP_ENABLE_MIDDLEBOX_COMPAT = 0x00100000 @@ -1269,10 +1271,12 @@ return stats def set_default_verify_paths(self): - if not os.environ.get('SSL_CERT_FILE') and not os.environ.get('SSL_CERT_DIR'): - locations = get_default_verify_paths() - self.load_verify_locations(locations[1], locations[3]) - return + if (not os.environ.get('SSL_CERT_FILE') and + not os.environ.get('SSL_CERT_DIR') and + not sys.platform == 'win32'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1411,6 +1415,25 @@ return 0; + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and 
SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! diff --git a/pypy/doc/contributing.rst b/pypy/doc/contributing.rst --- a/pypy/doc/contributing.rst +++ b/pypy/doc/contributing.rst @@ -311,16 +311,13 @@ directory or even the top level subdirectory ``pypy``. It takes hours and uses huge amounts of RAM and is not recommended. -To run CPython regression tests you can point to the ``lib-python`` -directory:: - - py.test lib-python/2.7/test/test_datetime.py - -This will usually take a long time because this will run -the PyPy Python interpreter on top of CPython. On the plus -side, it's usually still faster than doing a full translation -and running the regression test with the translated PyPy Python -interpreter. +To run CPython regression tests, you should start with a translated PyPy and +run the tests as you would with CPython (see below). You can, however, also +attempt to run the tests before translation, but be aware that it is done with +a hack that doesn't work in all cases and it is usually extremely slow: +``py.test lib-python/2.7/test/test_datetime.py``. Usually, a better idea is to +extract a minimal failing test of at most a few lines, and put it into one of +our own tests in ``pypy/*/test/``. .. _py.test testing tool: http://pytest.org .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage @@ -350,6 +347,11 @@ cpython2 pytest.py -A pypy/module/cpyext/test --python=path/to/pypy3 +To run a test from the standard CPython regression test suite, use the regular +Python way, i.e. 
(replace "pypy" with the exact binary name, if needed):: + + pypy -m test.test_datetime + Tooling & Utilities ^^^^^^^^^^^^^^^^^^^ diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -133,6 +133,11 @@ * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) * Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) +* Support OpenSSL 1.1 and TLSv1_3 +* Remove the (deprecated since 5.7) asmgcc rootfinder from the GC +* Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit + platforms rather than automatically using a ``SignedLongLong``, require an + explicit ``r_int64()`` call instead C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -179,13 +184,16 @@ * Add missing ``os.getgrouplist`` (part of `issue 2375`_) * Back-port the tentative fix from cpython: "Import deadlock detection causes deadlock" (part of `issue 3111`_) +* Fix handling of ``sys.exc_info()`` in generators +* Return ``W_IntObject`` when converting from ``float`` to ``int`` when + possible, which speeds up many code paths. Python 3.6 C-API ~~~~~~~~~~~~~~~~ * Add ``PyObject_GenericGetDict``, ``PyObject_GenericSetDict``, ``_Py_strhex``, ``_Py_strhex_bytes``, ``PyUnicodeNew``, ``_PyFinalizing``, - ``PySlice_Unpack``, ``PySlice_AdjustIndices`` + ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath`` * Implement ``pystrhex.h`` (`issue 2687`_) * Make ``PyUnicodeObject`` slightly more compact * Fix memory leak when releasing a ``PyUnicodeObject`` diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,13 +3,5 @@ ============================ .. this is a revision shortly after release-pypy-7.3.0 -.. startrev: dbbbae99135f +.. startrev: 994c42529580 -.. 
branch: backport-decode_timeval_ns-py3.7 - -Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython - -.. branch: kill-asmgcc - -Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` -because it no longer works with a recent enough ``gcc``. diff --git a/pypy/doc/whatsnew-pypy2-7.3.0.rst b/pypy/doc/whatsnew-pypy2-7.3.0.rst --- a/pypy/doc/whatsnew-pypy2-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy2-7.3.0.rst @@ -31,3 +31,11 @@ anonymous struct/unions, cmake fragments for distribution, optimizations for PODs, and faster wrapper calls. +.. branch: backport-decode_timeval_ns-py3.7 + +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. diff --git a/pypy/doc/whatsnew-pypy3-7.3.0.rst b/pypy/doc/whatsnew-pypy3-7.3.0.rst --- a/pypy/doc/whatsnew-pypy3-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy3-7.3.0.rst @@ -17,3 +17,8 @@ .. branch: code_page-utf8 Add encoding, decoding of codepages on windows + +.. branch: py3.6-exc-info-2 + +Fix handling of sys.exc_info() in generators + diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -3,8 +3,5 @@ ========================== .. this is the revision after release-pypy3.6-v7.3.0 -.. startrev: 78b4d0a7cf2e +.. startrev: a56889d5df88 -.. 
branch: py3.6-exc-info-2 - -Fix handling of sys.exc_info() in generators diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4725,6 +4725,7 @@ def test_raw_load_int(self): from rpython.rlib import rawstorage + from rpython.rlib.rarithmetic import r_longlong for T in [rffi.UCHAR, rffi.SIGNEDCHAR, rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, @@ -4738,7 +4739,7 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = rffi.cast(T, -0x4243444546474849) + value = rffi.cast(T, r_longlong(-0x4243444546474849)) rawstorage.raw_storage_setitem(p, 16, value) got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, arraydescr) diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -285,12 +285,15 @@ cases = [8, 16, 24] if WORD == 8: cases.append(32) + bigvalue = 0xAAAAAAAAAAAA + else: + bigvalue = 0xAAAAAAA for i in cases: - box = InputArgInt(0xAAAAAAAAAAAA) + box = InputArgInt(bigvalue) res = self.execute_operation(rop.INT_AND, [box, ConstInt(2 ** i - 1)], 'int') - assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) + assert res == bigvalue & (2 ** i - 1) def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -1,4 +1,5 @@ import py, struct +from rpython.rlib.rarithmetic import r_longlong from rpython.jit.backend.x86.rx86 import * globals().update(R.__dict__) @@ -210,8 +211,8 @@ s.MOV_ri(ebx, -0x80000003) s.MOV_ri(r13, -0x80000002) s.MOV_ri(ecx, 42) - s.MOV_ri(r12, 0x80000042) - s.MOV_ri(r12, 0x100000007) + s.MOV_ri(r12, r_longlong(0x80000042)) + s.MOV_ri(r12, 
r_longlong(0x100000007)) assert s.getvalue() == ('\x48\xC7\xC1\xFE\xFF\xFF\xFF' + '\x49\xC7\xC7\xFD\xFF\xFF\xFF' + '\x48\xBB\xFD\xFF\xFF\x7F\xFF\xFF\xFF\xFF' + diff --git a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -1,7 +1,11 @@ +import sys, py import random from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.test import test_rx86_32_auto_encoding +if sys.maxint <= 2**32: + py.test.skip("skipping this test on x86-32") + class TestRx86_64(test_rx86_32_auto_encoding.TestRx86_32): WORD = 8 diff --git a/rpython/rlib/rstruct/test/test_pack.py b/rpython/rlib/rstruct/test/test_pack.py --- a/rpython/rlib/rstruct/test/test_pack.py +++ b/rpython/rlib/rstruct/test/test_pack.py @@ -1,5 +1,5 @@ import pytest -from rpython.rlib.rarithmetic import r_ulonglong +from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rstruct import standardfmttable, nativefmttable from rpython.rlib.rstruct.error import StructOverflowError from rpython.rlib import buffer @@ -129,9 +129,9 @@ self.check("i", 0x41424344) self.check("i", -3) self.check("i", -2147483648) - self.check("I", 0x81424344) - self.check("q", 0x4142434445464748) - self.check("q", -0x41B2B3B4B5B6B7B8) + self.check("I", r_uint(0x81424344)) + self.check("q", r_longlong(0x4142434445464748)) + self.check("q", r_longlong(-0x41B2B3B4B5B6B7B8)) self.check("Q", r_ulonglong(0x8142434445464748)) def test_pack_ieee(self): diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -317,7 +317,8 @@ os.lseek(self.fd, offset, whence) def tell(self): - return os.lseek(self.fd, 0, 1) + result = os.lseek(self.fd, 0, 1) + return r_longlong(result) def read(self, n): assert isinstance(n, int) diff --git 
a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -1455,7 +1455,7 @@ def __new__(cls, void_p): if isinstance(void_p, (int, long)): void_p = ctypes.c_void_p(void_p) - self = long.__new__(cls, void_p.value) + self = long.__new__(cls, intmask(void_p.value)) self.void_p = void_p self.intval = intmask(void_p.value) return self diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -815,7 +815,11 @@ if -maxint-1 <= val <= maxint: return Signed elif longlongmask(val) == val: - return SignedLongLong + raise OverflowError("integer %r is out of bounds for Signed " + "(it would fit SignedLongLong, but we " + "won't implicitly return SignedLongLong " + "for typeOf(%r) where type(%r) is long)" + % (val, val, val)) else: raise OverflowError("integer %r is out of bounds" % (val,)) if tp is bool: From pypy.commits at gmail.com Sun Dec 15 07:27:36 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 04:27:36 -0800 (PST) Subject: [pypy-commit] pypy default: backport whatsnew from py3.6 Message-ID: <5df626b8.1c69fb81.8e047.7d90@mx.google.com> Author: Matti Picus Branch: Changeset: r98293:1ecff2e3689f Date: 2019-12-15 12:09 +0200 http://bitbucket.org/pypy/pypy/changeset/1ecff2e3689f/ Log: backport whatsnew from py3.6 diff --git a/pypy/doc/whatsnew-pypy3-7.3.0.rst b/pypy/doc/whatsnew-pypy3-7.3.0.rst --- a/pypy/doc/whatsnew-pypy3-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy3-7.3.0.rst @@ -5,6 +5,20 @@ .. this is the revision after release-pypy3.6-v7.2 .. startrev: 6d2f8470165b + +.. branch: py3.6-asyncgen + +Fix asyncgen_hooks and refactor coroutine execution + +.. branch: py3.6-exc-info + +Follow CPython's use of exc_info more closely (issue 3096) + +.. 
branch: code_page-utf8 + +Add encoding, decoding of codepages on windows + .. branch: py3.6-exc-info-2 Fix handling of sys.exc_info() in generators + From pypy.commits at gmail.com Sun Dec 15 07:27:39 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 04:27:39 -0800 (PST) Subject: [pypy-commit] pypy release-pypy2.7-v7.x: merge default into release Message-ID: <5df626bb.1c69fb81.b81fe.c3b8@mx.google.com> Author: Matti Picus Branch: release-pypy2.7-v7.x Changeset: r98294:285307a0f5a7 Date: 2019-12-15 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/285307a0f5a7/ Log: merge default into release diff too long, truncating to 2000 out of 13046 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -57,3 +57,5 @@ 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0rc2 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 +e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 +533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -1,4 +1,5 @@ import sys +import os import time import thread as _thread import weakref @@ -82,6 +83,11 @@ OP_NO_SSLv2 = lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 +if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): + # OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + # XXX should be conditionally compiled into lib + OP_ENABLE_MIDDLEBOX_COMPAT = 0x00100000 + SSL_CLIENT = 0 @@ -109,6 +115,7 @@ PROTOCOL_TLSv1_2 = 5 # PROTOCOL_TLS_CLIENT = 0x10 # PROTOCOL_TLS_SERVER = 0x11 +HAS_TLSv1_3 = bool(lib.Cryptography_HAS_TLSv1_3) _PROTOCOL_NAMES = (name for name in dir(lib) if name.startswith('PROTOCOL_')) @@ -264,6 +271,20 @@ mode |= lib.SSL_MODE_AUTO_RETRY 
lib.SSL_set_mode(ssl, mode) + if HAS_TLSv1_3: + if sslctx._post_handshake_auth: + if socket_type == SSL_SERVER: + # bpo-37428: OpenSSL does not ignore SSL_VERIFY_POST_HANDSHAKE. + # Set SSL_VERIFY_POST_HANDSHAKE flag only for server sockets and + # only in combination with SSL_VERIFY_PEER flag. + mode = lib.SSL_CTX_get_verify_mode(lib.SSL_get_SSL_CTX(self.ssl)) + if (mode & lib.SSL_VERIFY_PEER): + verify_cb = lib.SSL_get_verify_callback(self.ssl) + mode |= lib.SSL_VERIFY_POST_HANDSHAKE + lib.SSL_set_verify(ssl, mode, verify_cb) + else: + lib.SSL_set_post_handshake_auth(ssl, 1) + if HAS_SNI and self.server_hostname: name = _str_to_ffi_buffer(self.server_hostname) lib.SSL_set_tlsext_host_name(ssl, name) @@ -651,6 +672,15 @@ else: return None + def verify_client_post_handshake(self): + + if not HAS_TLSv1_3: + raise NotImplementedError("Post-handshake auth is not supported by " + "your OpenSSL version.") + err = lib.SSL_verify_client_post_handshake(self.ssl); + if err == 0: + raise pyssl_error(self, err) + def pending(self): count = lib.SSL_pending(self.ssl) if count < 0: @@ -707,6 +737,7 @@ return bool(lib.SSL_session_reused(self.ssl)) + def _fs_decode(name): return name.decode(sys.getfilesystemencoding()) def _fs_converter(name): @@ -762,13 +793,13 @@ if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): aead = lib.SSL_CIPHER_is_aead(cipher) nid = lib.SSL_CIPHER_get_cipher_nid(cipher) - skcipher = OBJ_nid2ln(nid) if nid != NID_undef else None + skcipher = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_digest_nid(cipher); - digest = OBJ_nid2ln(nid) if nid != NID_undef else None + digest = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_kx_nid(cipher); - kx = OBJ_nid2ln(nid) if nid != NID_undef else None - nid = SSL_CIPHER_get_auth_nid(cipher); - auth = OBJ_nid2ln(nid) if nid != NID_undef else None + kx = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None + nid = lib.SSL_CIPHER_get_auth_nid(cipher); + auth = 
lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None ret.update({'aead' : bool(aead), 'symmmetric' : skcipher, 'digest' : digest, @@ -828,9 +859,8 @@ class _SSLContext(object): __slots__ = ('ctx', '_check_hostname', 'servername_callback', 'alpn_protocols', '_alpn_protocols_handle', - 'npn_protocols', 'set_hostname', + 'npn_protocols', 'set_hostname', '_post_handshake_auth', '_set_hostname_handle', '_npn_protocols_handle') - def __new__(cls, protocol): self = object.__new__(cls) self.ctx = ffi.NULL @@ -907,6 +937,9 @@ if lib.Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST: store = lib.SSL_CTX_get_cert_store(self.ctx) lib.X509_STORE_set_flags(store, lib.X509_V_FLAG_TRUSTED_FIRST) + if HAS_TLSv1_3: + self.post_handshake_auth = 0; + lib.SSL_CTX_set_post_handshake_auth(self.ctx, self.post_handshake_auth) return self @property @@ -992,6 +1025,7 @@ "CERT_OPTIONAL or CERT_REQUIRED") self._check_hostname = check_hostname + def set_ciphers(self, cipherlist): cipherlistbuf = _str_to_ffi_buffer(cipherlist) ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf) @@ -1205,6 +1239,12 @@ return stats def set_default_verify_paths(self): + if (not os.environ.get('SSL_CERT_FILE') and + not os.environ.get('SSL_CERT_DIR') and + not sys.platform == 'win32'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1324,6 +1364,25 @@ sock = _SSLSocket._new__ssl_socket(self, None, server_side, hostname, incoming, outgoing) return sock + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and 
SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! @@ -1548,20 +1607,69 @@ lib.RAND_add(buf, len(buf), entropy) def get_default_verify_paths(): + ''' + Find a certificate store and associated values + Returns something like + `('SSL_CERT_FILE', '/usr/lib/ssl/cert.pem', 'SSL_CERT_DIR', '/usr/lib/ssl/certs')` + on Ubuntu and windows10 + + `('SSL_CERT_FILE', '/usr/local/cert.pem', 'SSL_CERT_DIR', '/usr/local/certs')` + on CentOS + + `('SSL_CERT_FILE', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem', + 'SSL_CERT_DIR', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/certs')` + on Darwin + + For portable builds (based on CentOS, but could be running on any glibc + linux) we need to check other locations. The list of places to try was taken + from golang in Dec 2018: + https://golang.org/src/crypto/x509/root_unix.go (for the directories), + https://golang.org/src/crypto/x509/root_linux.go (for the files) + ''' + certFiles = [ + "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Gentoo etc. 
+ "/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", # OpenSUSE + "/etc/pki/tls/cacert.pem", # OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", # CentOS/RHEL 7 + "/etc/ssl/cert.pem", # Alpine Linux + ] + certDirectories = [ + "/etc/ssl/certs", # SLES10/SLES11 + "/system/etc/security/cacerts", # Android + "/usr/local/share/certs", # FreeBSD + "/etc/pki/tls/certs", # Fedora/RHEL + "/etc/openssl/certs", # NetBSD + "/var/ssl/certs", # AIX + ] + + # optimization: reuse the values from a local varaible + if getattr(get_default_verify_paths, 'retval', None): + return get_default_verify_paths.retval + + # This should never fail, it should always return SSL_CERT_FILE and SSL_CERT_DIR ofile_env = _str_from_buf(lib.X509_get_default_cert_file_env()) - if ofile_env is None: - return None + odir_env = _str_from_buf(lib.X509_get_default_cert_dir_env()) + + # Platform depenedent ofile = _str_from_buf(lib.X509_get_default_cert_file()) - if ofile is None: - return None - odir_env = _str_from_buf(lib.X509_get_default_cert_dir_env()) - if odir_env is None: - return None odir = _str_from_buf(lib.X509_get_default_cert_dir()) - if odir is None: - return odir - return (ofile_env, ofile, odir_env, odir); + + if os.path.exists(ofile) and os.path.exists(odir): + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + + # OpenSSL didn't supply the goods. 
Try some other options + for f in certFiles: + if os.path.exists(f): + ofile = f + for f in certDirectories: + if os.path.exists(f): + odir = f + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + @ffi.callback("int(SSL*,unsigned char **,unsigned char *,const unsigned char *,unsigned int,void *)") def select_alpn_callback(ssl, out, outlen, client_protocols, client_protocols_len, args): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -14,7 +14,7 @@ conf = get_pypy_config() conf.translation.gc = "boehm" with py.test.raises(ConfigError): - conf.translation.gcrootfinder = 'asmgcc' + conf.translation.gcrootfinder = 'shadowstack' def test_frameworkgc(): for name in ["minimark", "semispace"]: diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,16 +1,7 @@ Choose the method used to find the roots in the GC. This only -applies to our framework GCs. You have a choice of two -alternatives: +applies to our framework GCs. - ``--gcrootfinder=shadowstack``: use a so-called "shadow stack", which is an explicitly maintained custom stack of - root pointers. This is the most portable solution. - -- ``--gcrootfinder=asmgcc``: use assembler hackery to find the - roots directly from the normal stack. This is a bit faster, - but platform specific. It works so far with GCC or MSVC, - on i386 and x86-64. It is tested only on Linux - so other platforms (as well as MSVC) may need - various fixes before they can be used. Note asmgcc will be deprecated - at some future date, and does not work with clang. - + root pointers. This is the most portable solution, and also + the only one available now. 
diff --git a/pypy/doc/contributing.rst b/pypy/doc/contributing.rst --- a/pypy/doc/contributing.rst +++ b/pypy/doc/contributing.rst @@ -311,16 +311,13 @@ directory or even the top level subdirectory ``pypy``. It takes hours and uses huge amounts of RAM and is not recommended. -To run CPython regression tests you can point to the ``lib-python`` -directory:: - - py.test lib-python/2.7/test/test_datetime.py - -This will usually take a long time because this will run -the PyPy Python interpreter on top of CPython. On the plus -side, it's usually still faster than doing a full translation -and running the regression test with the translated PyPy Python -interpreter. +To run CPython regression tests, you should start with a translated PyPy and +run the tests as you would with CPython (see below). You can, however, also +attempt to run the tests before translation, but be aware that it is done with +a hack that doesn't work in all cases and it is usually extremely slow: +``py.test lib-python/2.7/test/test_datetime.py``. Usually, a better idea is to +extract a minimal failing test of at most a few lines, and put it into one of +our own tests in ``pypy/*/test/``. .. _py.test testing tool: http://pytest.org .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage @@ -350,6 +347,11 @@ cpython2 pytest.py -A pypy/module/cpyext/test --python=path/to/pypy3 +To run a test from the standard CPython regression test suite, use the regular +Python way, i.e. 
(replace "pypy" with the exact binary name, if needed):: + + pypy -m test.test_datetime + Tooling & Utilities ^^^^^^^^^^^^^^^^^^^ diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -133,6 +133,11 @@ * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) * Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) +* Support OpenSSL 1.1 and TLSv1_3 +* Remove the (deprecated since 5.7) asmgcc rootfinder from the GC +* Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit + platforms rather than automatically using a ``SignedLongLong``, require an + explicit ``r_int64()`` call instead C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -177,13 +182,18 @@ * Adds encoding, decoding codepages on win32 * Remove socket error attributes from ``_ssl`` (`issue 3119`_) * Add missing ``os.getgrouplist`` (part of `issue 2375`_) +* Back-port the tentative fix from cpython: "Import deadlock detection causes + deadlock" (part of `issue 3111`_) +* Fix handling of ``sys.exc_info()`` in generators +* Return ``W_IntObject`` when converting from ``float`` to ``int`` when + possible, which speeds up many code paths. Python 3.6 C-API ~~~~~~~~~~~~~~~~ * Add ``PyObject_GenericGetDict``, ``PyObject_GenericSetDict``, ``_Py_strhex``, ``_Py_strhex_bytes``, ``PyUnicodeNew``, ``_PyFinalizing``, - ``PySlice_Unpack``, ``PySlice_AdjustIndices`` + ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath`` * Implement ``pystrhex.h`` (`issue 2687`_) * Make ``PyUnicodeObject`` slightly more compact * Fix memory leak when releasing a ``PyUnicodeObject`` @@ -210,6 +220,7 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. 
_`issue 3111`: https://bitbucket.com/pypy/pypy/issues/3111 .. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,5 @@ ============================ .. this is a revision shortly after release-pypy-7.3.0 -.. startrev: dbbbae99135f +.. startrev: 994c42529580 -.. branch: backport-decode_timeval_ns-py3.7 - -Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython diff --git a/pypy/doc/whatsnew-pypy2-7.3.0.rst b/pypy/doc/whatsnew-pypy2-7.3.0.rst --- a/pypy/doc/whatsnew-pypy2-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy2-7.3.0.rst @@ -31,3 +31,11 @@ anonymous struct/unions, cmake fragments for distribution, optimizations for PODs, and faster wrapper calls. +.. branch: backport-decode_timeval_ns-py3.7 + +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. diff --git a/pypy/doc/whatsnew-pypy3-7.3.0.rst b/pypy/doc/whatsnew-pypy3-7.3.0.rst --- a/pypy/doc/whatsnew-pypy3-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy3-7.3.0.rst @@ -5,3 +5,20 @@ .. this is the revision after release-pypy3.6-v7.2 .. startrev: 6d2f8470165b + +.. branch: py3.6-asyncgen + +Fix asyncgen_hooks and refactor coroutine execution + +.. branch: py3.6-exc-info + +Follow CPython's use of exc_info more closely (issue 3096) + +.. branch: code_page-utf8 + +Add encoding, decoding of codepages on windows + +.. 
branch: py3.6-exc-info-2 + +Fix handling of sys.exc_info() in generators + diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -3,5 +3,5 @@ ========================== .. this is the revision after release-pypy3.6-v7.3.0 -.. startrev: 78b4d0a7cf2e +.. startrev: a56889d5df88 diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -43,8 +43,7 @@ from rpython.rlib import rgil rgil.acquire() - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) @@ -69,7 +68,6 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - rffi.stackcounter.stacks_counter -= 1 rgil.release() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1026,8 +1026,7 @@ else: gilstate = pystate.PyGILState_IGNORE - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C retval = fatal_value boxed_args = () tb = None @@ -1104,7 +1103,6 @@ return fatal_value assert lltype.typeOf(retval) == restype - rffi.stackcounter.stacks_counter -= 1 _restore_gil_state(pygilstate_release, gilstate, gil_release, _gil_auto, tid) return retval diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -30,7 +30,7 @@ # called from the rffi-generated wrapper). 
The gc_thread_run() # operation will automatically notice that the current thread id was # not seen before, and (in shadowstack) it will allocate and use a -# fresh new stack. Again, this has no effect in asmgcc. +# fresh new stack. # # * Only then does bootstrap() really run. The first thing it does # is grab the start-up information (app-level callable and args) @@ -43,7 +43,7 @@ # thread. # # * Just before a thread finishes, gc_thread_die() is called to free -# its shadow stack. This has no effect in asmgcc. +# its shadow stack. class Bootstrapper(object): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,8 +2,8 @@ pmaj=2 # python main version: 2 or 3 pmin=7 # python minor version maj=7 -min=2 -rev=0rc2 +min=3 +rev=0rc1 case $pmaj in "2") exe=pypy;; diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -13,13 +13,6 @@ config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') -if compiler.name == 'msvc' or sys.platform == 'darwin': - def test_no_asmgcrot_on_msvc(): - config = get_combined_translation_config() - config.translation.gcrootfinder = "asmgcc" - py.test.raises(ConfigError, set_opt_level, config, 'jit') - - def test_get_translation_config(): from rpython.translator.interactive import Translation from rpython.config import config diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -18,10 +18,6 @@ DEFL_GC = "incminimark" # XXX DEFL_ROOTFINDER_WITHJIT = "shadowstack" -## if sys.platform.startswith("linux"): -## _mach = os.popen('uname -m', 'r').read().strip() -## if _mach.startswith('x86') or _mach in ['i386', 'i486', 
'i586', 'i686']: -## DEFL_ROOTFINDER_WITHJIT = "asmgcc" # only for Linux on x86 / x86-64 IS_64_BITS = sys.maxint > 2147483647 @@ -100,13 +96,11 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ["n/a", "shadowstack"], "shadowstack", cmdline="--gcrootfinder", requires={ "shadowstack": [("translation.gctransformer", "framework")], - "asmgcc": [("translation.gctransformer", "framework"), - ("translation.backend", "c")], }), # other noticeable options @@ -402,10 +396,6 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X and on Win32 - if config.translation.gcrootfinder == "asmgcc": - if sys.platform == "darwin" or sys.platform =="win32": - raise ConfigError("'asmgcc' not supported on this platform") # ---------------------------------------------------------------- diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -438,51 +438,8 @@ @staticmethod @rgc.no_collect - def _reacquire_gil_asmgcc(css, old_rpy_fastgil): - # Before doing an external call, 'rpy_fastgil' is initialized to - # be equal to css. This function is called if we find out after - # the call that it is no longer equal to css. See description - # in translator/c/src/thread_pthread.c. - - # XXX some duplicated logic here, but note that rgil.acquire() - # does more than just RPyGilAcquire() - if old_rpy_fastgil == 0: - # this case occurs if some other thread stole the GIL but - # released it again. What occurred here is that we changed - # 'rpy_fastgil' from 0 to 1, thus successfully reaquiring the - # GIL. - pass - - elif old_rpy_fastgil == 1: - # 'rpy_fastgil' was (and still is) locked by someone else. - # We need to wait for the regular mutex. 
- from rpython.rlib import rgil - rgil.acquire() - else: - # stole the GIL from a different thread that is also - # currently in an external call from the jit. Attach - # the 'old_rpy_fastgil' into the chained list. - from rpython.memory.gctransform import asmgcroot - oth = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, old_rpy_fastgil) - next = asmgcroot.gcrootanchor.next - oth.next = next - oth.prev = asmgcroot.gcrootanchor - asmgcroot.gcrootanchor.next = oth - next.prev = oth - - # similar to trackgcroot.py:pypy_asm_stackwalk, second part: - # detach the 'css' from the chained list - from rpython.memory.gctransform import asmgcroot - old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) - prev = old.prev - next = old.next - prev.next = next - next.prev = prev - - @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): - # Simplified version of _reacquire_gil_asmgcc(): in shadowstack mode, + # This used to be more complex for asmgcc. In shadowstack mode, # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. 
still locked, must wait with the regular mutex) @@ -499,13 +456,10 @@ self._reacquire_gil_shadowstack) self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) else: - reacqgil_func = llhelper(self._REACQGIL2_FUNC, - self._reacquire_gil_asmgcc) - self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) + raise AssertionError("!is_shadow_stack") def _is_asmgcc(self): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - return bool(gcrootmap) and not gcrootmap.is_shadow_stack + return False # legacy def debug_bridge(descr_number, rawstart, codeendpos): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -21,7 +21,6 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler -from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo # ____________________________________________________________ @@ -117,7 +116,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(jitframe.JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth', + 'jf_frame_info', 'jf_gcmap', 'jf_savedata', 'jf_forward']: setattr(descrs, name, cpu.fielddescrof(jitframe.JITFRAME, name)) descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, @@ -244,15 +243,6 @@ # ____________________________________________________________ # All code below is for the hybrid or minimark GC -class GcRootMap_asmgcc(object): - is_shadow_stack = False - - def __init__(self, gcdescr): - pass - - def register_asm_addr(self, start, mark): - pass - class GcRootMap_shadowstack(object): is_shadow_stack = True diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- 
a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -49,7 +49,6 @@ rgc.register_custom_trace_hook(JITFRAME, lambda_jitframe_trace) frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info - frame.jf_extra_stack_depth = 0 return frame def jitframe_resolve(frame): @@ -71,8 +70,6 @@ ('jf_force_descr', llmemory.GCREF), # a map of GC pointers ('jf_gcmap', lltype.Ptr(GCMAP)), - # how much we decrease stack pointer. Used around calls and malloc slowpath - ('jf_extra_stack_depth', lltype.Signed), # For the front-end: a GCREF for the savedata ('jf_savedata', llmemory.GCREF), # For GUARD_(NO)_EXCEPTION and GUARD_NOT_FORCED: the exception we @@ -103,7 +100,6 @@ LENGTHOFS = llmemory.arraylengthoffset(JITFRAME.jf_frame) SIGN_SIZE = llmemory.sizeof(lltype.Signed) UNSIGN_SIZE = llmemory.sizeof(lltype.Unsigned) -STACK_DEPTH_OFS = getofs('jf_extra_stack_depth') def jitframe_trace(gc, obj_addr, callback, arg): gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr')) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -584,8 +584,6 @@ length = self.emit_getfield(ConstInt(frame_info), descr=descrs.jfi_frame_depth, raw=True) - self.emit_setfield(frame, self.c_zero, - descr=descrs.jf_extra_stack_depth) self.emit_setfield(frame, self.c_null, descr=descrs.jf_savedata) self.emit_setfield(frame, self.c_null, diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -110,7 +110,7 @@ class config_(object): class translation(object): gc = self.gc - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False class FakeTranslator(object): diff --git 
a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -507,7 +507,6 @@ ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), ('jf_descr', llmemory.GCREF), ('jf_force_descr', llmemory.GCREF), - ('jf_extra_stack_depth', lltype.Signed), ('jf_guard_exc', llmemory.GCREF), ('jf_gcmap', lltype.Ptr(jitframe.GCMAP)), ('jf_gc_trace_state', lltype.Signed), @@ -594,7 +593,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + 'jf_frame_info', 'jf_gcmap']: setattr(descrs, name, cpu.fielddescrof(JITFRAME, name)) descrs.jfi_frame_depth = cpu.fielddescrof(jitframe.JITFRAMEINFO, 'jfi_frame_depth') diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -170,7 +170,6 @@ jf_descr = framedescrs.jf_descr jf_guard_exc = framedescrs.jf_guard_exc jf_forward = framedescrs.jf_forward - jf_extra_stack_depth = framedescrs.jf_extra_stack_depth signedframedescr = self.cpu.signedframedescr floatframedescr = self.cpu.floatframedescr casmdescr.compiled_loop_token = clt @@ -386,7 +385,7 @@ class config_(object): class translation(object): gc = 'minimark' - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False gcdescr = get_description(config_) @@ -1102,7 +1101,6 @@ p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) - %(setfield('p1', 0, jf_extra_stack_depth))s %(setfield('p1', 'NULL', jf_savedata))s 
%(setfield('p1', 'NULL', jf_force_descr))s %(setfield('p1', 'NULL', jf_descr))s diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -176,9 +176,6 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -331,9 +331,6 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4725,6 +4725,7 @@ def test_raw_load_int(self): from rpython.rlib import rawstorage + from rpython.rlib.rarithmetic import r_longlong for T in [rffi.UCHAR, rffi.SIGNEDCHAR, rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, @@ -4738,7 +4739,7 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = rffi.cast(T, -0x4243444546474849) + value = rffi.cast(T, r_longlong(-0x4243444546474849)) rawstorage.raw_storage_setitem(p, 16, value) got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, arraydescr) diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -35,9 +35,7 @@ PASS_ON_MY_FRAME 
= 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, - # and it can be left here for when it is needed. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and it can be left here for when it is needed. THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 @@ -45,12 +43,10 @@ PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, - # and is moved into this frame location. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and is moved into this frame location. THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD -assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 +assert PASS_ON_MY_FRAME >= 12 # return address, followed by FRAME_FIXED_SIZE words DEFAULT_FRAME_BYTES = (1 + FRAME_FIXED_SIZE) * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -137,11 +137,6 @@ self.expand_byte_mask_addr = float_constants + 64 self.element_ones = [float_constants + 80 + 16*i for i in range(4)] - def set_extra_stack_depth(self, mc, value): - if self._is_asmgcc(): - extra_ofs = self.cpu.get_ofs_of_frame_field('jf_extra_stack_depth') - mc.MOV_bi(extra_ofs, value) - def build_frame_realloc_slowpath(self): mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) @@ -161,14 +156,20 @@ mc.MOV_sr(0, ebp.value) # align - self.set_extra_stack_depth(mc, align * WORD) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * 
need in the future again to track the exact stack depth. + # + #self.set_extra_stack_depth(mc, align * WORD) + self._store_and_reset_exception(mc, None, ebx, ecx) mc.CALL(imm(self.cpu.realloc_frame)) mc.MOV_rr(ebp.value, eax.value) self._restore_exception(mc, None, ebx, ecx) mc.ADD_ri(esp.value, (align - 1) * WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -196,12 +197,12 @@ # the caller already did push_gcmap(store=True) if IS_X86_64: mc.SUB(esp, imm(WORD)) # alignment - self.set_extra_stack_depth(mc, 2 * WORD) + #self.set_extra_stack_depth(mc, 2 * WORD) # the arguments are already in the correct registers else: # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 8 * WORD) + #self.set_extra_stack_depth(mc, 8 * WORD) # store the arguments at the correct place in the stack for i in range(4): mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) @@ -211,7 +212,7 @@ mc.ADD(esp, imm(WORD)) else: mc.ADD(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [eax], supports_floats, callee_only) mc.RET() @@ -275,11 +276,11 @@ # (already in edx) # length mc.MOV_rr(esi.value, ecx.value) # tid mc.MOV_rs(edi.value, WORD * 3) # load the itemsize - self.set_extra_stack_depth(mc, 16) + #self.set_extra_stack_depth(mc, 16) mc.CALL(imm(follow_jump(addr))) self._reload_frame_if_necessary(mc) mc.ADD_ri(esp.value, 16 - WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) # mc.TEST_rr(eax.value, eax.value) # common case: not taken @@ -1018,8 +1019,6 @@ from rpython.rlib.rvmprof.rvmprof import cintf # edx = address of pypy_threadlocal_s self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(edx.value, ~1) # eax = 
(our local vmprof_tl_stack).next self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax @@ -2236,25 +2235,6 @@ def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) - if self._is_asmgcc(): - # We need to remove the bit "already seen during the - # previous minor collection" instead of passing this - # value directly. - if IS_X86_64: - tmploc = esi # already the correct place - if argloc is tmploc: - # this case is theoretical only so far: in practice, - # argloc is always eax, never esi - self.mc.MOV_rr(edi.value, esi.value) - argloc = edi - else: - tmploc = eax - if tmploc is argloc: - tmploc = edx - self.mc.MOV(tmploc, threadlocal_loc) - self.mc.AND_ri(tmploc.value, ~1) - threadlocal_loc = tmploc - # self.simple_call(addr, [argloc, threadlocal_loc]) def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): @@ -2672,8 +2652,6 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(resloc.value, ~1) self.load_from_mem(resloc, addr_add_const(resloc, offset), imm(size), imm(sign)) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -61,13 +61,6 @@ self.arglocs = arglocs + [fnloc] self.start_frame_size = self.mc._frame_size - def select_call_release_gil_mode(self): - AbstractCallBuilder.select_call_release_gil_mode(self) - if self.asm._is_asmgcc(): - from rpython.memory.gctransform import asmgcroot - self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS - assert self.stack_max >= 3 - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -103,9 +96,14 @@ # value eax, if necessary assert not self.is_call_release_gil current_esp = self.get_current_esp() - self.change_extra_stack_depth = 
(current_esp != 0) - if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, -current_esp) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. + # + #self.change_extra_stack_depth = (current_esp != 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, -current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) @@ -119,13 +117,14 @@ # top at this point, so reuse it instead of loading it again ssreg = ebx self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) - if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap assert self.is_call_release_gil + assert not self.asm._is_asmgcc() # # Save this thread's shadowstack pointer into 'ebx', # for later comparison @@ -135,38 +134,12 @@ rst = gcrootmap.get_root_stack_top_addr() self.mc.MOV(ebx, heap(rst)) # - if not self.asm._is_asmgcc(): - # shadowstack: change 'rpy_fastgil' to 0 (it should be - # non-zero right now). - self.change_extra_stack_depth = False - # ^^ note that set_extra_stack_depth() in this case is a no-op - css_value = imm(0) - else: - from rpython.memory.gctransform import asmgcroot - # build a 'css' structure on the stack: 2 words for the linkage, - # and 5/7 words as described for asmgcroot.ASM_FRAMEDATA, for a - # total size of JIT_USE_WORDS. This structure is found at - # [ESP+css]. 
- css = -self.get_current_esp() + ( - WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS)) - assert css >= 2 * WORD - # Save ebp - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_sr(index_of_ebp, ebp.value) # MOV [css.ebp], EBP - # Save the "return address": we pretend that it's css - self.mc.LEA_rs(eax.value, css) # LEA eax, [css] - frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_sr(frame_ptr, eax.value) # MOV [css.frame], eax - # Set up jf_extra_stack_depth to pretend that the return address - # was at css, and so our stack frame is supposedly shorter by - # (PASS_ON_MY_FRAME-JIT_USE_WORDS+1) words - delta = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS + 1 - self.change_extra_stack_depth = True - self.asm.set_extra_stack_depth(self.mc, -delta * WORD) - css_value = eax + # shadowstack: change 'rpy_fastgil' to 0 (it should be + # non-zero right now). + #self.change_extra_stack_depth = False # # <--here--> would come a memory fence, if the CPU needed one. 
- self.mc.MOV(heap(fastgil), css_value) + self.mc.MOV(heap(fastgil), imm(0)) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more @@ -184,8 +157,6 @@ self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, THREADLOCAL_OFS - self.get_current_esp()) - if self.asm._is_asmgcc(): - self.mc.AND_ri(self.tlofs_reg.value, ~1) return self.tlofs_reg def save_stack_position(self): @@ -318,13 +289,6 @@ cb = self.callbuilder if not cb.result_value_saved_early: cb.save_result_value(save_edx=False) - if assembler._is_asmgcc(): - if IS_X86_32: - css_value = edx - old_value = ecx - mc.MOV_sr(4, old_value.value) - mc.MOV_sr(0, css_value.value) - # on X86_64, they are already in the right registers mc.CALL(imm(follow_jump(assembler.reacqgil_addr))) if not cb.result_value_saved_early: cb.restore_result_value(save_edx=False) @@ -333,29 +297,10 @@ from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not - # (to acquiring the GIL, remove the asmgcc head from - # the chained list, etc.) + # (to acquiring the GIL) mc = self.mc restore_edx = False - if not self.asm._is_asmgcc(): - css = 0 - css_value = imm(0) - old_value = ecx - else: - from rpython.memory.gctransform import asmgcroot - css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) - if IS_X86_32: - assert css >= 16 - if self.restype == 'L': # long long result: eax/edx - if not self.result_value_saved_early: - mc.MOV_sr(12, edx.value) - restore_edx = True - css_value = edx # note: duplicated in ReacqGilSlowPath - old_value = ecx # - elif IS_X86_64: - css_value = edi - old_value = esi - mc.LEA_rs(css_value.value, css) + old_value = ecx # # Use XCHG as an atomic test-and-set-lock. It also implicitly # does a memory barrier. 
@@ -365,11 +310,12 @@ else: mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) - mc.CMP(old_value, css_value) + mc.CMP(old_value, imm(0)) # gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap - if bool(gcrootmap) and gcrootmap.is_shadow_stack: + if bool(gcrootmap): from rpython.jit.backend.x86.assembler import heap + assert gcrootmap.is_shadow_stack # # When doing a call_release_gil with shadowstack, there # is the risk that the 'rpy_fastgil' was free but the @@ -406,14 +352,8 @@ if not we_are_translated(): # for testing: now we can accesss mc.SUB(ebp, imm(1)) # ebp again # - # Now that we required the GIL, we can reload a possibly modified ebp - if self.asm._is_asmgcc(): - # special-case: reload ebp from the css - from rpython.memory.gctransform import asmgcroot - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] - #else: - # for shadowstack, done for us by _reload_frame_if_necessary() + # Now that we required the GIL, we will reload a possibly modified ebp: + # this done for us by _reload_frame_if_necessary() def save_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -829,10 +829,7 @@ self.xrm.before_call(save_all_regs=save_all_regs) if gc_level == SAVE_GCREF_REGS: gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap - # we save all the GCREF registers for shadowstack and asmgcc for now - # --- for asmgcc too: we can't say "register x is a gc ref" - # without distinguishing call sites, which we don't do any - # more for now. 
+ # we save all the GCREF registers for shadowstack if gcrootmap: # and gcrootmap.is_shadow_stack: save_all_regs = SAVE_GCREF_REGS self.rm.before_call(save_all_regs=save_all_regs) @@ -940,15 +937,6 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): - # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' - # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. - # We must make sure that edi and esi do not contain GC pointers. - if IS_X86_32 and self.assembler._is_asmgcc(): - for box, loc in self.rm.reg_bindings.items(): - if (loc == edi or loc == esi) and box.type == REF: - self.rm.force_spill_var(box) - assert box not in self.rm.reg_bindings - # args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments v_func = args[1] diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -285,12 +285,15 @@ cases = [8, 16, 24] if WORD == 8: cases.append(32) + bigvalue = 0xAAAAAAAAAAAA + else: + bigvalue = 0xAAAAAAA for i in cases: - box = InputArgInt(0xAAAAAAAAAAAA) + box = InputArgInt(bigvalue) res = self.execute_operation(rop.INT_AND, [box, ConstInt(2 ** i - 1)], 'int') - assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) + assert res == bigvalue & (2 ** i - 1) def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -1,4 +1,5 @@ import py, struct +from rpython.rlib.rarithmetic import r_longlong from rpython.jit.backend.x86.rx86 import * globals().update(R.__dict__) @@ -210,8 +211,8 @@ s.MOV_ri(ebx, -0x80000003) s.MOV_ri(r13, -0x80000002) s.MOV_ri(ecx, 42) - s.MOV_ri(r12, 0x80000042) - s.MOV_ri(r12, 0x100000007) + s.MOV_ri(r12, 
r_longlong(0x80000042)) + s.MOV_ri(r12, r_longlong(0x100000007)) assert s.getvalue() == ('\x48\xC7\xC1\xFE\xFF\xFF\xFF' + '\x49\xC7\xC7\xFD\xFF\xFF\xFF' + '\x48\xBB\xFD\xFF\xFF\x7F\xFF\xFF\xFF\xFF' + diff --git a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -1,7 +1,11 @@ +import sys, py import random from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.test import test_rx86_32_auto_encoding +if sys.maxint <= 2**32: + py.test.skip("skipping this test on x86-32") + class TestRx86_64(test_rx86_32_auto_encoding.TestRx86_32): WORD = 8 diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py deleted file mode 100644 --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ /dev/null @@ -1,9 +0,0 @@ -import py -from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests -from rpython.translator.platform import platform as compiler - -if compiler.name == 'msvc': - py.test.skip('asmgcc buggy on msvc') - -class TestAsmGcc(CompileFrameworkTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py --- a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py +++ b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py @@ -1,11 +1,5 @@ from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests -from rpython.translator.platform import platform as compiler class TestShadowStack(ReleaseGILTests): gcrootfinder = "shadowstack" - - -if compiler.name != 'msvc': - class TestAsmGcc(ReleaseGILTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py 
b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py --- a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py @@ -1,19 +1,12 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC -from rpython.translator.platform import platform as compiler -if compiler.name == 'msvc': - _MSVC = True -else: - _MSVC = False class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest): def _get_TranslationContext(self): t = TranslationContext() t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' - if not _MSVC: - t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True return t diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1156,8 +1156,7 @@ 'CALL_ASSEMBLER/*d/rfin', # call already compiled assembler 'CALL_MAY_FORCE/*d/rfin', 'CALL_LOOPINVARIANT/*d/rfin', - 'CALL_RELEASE_GIL/*d/fin', - # release the GIL and "close the stack" for asmgcc + 'CALL_RELEASE_GIL/*d/fin', # release the GIL around the call 'CALL_PURE/*d/rfin', # removed before it's passed to the backend 'CHECK_MEMORY_ERROR/1/n', # after a CALL: NULL => propagate MemoryError 'CALL_MALLOC_NURSERY/1/r', # nursery malloc, const number of bytes, zeroed diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py deleted file mode 100644 --- a/rpython/memory/gctransform/asmgcroot.py +++ /dev/null @@ -1,870 +0,0 @@ -from rpython.flowspace.model import (Constant, Variable, Block, Link, - copygraph, SpaceOperation, checkgraph) -from rpython.rlib.debug import ll_assert -from rpython.rlib.nonconst 
import NonConstant -from rpython.rlib import rgil -from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import ( - BaseFrameworkGCTransformer, BaseRootWalker) -from rpython.rtyper.llannotation import SomeAddress -from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import varoftype -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys - - -# -# This transformer avoids the use of a shadow stack in a completely -# platform-specific way, by directing genc to insert asm() special -# instructions in the C source, which are recognized by GCC. -# The .s file produced by GCC is then parsed by trackgcroot.py. -# - -IS_64_BITS = sys.maxint > 2147483647 - -class AsmGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer): - _asmgcc_save_restore_arguments = None - - def push_roots(self, hop, keep_current_args=False): - livevars = self.get_livevars_for_roots(hop, keep_current_args) - self.num_pushs += len(livevars) - return livevars - - def pop_roots(self, hop, livevars): - if not livevars: - return - # mark the values as gc roots - for var in livevars: - v_adr = gen_cast(hop.llops, llmemory.Address, var) - v_newaddr = hop.genop("direct_call", [c_asm_gcroot, v_adr], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) - - def build_root_walker(self): - return AsmStackRootWalker(self) - - def mark_call_cannotcollect(self, hop, name): - hop.genop("direct_call", [c_asm_nocollect, name]) - - def gct_direct_call(self, hop): - # just a sanity check: if we find a fnptr with the hint on the - # _callable, then we'd also find the hint by looking only at the - # graph. We'll actually change this graph only later, in - # start_transforming_graph(). 
- fnptr = hop.spaceop.args[0].value - try: - close_stack = fnptr._obj._callable._gctransformer_hint_close_stack_ - except AttributeError: - pass - else: - assert fnptr._obj.graph.func is fnptr._obj._callable - BaseFrameworkGCTransformer.gct_direct_call(self, hop) - - def start_transforming_graph(self, graph): - try: - close_stack = graph.func._gctransformer_hint_close_stack_ - except AttributeError: - close_stack = False - if close_stack: - self._transform_hint_close_stack(graph) - - def _transform_hint_close_stack(self, graph): - # We cannot easily pass variable amount of arguments of the call - # across the call to the pypy_asm_stackwalk helper. So we store - # them away and restore them. More precisely, we need to - # replace 'graph' with code that saves the arguments, and make - # a new graph that starts with restoring the arguments. - if self._asmgcc_save_restore_arguments is None: - self._asmgcc_save_restore_arguments = {} - sradict = self._asmgcc_save_restore_arguments - sra = [] # list of pointers to raw-malloced containers for args - seen = {} - ARGS = [v.concretetype for v in graph.getargs()] - for TYPE in ARGS: - if isinstance(TYPE, lltype.Ptr): - TYPE = llmemory.Address - num = seen.get(TYPE, 0) - seen[TYPE] = num + 1 - key = (TYPE, num) - if key not in sradict: - CONTAINER = lltype.FixedSizeArray(TYPE, 1) - p = lltype.malloc(CONTAINER, flavor='raw', zero=True, - immortal=True) - sradict[key] = Constant(p, lltype.Ptr(CONTAINER)) - sra.append(sradict[key]) - # - # make a copy of the graph that will reload the values - graph2 = copygraph(graph) - del graph2.func # otherwise, start_transforming_graph() will - # again transform graph2, and we get an - # infinite loop - # - # edit the original graph to only store the value of the arguments - block = Block(graph.startblock.inputargs) - c_item0 = Constant('item0', lltype.Void) - assert len(block.inputargs) == len(sra) - for v_arg, c_p in zip(block.inputargs, sra): - if isinstance(v_arg.concretetype, 
lltype.Ptr): - v_adr = varoftype(llmemory.Address) - block.operations.append( - SpaceOperation("cast_ptr_to_adr", [v_arg], v_adr)) - v_arg = v_adr - v_void = varoftype(lltype.Void) - block.operations.append( - SpaceOperation("bare_setfield", [c_p, c_item0, v_arg], v_void)) - # - # call asm_stackwalk(graph2) - RESULT = graph.getreturnvar().concretetype - FUNC2 = lltype.FuncType([], RESULT) - fnptr2 = lltype.functionptr(FUNC2, - graph.name + '_reload', - graph=graph2) - c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2)) - HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2), - ASM_FRAMEDATA_HEAD_PTR], RESULT) - v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk") - block.operations.append( - SpaceOperation("cast_pointer", [c_asm_stackwalk], v_asm_stackwalk)) - v_result = varoftype(RESULT) - block.operations.append( - SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2, - c_gcrootanchor, - Constant(None, lltype.Void)], - v_result)) - block.closeblock(Link([v_result], graph.returnblock)) - graph.startblock = block - # - # edit the copy of the graph to reload the values - block2 = graph2.startblock - block1 = Block([]) - reloadedvars = [] - for v, c_p in zip(block2.inputargs, sra): - v = v.copy() - if isinstance(v.concretetype, lltype.Ptr): - w = varoftype(llmemory.Address) - else: - w = v - block1.operations.append(SpaceOperation('getfield', - [c_p, c_item0], w)) - if w is not v: - block1.operations.append(SpaceOperation('cast_adr_to_ptr', - [w], v)) - reloadedvars.append(v) - block1.closeblock(Link(reloadedvars, block2)) - graph2.startblock = block1 - # - checkgraph(graph) - checkgraph(graph2) - - -class AsmStackRootWalker(BaseRootWalker): - - def __init__(self, gctransformer): - BaseRootWalker.__init__(self, gctransformer) - - def _asm_callback(): - self.walk_stack_from() - self._asm_callback = _asm_callback - self._shape_decompressor = ShapeDecompressor() - self._with_jit = hasattr(gctransformer.translator, '_jit2gc') - if self._with_jit: - jit2gc = 
gctransformer.translator._jit2gc - self.frame_tid = jit2gc['frame_tid'] - self.gctransformer = gctransformer - # - # unless overridden in need_thread_support(): - self.belongs_to_current_thread = lambda framedata: True - - def need_stacklet_support(self, gctransformer, getfn): - from rpython.annotator import model as annmodel - from rpython.rlib import _stacklet_asmgcc - # stacklet support: BIG HACK for rlib.rstacklet - _stacklet_asmgcc._asmstackrootwalker = self # as a global! argh - _stacklet_asmgcc.complete_destrptr(gctransformer) - # - def gc_detach_callback_pieces(): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - result = llmemory.NULL - framedata = anchor.address[1] - while framedata != anchor: - next = framedata.address[1] - if self.belongs_to_current_thread(framedata): - # detach it - prev = framedata.address[0] - prev.address[1] = next - next.address[0] = prev - # update the global stack counter - rffi.stackcounter.stacks_counter -= 1 - # reattach framedata into the singly-linked list 'result' - framedata.address[0] = rffi.cast(llmemory.Address, -1) - framedata.address[1] = result - result = framedata - framedata = next - return result - # - def gc_reattach_callback_pieces(pieces): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while pieces != llmemory.NULL: - framedata = pieces - pieces = pieces.address[1] - # attach 'framedata' into the normal doubly-linked list - following = anchor.address[1] - following.address[0] = framedata - framedata.address[1] = following - anchor.address[1] = framedata - framedata.address[0] = anchor - # update the global stack counter - rffi.stackcounter.stacks_counter += 1 - # - s_addr = SomeAddress() - s_None = annmodel.s_None - self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, - [], s_addr) - self.gc_reattach_callback_pieces_ptr=getfn(gc_reattach_callback_pieces, - [s_addr], s_None) - - def need_thread_support(self, gctransformer, getfn): - # Threads supported "out of the box" by the rest of the 
code. - # The whole code in this function is only there to support - # fork()ing in a multithreaded process :-( - # For this, we need to handle gc_thread_start and gc_thread_die - # to record the mapping {thread_id: stack_start}, and - # gc_thread_before_fork and gc_thread_after_fork to get rid of - # all ASM_FRAMEDATA structures that do no belong to the current - # thread after a fork(). - from rpython.rlib import rthread - from rpython.memory.support import AddressDict - from rpython.memory.support import copy_without_null_values - from rpython.annotator import model as annmodel - gcdata = self.gcdata - - def get_aid(): - """Return the thread identifier, cast to an (opaque) address.""" - return llmemory.cast_int_to_adr(rthread.get_ident()) - - def thread_start(): - value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed)) - gcdata.aid2stack.setitem(get_aid(), value) - thread_start._always_inline_ = True - - def thread_setup(): - gcdata.aid2stack = AddressDict() - gcdata.dead_threads_count = 0 - # to also register the main thread's stack - thread_start() - thread_setup._always_inline_ = True - - def thread_die(): - gcdata.aid2stack.setitem(get_aid(), llmemory.NULL) - # from time to time, rehash the dictionary to remove - # old NULL entries - gcdata.dead_threads_count += 1 - if (gcdata.dead_threads_count & 511) == 0: - copy = copy_without_null_values(gcdata.aid2stack) - gcdata.aid2stack.delete() - gcdata.aid2stack = copy - - def belongs_to_current_thread(framedata): - # xxx obscure: the answer is Yes if, as a pointer, framedata - # lies between the start of the current stack and the top of it. 
- stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL) - ll_assert(stack_start != llmemory.NULL, - "current thread not found in gcdata.aid2stack!") - stack_stop = llmemory.cast_int_to_adr( - llop.stack_current(lltype.Signed)) - return (stack_start <= framedata <= stack_stop or - stack_start >= framedata >= stack_stop) - self.belongs_to_current_thread = belongs_to_current_thread - - def thread_before_fork(): - # before fork(): collect all ASM_FRAMEDATA structures that do - # not belong to the current thread, and move them out of the - # way, i.e. out of the main circular doubly linked list. - detached_pieces = llmemory.NULL - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - while initialframedata != anchor: # while we have not looped back - if not belongs_to_current_thread(initialframedata): - # Unlink it - prev = initialframedata.address[0] - next = initialframedata.address[1] - prev.address[1] = next - next.address[0] = prev - # Link it to the singly linked list 'detached_pieces' - initialframedata.address[0] = detached_pieces - detached_pieces = initialframedata - rffi.stackcounter.stacks_counter -= 1 - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - return detached_pieces - - def thread_after_fork(result_of_fork, detached_pieces): - if result_of_fork == 0: - # We are in the child process. Assumes that only the - # current thread survived. All the detached_pieces - # are pointers in other stacks, so have likely been - # freed already by the multithreaded library. - # Nothing more for us to do. - pass - else: - # We are still in the parent process. The fork() may - # have succeeded or not, but that's irrelevant here. - # We need to reattach the detached_pieces now, to the - # circular doubly linked list at 'gcrootanchor'. The - # order is not important. 
- anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while detached_pieces != llmemory.NULL: - reattach = detached_pieces - detached_pieces = detached_pieces.address[0] - a_next = anchor.address[1] - reattach.address[0] = anchor - reattach.address[1] = a_next - anchor.address[1] = reattach - a_next.address[0] = reattach - rffi.stackcounter.stacks_counter += 1 - - self.thread_setup = thread_setup - self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None, - inline=True) - self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) - self.thread_before_fork_ptr = getfn(thread_before_fork, [], - SomeAddress()) - self.thread_after_fork_ptr = getfn(thread_after_fork, - [annmodel.SomeInteger(), - SomeAddress()], - annmodel.s_None) - # - # check that the order of the need_*() is correct for us: if we - # need both threads and stacklets, need_thread_support() must be - # called first, to initialize self.belongs_to_current_thread. - assert not hasattr(self, 'gc_detach_callback_pieces_ptr') - - def postprocess_graph(self, gct, graph, any_inlining): - pass - - def walk_stack_roots(self, collect_stack_root, is_minor=False): - gcdata = self.gcdata - gcdata._gc_collect_stack_root = collect_stack_root - gcdata._gc_collect_is_minor = is_minor - pypy_asm_stackwalk(llhelper(ASM_CALLBACK_PTR, self._asm_callback), - gcrootanchor) - - def walk_stack_from(self): - curframe = lltype.malloc(WALKFRAME, flavor='raw') - otherframe = lltype.malloc(WALKFRAME, flavor='raw') - - # Walk over all the pieces of stack. They are in a circular linked - # list of structures of 7 words, the 2 first words being prev/next. 
- # The anchor of this linked list is: - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - stackscount = 0 - while initialframedata != anchor: # while we have not looped back - self.walk_frames(curframe, otherframe, initialframedata) - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - stackscount += 1 - # - # for the JIT: rpy_fastgil may contain an extra framedata - rpy_fastgil = rgil.gil_fetch_fastgil().signed[0] - if rpy_fastgil != 1: - ll_assert(rpy_fastgil != 0, "walk_stack_from doesn't have the GIL") - initialframedata = rffi.cast(llmemory.Address, rpy_fastgil) - # - # very rare issue: initialframedata.address[0] is uninitialized - # in this case, but "retaddr = callee.frame_address.address[0]" - # reads it. If it happens to be exactly a valid return address - # inside the C code, then bad things occur. - initialframedata.address[0] = llmemory.NULL - # - self.walk_frames(curframe, otherframe, initialframedata) - stackscount += 1 - # - expected = rffi.stackcounter.stacks_counter - if NonConstant(0): - rffi.stackcounter.stacks_counter += 42 # hack to force it - ll_assert(not (stackscount < expected), "non-closed stacks around") - ll_assert(not (stackscount > expected), "stacks counter corruption?") - lltype.free(otherframe, flavor='raw') - lltype.free(curframe, flavor='raw') - - def walk_frames(self, curframe, otherframe, initialframedata): - self.fill_initial_frame(curframe, initialframedata) - # Loop over all the frames in the stack - while self.walk_to_parent_frame(curframe, otherframe): - swap = curframe - curframe = otherframe # caller becomes callee - otherframe = swap - - def fill_initial_frame(self, curframe, initialframedata): - # Read the information provided by initialframedata - initialframedata += 2*sizeofaddr #skip the prev/next words at the start - reg = 0 - while reg < CALLEE_SAVED_REGS: - # NB. 
'initialframedata' stores the actual values of the - # registers %ebx etc., and if these values are modified - # they are reloaded by pypy_asm_stackwalk(). By contrast, - # 'regs_stored_at' merely points to the actual values - # from the 'initialframedata'. - curframe.regs_stored_at[reg] = initialframedata + reg*sizeofaddr - reg += 1 - curframe.frame_address = initialframedata.address[CALLEE_SAVED_REGS] - - def walk_to_parent_frame(self, callee, caller): - """Starting from 'callee', walk the next older frame on the stack - and fill 'caller' accordingly. Also invokes the collect_stack_root() - callback from the GC code for each GC root found in 'caller'. - """ - # - # The gcmap table is a list of entries, two machine words each: - # void *SafePointAddress; - # int Shape; - # - # A "safe point" is the return address of a call. - # The "shape" of a safe point is a list of integers - # that represent "locations". A "location" can be - # either in the stack or in a register. See - # getlocation() for the decoding of this integer. - # The locations stored in a "shape" are as follows: - # - # * The "location" of the return address. This is just - # after the end of the frame of 'callee'; it is the - # first word of the frame of 'caller' (see picture - # below). - # - # * Four "locations" that specify where the function saves - # each of the four callee-saved registers (%ebx, %esi, - # %edi, %ebp). - # - # * The number of live GC roots around the call. - # - # * For each GC root, an integer that specify where the - # GC pointer is stored. This is a "location" too. - # - # XXX the details are completely specific to X86!!! - # a picture of the stack may help: - # ^ ^ ^ - # | ... | to older frames - # +--------------+ - # | ret addr | <------ caller_frame (addr of retaddr) - # | ... | - # | caller frame | - # | ... | - # +--------------+ - # | ret addr | <------ callee_frame (addr of retaddr) - # | ... | - # | callee frame | - # | ... 
| lower addresses - # +--------------+ v v v - # - - retaddr = callee.frame_address.address[0] - # - # try to locate the caller function based on retaddr. - # set up self._shape_decompressor. - # - ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0] - self.locate_caller_based_on_retaddr(retaddr, ebp_in_caller) - # - # found! Enumerate the GC roots in the caller frame - # - collect_stack_root = self.gcdata._gc_collect_stack_root - gc = self.gc - while True: - location = self._shape_decompressor.next() - if location == 0: - break - addr = self.getlocation(callee, ebp_in_caller, location) - if gc.points_to_valid_gc_object(addr): - collect_stack_root(gc, addr) - # - # small hack: the JIT reserves THREADLOCAL_OFS's last bit for - # us. We use it to store an "already traced past this frame" - # flag. - if self._with_jit and self.gcdata._gc_collect_is_minor: - if self.mark_jit_frame_can_stop(callee): - return False - # - # track where the caller_frame saved the registers from its own - # caller - # - reg = CALLEE_SAVED_REGS - 1 - while reg >= 0: - location = self._shape_decompressor.next() - addr = self.getlocation(callee, ebp_in_caller, location) - caller.regs_stored_at[reg] = addr - reg -= 1 - - location = self._shape_decompressor.next() - caller.frame_address = self.getlocation(callee, ebp_in_caller, - location) - # we get a NULL marker to mean "I'm the frame - # of the entry point, stop walking" - return caller.frame_address != llmemory.NULL - - def locate_caller_based_on_retaddr(self, retaddr, ebp_in_caller): - gcmapstart = llop.gc_asmgcroot_static(llmemory.Address, 0) - gcmapend = llop.gc_asmgcroot_static(llmemory.Address, 1) - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if not self._shape_decompressor.sorted: - # the item may have been not found because the main array was - # not sorted. Sort it and try again. 
- win32_follow_gcmap_jmp(gcmapstart, gcmapend) - sort_gcmap(gcmapstart, gcmapend) - self._shape_decompressor.sorted = True - item = search_in_gcmap(gcmapstart, gcmapend, retaddr) - if item: - self._shape_decompressor.setpos(item.signed[1]) - return - - if self._with_jit: - # item not found. We assume that it's a JIT-generated - # location -- but we check for consistency that ebp points - # to a JITFRAME object. - from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - - tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) - if (rffi.cast(lltype.Signed, tid) == - rffi.cast(lltype.Signed, self.frame_tid)): - # fish the depth - extra_stack_depth = (ebp_in_caller + STACK_DEPTH_OFS).signed[0] - ll_assert((extra_stack_depth & (rffi.sizeof(lltype.Signed) - 1)) - == 0, "asmgcc: misaligned extra_stack_depth") - extra_stack_depth //= rffi.sizeof(lltype.Signed) - self._shape_decompressor.setjitframe(extra_stack_depth) - return - llop.debug_fatalerror(lltype.Void, "cannot find gc roots!") - - def getlocation(self, callee, ebp_in_caller, location): - """Get the location in the 'caller' frame of a variable, based - on the integer 'location' that describes it. All locations are - computed based on information saved by the 'callee'. 
- """ - ll_assert(location >= 0, "negative location") - kind = location & LOC_MASK - offset = location & ~ LOC_MASK - if IS_64_BITS: - offset <<= 1 - if kind == LOC_REG: # register - if location == LOC_NOWHERE: - return llmemory.NULL - reg = (location >> 2) - 1 - ll_assert(reg < CALLEE_SAVED_REGS, "bad register location") - return callee.regs_stored_at[reg] - elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp) - esp_in_caller = callee.frame_address + sizeofaddr - return esp_in_caller + offset - elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp) - return ebp_in_caller + offset - else: # kind == LOC_EBP_MINUS: at -N(%ebp) - return ebp_in_caller - offset - - def mark_jit_frame_can_stop(self, callee): - location = self._shape_decompressor.get_threadlocal_loc() - if location == LOC_NOWHERE: - return False - addr = self.getlocation(callee, llmemory.NULL, location) - # - x = addr.signed[0] - if x & 1: - return True # this JIT stack frame is already marked! - else: - addr.signed[0] = x | 1 # otherwise, mark it but don't stop - return False - - -LOC_REG = 0 -LOC_ESP_PLUS = 1 -LOC_EBP_PLUS = 2 -LOC_EBP_MINUS = 3 -LOC_MASK = 0x03 -LOC_NOWHERE = LOC_REG | 0 - -# ____________________________________________________________ - -sizeofaddr = llmemory.sizeof(llmemory.Address) -arrayitemsize = 2 * sizeofaddr - - -def binary_search(start, end, addr1): - """Search for an element in a sorted array. - - The interval from the start address (included) to the end address - (excluded) is assumed to be a sorted arrays of pairs (addr1, addr2). - This searches for the item with a given addr1 and returns its - address. If not found exactly, it tries to return the address - of the item left of addr1 (i.e. such that result.address[0] < addr1). 
- """ - count = (end - start) // arrayitemsize - while count > 1: - middleindex = count // 2 - middle = start + middleindex * arrayitemsize - if addr1 < middle.address[0]: - count = middleindex - else: - start = middle - count -= middleindex - return start - -def search_in_gcmap(gcmapstart, gcmapend, retaddr): - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item # found - # 'retaddr' not exactly found. Check that 'item' is the start of a - # compressed range that includes 'retaddr'. - if retaddr > item.address[0] and item.signed[1] < 0: - return item # ok - else: - return llmemory.NULL # failed - -def search_in_gcmap2(gcmapstart, gcmapend, retaddr): - # same as 'search_in_gcmap', but without range checking support - # (item.signed[1] is an address in this case, not a signed at all!) - item = binary_search(gcmapstart, gcmapend, retaddr) - if item.address[0] == retaddr: - return item.address[1] # found - else: - return llmemory.NULL # failed - -def sort_gcmap(gcmapstart, gcmapend): - count = (gcmapend - gcmapstart) // arrayitemsize - qsort(gcmapstart, - rffi.cast(rffi.SIZE_T, count), - rffi.cast(rffi.SIZE_T, arrayitemsize), - c_compare_gcmap_entries) - -def replace_dead_entries_with_nulls(start, end): - # replace the dead entries (null value) with a null key. - count = (end - start) // arrayitemsize - 1 - while count >= 0: - item = start + count * arrayitemsize - if item.address[1] == llmemory.NULL: - item.address[0] = llmemory.NULL - count -= 1 - -if sys.platform == 'win32': - def win32_follow_gcmap_jmp(start, end): - # The initial gcmap table contains addresses to a JMP - # instruction that jumps indirectly to the real code. - # Replace them with the target addresses. 
- assert rffi.SIGNEDP is rffi.LONGP, "win64 support missing" - while start < end: - code = rffi.cast(rffi.CCHARP, start.address[0])[0] - if code == '\xe9': # jmp - rel32 = rffi.cast(rffi.SIGNEDP, start.address[0]+1)[0] - target = start.address[0] + (rel32 + 5) - start.address[0] = target - start += arrayitemsize -else: - def win32_follow_gcmap_jmp(start, end): - pass - -# ____________________________________________________________ - -class ShapeDecompressor: - _alloc_flavor_ = "raw" - - sorted = False - - def setpos(self, pos): - if pos < 0: - pos = ~ pos # can ignore this "range" marker here - gccallshapes = llop.gc_asmgcroot_static(llmemory.Address, 2) - self.addr = gccallshapes + pos - self.jit_index = -1 - - def setjitframe(self, extra_stack_depth): - self.jit_index = 0 - self.extra_stack_depth = extra_stack_depth - - def next(self): - index = self.jit_index - if index < 0: - # case "outside the jit" - addr = self.addr - value = 0 - while True: - b = ord(addr.char[0]) - addr += 1 - value += b From pypy.commits at gmail.com Sun Dec 15 07:27:42 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 04:27:42 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release Message-ID: <5df626be.1c69fb81.1e2d7.8985@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98295:008914050bae Date: 2019-12-15 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/008914050bae/ Log: merge py3.6 into release diff too long, truncating to 2000 out of 13429 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -57,3 +57,5 @@ 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0rc2 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 +e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 +533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py 
b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -76,6 +76,7 @@ static const long SSL_OP_SINGLE_DH_USE; static const long SSL_OP_EPHEMERAL_RSA; static const long SSL_OP_MICROSOFT_SESS_ID_BUG; +static const long SSL_OP_ENABLE_MIDDLEBOX_COMPAT; static const long SSL_OP_NETSCAPE_CHALLENGE_BUG; static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG; diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py --- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -1,4 +1,5 @@ import sys +import os import time import _thread import weakref @@ -83,6 +84,11 @@ OP_NO_SSLv2 = lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 +if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): + # OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + # XXX should be conditionally compiled into lib + OP_ENABLE_MIDDLEBOX_COMPAT = 0x00100000 + SSL_CLIENT = 0 @@ -270,6 +276,20 @@ mode |= lib.SSL_MODE_AUTO_RETRY lib.SSL_set_mode(ssl, mode) + if HAS_TLSv1_3: + if sslctx._post_handshake_auth: + if socket_type == SSL_SERVER: + # bpo-37428: OpenSSL does not ignore SSL_VERIFY_POST_HANDSHAKE. + # Set SSL_VERIFY_POST_HANDSHAKE flag only for server sockets and + # only in combination with SSL_VERIFY_PEER flag. 
+ mode = lib.SSL_CTX_get_verify_mode(lib.SSL_get_SSL_CTX(self.ssl)) + if (mode & lib.SSL_VERIFY_PEER): + verify_cb = lib.SSL_get_verify_callback(self.ssl) + mode |= lib.SSL_VERIFY_POST_HANDSHAKE + lib.SSL_set_verify(ssl, mode, verify_cb) + else: + lib.SSL_set_post_handshake_auth(ssl, 1) + if HAS_SNI and self.server_hostname: name = _str_to_ffi_buffer(self.server_hostname) lib.SSL_set_tlsext_host_name(ssl, name) @@ -687,6 +707,15 @@ else: return None + def verify_client_post_handshake(self): + + if not HAS_TLSv1_3: + raise NotImplementedError("Post-handshake auth is not supported by " + "your OpenSSL version.") + err = lib.SSL_verify_client_post_handshake(self.ssl); + if err == 0: + raise pyssl_error(self, err) + def pending(self): count = lib.SSL_pending(self.ssl) if count < 0: @@ -743,6 +772,7 @@ return bool(lib.SSL_session_reused(self.ssl)) + def _fs_decode(name): return name.decode(sys.getfilesystemencoding()) def _fs_converter(name): @@ -798,13 +828,13 @@ if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): aead = lib.SSL_CIPHER_is_aead(cipher) nid = lib.SSL_CIPHER_get_cipher_nid(cipher) - skcipher = OBJ_nid2ln(nid) if nid != NID_undef else None + skcipher = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_digest_nid(cipher); - digest = OBJ_nid2ln(nid) if nid != NID_undef else None + digest = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_kx_nid(cipher); - kx = OBJ_nid2ln(nid) if nid != NID_undef else None - nid = SSL_CIPHER_get_auth_nid(cipher); - auth = OBJ_nid2ln(nid) if nid != NID_undef else None + kx = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None + nid = lib.SSL_CIPHER_get_auth_nid(cipher); + auth = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None ret.update({'aead' : bool(aead), 'symmmetric' : skcipher, 'digest' : digest, @@ -864,9 +894,8 @@ class _SSLContext(object): __slots__ = ('ctx', '_check_hostname', 'servername_callback', 'alpn_protocols', '_alpn_protocols_handle', - 'npn_protocols', 
'set_hostname', + 'npn_protocols', 'set_hostname', '_post_handshake_auth', '_set_hostname_handle', '_npn_protocols_handle') - def __new__(cls, protocol): self = object.__new__(cls) self.ctx = ffi.NULL @@ -943,6 +972,9 @@ if lib.Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST: store = lib.SSL_CTX_get_cert_store(self.ctx) lib.X509_STORE_set_flags(store, lib.X509_V_FLAG_TRUSTED_FIRST) + if HAS_TLSv1_3: + self.post_handshake_auth = 0; + lib.SSL_CTX_set_post_handshake_auth(self.ctx, self.post_handshake_auth) return self @property @@ -1028,6 +1060,7 @@ "CERT_OPTIONAL or CERT_REQUIRED") self._check_hostname = check_hostname + def set_ciphers(self, cipherlist): cipherlistbuf = _str_to_ffi_buffer(cipherlist) ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf) @@ -1238,6 +1271,12 @@ return stats def set_default_verify_paths(self): + if (not os.environ.get('SSL_CERT_FILE') and + not os.environ.get('SSL_CERT_DIR') and + not sys.platform == 'win32'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1357,6 +1396,44 @@ sock = _SSLSocket._new__ssl_socket(self, None, server_side, hostname, incoming, outgoing) return sock + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = 
bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! @@ -1581,20 +1658,69 @@ lib.RAND_add(buf, len(buf), entropy) def get_default_verify_paths(): + ''' + Find a certificate store and associated values + Returns something like + `('SSL_CERT_FILE', '/usr/lib/ssl/cert.pem', 'SSL_CERT_DIR', '/usr/lib/ssl/certs')` + on Ubuntu and windows10 + + `('SSL_CERT_FILE', '/usr/local/cert.pem', 'SSL_CERT_DIR', '/usr/local/certs')` + on CentOS + + `('SSL_CERT_FILE', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem', + 'SSL_CERT_DIR', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/certs')` + on Darwin + + For portable builds (based on CentOS, but could be running on any glibc + linux) we need to check other locations. The list of places to try was taken + from golang in Dec 2018: + https://golang.org/src/crypto/x509/root_unix.go (for the directories), + https://golang.org/src/crypto/x509/root_linux.go (for the files) + ''' + certFiles = [ + "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Gentoo etc. 
+ "/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", # OpenSUSE + "/etc/pki/tls/cacert.pem", # OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", # CentOS/RHEL 7 + "/etc/ssl/cert.pem", # Alpine Linux + ] + certDirectories = [ + "/etc/ssl/certs", # SLES10/SLES11 + "/system/etc/security/cacerts", # Android + "/usr/local/share/certs", # FreeBSD + "/etc/pki/tls/certs", # Fedora/RHEL + "/etc/openssl/certs", # NetBSD + "/var/ssl/certs", # AIX + ] + + # optimization: reuse the values from a local varaible + if getattr(get_default_verify_paths, 'retval', None): + return get_default_verify_paths.retval + + # This should never fail, it should always return SSL_CERT_FILE and SSL_CERT_DIR ofile_env = _cstr_decode_fs(lib.X509_get_default_cert_file_env()) - if ofile_env is None: - return None + odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) + + # Platform depenedent ofile = _cstr_decode_fs(lib.X509_get_default_cert_file()) - if ofile is None: - return None - odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) - if odir_env is None: - return None odir = _cstr_decode_fs(lib.X509_get_default_cert_dir()) - if odir is None: - return odir - return (ofile_env, ofile, odir_env, odir); + + if os.path.exists(ofile) and os.path.exists(odir): + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + + # OpenSSL didn't supply the goods. 
Try some other options + for f in certFiles: + if os.path.exists(f): + ofile = f + for f in certDirectories: + if os.path.exists(f): + odir = f + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + @ffi.callback("int(SSL*,unsigned char **,unsigned char *,const unsigned char *,unsigned int,void *)") def select_alpn_callback(ssl, out, outlen, client_protocols, client_protocols_len, args): diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -14,7 +14,7 @@ conf = get_pypy_config() conf.translation.gc = "boehm" with py.test.raises(ConfigError): - conf.translation.gcrootfinder = 'asmgcc' + conf.translation.gcrootfinder = 'shadowstack' def test_frameworkgc(): for name in ["minimark", "semispace"]: diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,16 +1,7 @@ Choose the method used to find the roots in the GC. This only -applies to our framework GCs. You have a choice of two -alternatives: +applies to our framework GCs. - ``--gcrootfinder=shadowstack``: use a so-called "shadow stack", which is an explicitly maintained custom stack of - root pointers. This is the most portable solution. - -- ``--gcrootfinder=asmgcc``: use assembler hackery to find the - roots directly from the normal stack. This is a bit faster, - but platform specific. It works so far with GCC or MSVC, - on i386 and x86-64. It is tested only on Linux - so other platforms (as well as MSVC) may need - various fixes before they can be used. Note asmgcc will be deprecated - at some future date, and does not work with clang. - + root pointers. This is the most portable solution, and also + the only one available now. 
diff --git a/pypy/doc/contributing.rst b/pypy/doc/contributing.rst --- a/pypy/doc/contributing.rst +++ b/pypy/doc/contributing.rst @@ -311,16 +311,13 @@ directory or even the top level subdirectory ``pypy``. It takes hours and uses huge amounts of RAM and is not recommended. -To run CPython regression tests you can point to the ``lib-python`` -directory:: - - py.test lib-python/2.7/test/test_datetime.py - -This will usually take a long time because this will run -the PyPy Python interpreter on top of CPython. On the plus -side, it's usually still faster than doing a full translation -and running the regression test with the translated PyPy Python -interpreter. +To run CPython regression tests, you should start with a translated PyPy and +run the tests as you would with CPython (see below). You can, however, also +attempt to run the tests before translation, but be aware that it is done with +a hack that doesn't work in all cases and it is usually extremely slow: +``py.test lib-python/2.7/test/test_datetime.py``. Usually, a better idea is to +extract a minimal failing test of at most a few lines, and put it into one of +our own tests in ``pypy/*/test/``. .. _py.test testing tool: http://pytest.org .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage @@ -350,6 +347,11 @@ cpython2 pytest.py -A pypy/module/cpyext/test --python=path/to/pypy3 +To run a test from the standard CPython regression test suite, use the regular +Python way, i.e. 
(replace "pypy" with the exact binary name, if needed):: + + pypy -m test.test_datetime + Tooling & Utilities ^^^^^^^^^^^^^^^^^^^ diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -133,6 +133,11 @@ * Better support and report MSVC versions used to compile on windows * Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) * Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) +* Support OpenSSL 1.1 and TLSv1_3 +* Remove the (deprecated since 5.7) asmgcc rootfinder from the GC +* Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit + platforms rather than automatically using a ``SignedLongLong``, require an + explicit ``r_int64()`` call instead C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -177,13 +182,18 @@ * Adds encoding, decoding codepages on win32 * Remove socket error attributes from ``_ssl`` (`issue 3119`_) * Add missing ``os.getgrouplist`` (part of `issue 2375`_) +* Back-port the tentative fix from cpython: "Import deadlock detection causes + deadlock" (part of `issue 3111`_) +* Fix handling of ``sys.exc_info()`` in generators +* Return ``W_IntObject`` when converting from ``float`` to ``int`` when + possible, which speeds up many code paths. Python 3.6 C-API ~~~~~~~~~~~~~~~~ * Add ``PyObject_GenericGetDict``, ``PyObject_GenericSetDict``, ``_Py_strhex``, ``_Py_strhex_bytes``, ``PyUnicodeNew``, ``_PyFinalizing``, - ``PySlice_Unpack``, ``PySlice_AdjustIndices`` + ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath`` * Implement ``pystrhex.h`` (`issue 2687`_) * Make ``PyUnicodeObject`` slightly more compact * Fix memory leak when releasing a ``PyUnicodeObject`` @@ -210,6 +220,7 @@ .. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 .. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 .. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. 
_`issue 3111`: https://bitbucket.com/pypy/pypy/issues/3111 .. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 .. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,5 @@ ============================ .. this is a revision shortly after release-pypy-7.3.0 -.. startrev: dbbbae99135f +.. startrev: 994c42529580 -.. branch: backport-decode_timeval_ns-py3.7 - -Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython diff --git a/pypy/doc/whatsnew-pypy2-7.3.0.rst b/pypy/doc/whatsnew-pypy2-7.3.0.rst --- a/pypy/doc/whatsnew-pypy2-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy2-7.3.0.rst @@ -31,3 +31,11 @@ anonymous struct/unions, cmake fragments for distribution, optimizations for PODs, and faster wrapper calls. +.. branch: backport-decode_timeval_ns-py3.7 + +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. diff --git a/pypy/doc/whatsnew-pypy3-7.3.0.rst b/pypy/doc/whatsnew-pypy3-7.3.0.rst --- a/pypy/doc/whatsnew-pypy3-7.3.0.rst +++ b/pypy/doc/whatsnew-pypy3-7.3.0.rst @@ -17,3 +17,8 @@ .. branch: code_page-utf8 Add encoding, decoding of codepages on windows + +.. branch: py3.6-exc-info-2 + +Fix handling of sys.exc_info() in generators + diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -3,5 +3,5 @@ ========================== .. this is the revision after release-pypy3.6-v7.3.0 -.. startrev: 78b4d0a7cf2e +.. 
startrev: a56889d5df88 diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -34,7 +34,6 @@ # time it is the exception caught by the topmost 'except ... as e:' # app-level block. self.sys_exc_operror = None - self.previous_operror_stack = [] self.w_tracefunc = None self.is_tracing = 0 self.compiler = space.createcompiler() @@ -248,15 +247,7 @@ # NOTE: the result is not the wrapped sys.exc_info() !!! """ - result = self.sys_exc_operror - if result is None: - i = len(self.previous_operror_stack) - 1 - while i >= 0: - result = self.previous_operror_stack[i] - if result is not None: - break - i -= 1 - return result + return self.sys_exc_operror def set_sys_exc_info(self, operror): self.sys_exc_operror = operror @@ -277,26 +268,6 @@ operror = OperationError(w_type, w_value, tb) self.set_sys_exc_info(operror) - def enter_error_stack_item(self, saved_operr): - # 'sys_exc_operror' should be logically considered as the last - # item on the stack, so pushing a new item has the following effect: - self.previous_operror_stack.append(self.sys_exc_operror) - self.sys_exc_operror = saved_operr - - def leave_error_stack_item(self): - result = self.sys_exc_operror - self.sys_exc_operror = self.previous_operror_stack.pop() - return result - - def fetch_and_clear_error_stack_state(self): - result = self.sys_exc_operror, self.previous_operror_stack - self.sys_exc_operror = None - self.previous_operror_stack = [] - return result - - def restore_error_stack_state(self, saved): - self.sys_exc_operror, self.previous_operror_stack = saved - @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -119,7 +119,10 @@ "can't send non-None value to a just-started %s", self.KIND) ec = 
space.getexecutioncontext() - ec.enter_error_stack_item(self.saved_operr) + current_exc_info = ec.sys_exc_info() + if self.saved_operr is not None: + ec.set_sys_exc_info(self.saved_operr) + self.saved_operr = None self.running = True try: w_result = frame.execute_frame(w_arg_or_err) @@ -140,7 +143,9 @@ # note: this is not perfectly correct: see # test_exc_info_in_generator_4. But it's simpler and # bug-to-bug compatible with CPython 3.5 and 3.6. - self.saved_operr = ec.leave_error_stack_item() + if frame._any_except_or_finally_handler(): + self.saved_operr = ec.sys_exc_info() + ec.set_sys_exc_info(current_exc_info) return w_result def get_delegate(self): diff --git a/pypy/interpreter/test/apptest_generator.py b/pypy/interpreter/test/apptest_generator.py --- a/pypy/interpreter/test/apptest_generator.py +++ b/pypy/interpreter/test/apptest_generator.py @@ -1,5 +1,7 @@ from pytest import raises, skip +import sys + def test_generator(): def f(): yield 1 @@ -462,7 +464,6 @@ assert closed == [True] def test_exc_info_in_generator(): - import sys def g(): try: raise ValueError @@ -533,8 +534,35 @@ try: raise IndexError except IndexError: - assert next(gen) is 1 - assert next(gen) is 2 + assert next(gen) == 1 + assert next(gen) == 2 + +def test_except_gen_except(): + def gen(): + try: + assert sys.exc_info()[0] is None + yield + # we are called from "except ValueError:", TypeError must + # inherit ValueError in its context + raise TypeError() + except TypeError as exc: + assert sys.exc_info()[0] is TypeError + assert type(exc.__context__) is ValueError + # here we are still called from the "except ValueError:" + assert sys.exc_info()[0] is ValueError + yield + assert sys.exc_info()[0] is None + yield "done" + + g = gen() + next(g) + try: + raise ValueError + except Exception: + next(g) + + assert next(g) == "done" + assert sys.exc_info() == (None, None, None) def test_multiple_invalid_sends(): def mygen(): @@ -793,13 +821,9 @@ yield from map(operator.truediv, [2, 3], [4, 0]) 
gen = f() assert next(gen) == 0.5 - try: + with raises(ZeroDivisionError) as excinfo: next(gen) - except ZeroDivisionError as e: - assert e.__context__ is not None - assert isinstance(e.__context__, ValueError) - else: - assert False, "should have raised" + assert isinstance(excinfo.value.__context__, ValueError) def test_past_generator_stop(): diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -43,8 +43,7 @@ from rpython.rlib import rgil rgil.acquire() - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) @@ -69,7 +68,6 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) - rffi.stackcounter.stacks_counter -= 1 rgil.release() diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py --- a/pypy/module/_continuation/interp_continuation.py +++ b/pypy/module/_continuation/interp_continuation.py @@ -46,9 +46,9 @@ # global_state.origin = self self.sthread = sthread - saved_error_state = pre_switch(sthread) + saved_exception = pre_switch(sthread) h = sthread.new(new_stacklet_callback) - post_switch(sthread, h, saved_error_state) + post_switch(sthread, h, saved_exception) def switch(self, w_to): sthread = self.sthread @@ -84,9 +84,9 @@ # double switch: the final destination is to.h global_state.destination = to # - saved_error_state = pre_switch(sthread) + saved_exception = pre_switch(sthread) h = sthread.switch(global_state.destination.h) - return post_switch(sthread, h, saved_error_state) + return post_switch(sthread, h, saved_exception) @unwrap_spec(w_value = WrappedDefault(None), w_to = WrappedDefault(None)) @@ -257,9 +257,11 @@ return self.h def pre_switch(sthread): - return 
sthread.ec.fetch_and_clear_error_stack_state() + saved_exception = sthread.ec.sys_exc_info() + sthread.ec.set_sys_exc_info(None) + return saved_exception -def post_switch(sthread, h, saved_error_state): +def post_switch(sthread, h, saved_exception): origin = global_state.origin self = global_state.destination global_state.origin = None @@ -268,7 +270,7 @@ # current = sthread.ec.topframeref sthread.ec.topframeref = self.bottomframe.f_backref - sthread.ec.restore_error_stack_state(saved_error_state) + sthread.ec.set_sys_exc_info(saved_exception) self.bottomframe.f_backref = origin.bottomframe.f_backref origin.bottomframe.f_backref = current # diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1046,8 +1046,7 @@ else: gilstate = pystate.PyGILState_IGNORE - rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py + llop.gc_stack_bottom(lltype.Void) # marker to enter RPython from C retval = fatal_value boxed_args = () tb = None @@ -1124,7 +1123,6 @@ return fatal_value assert lltype.typeOf(retval) == restype - rffi.stackcounter.stacks_counter -= 1 _restore_gil_state(pygilstate_release, gilstate, gil_release, _gil_auto, tid) return retval diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -62,3 +62,26 @@ w_str = space.repr(w_obj) space.call_method(w_p, "write", w_str) return 0 + + at cpython_api([PyObject], PyObject) +def PyOS_FSPath(space, w_path): + """ + Return the file system representation for path. If the object is a str or + bytes object, then its reference count is incremented. If the object + implements the os.PathLike interface, then __fspath__() is returned as long + as it is a str or bytes object. Otherwise TypeError is raised and NULL is + returned. 
+ """ + if (space.isinstance_w(w_path, space.w_unicode) or + space.isinstance_w(w_path, space.w_bytes)): + return w_path + if not space.lookup(w_path, '__fspath__'): + raise oefmt(space.w_TypeError, + "expected str, bytes or os.PathLike object, not %T", w_path) + w_ret = space.call_method(w_path, '__fspath__') + if (space.isinstance_w(w_ret, space.w_unicode) or + space.isinstance_w(w_ret, space.w_bytes)): + return w_ret + raise oefmt(space.w_TypeError, + "expected %T.__fspath__() to return str or bytes, not %T", w_path, w_ret) + diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -1,6 +1,7 @@ import pytest from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.object import Py_PRINT_RAW +from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi from rpython.tool.udir import udir @@ -70,3 +71,31 @@ out, err = capfd.readouterr() out = out.replace('\r\n', '\n') assert out == "test\n'test\\n'" + + def test_fspath(self, space, api): + w_obj = space.newtext("test") + w_ret = api.PyOS_FSPath(w_obj) + assert space.eq_w(w_ret, w_obj) + + w_obj = space.newint(3) + with pytest.raises(OperationError): + w_ret = api.PyOS_FSPath(w_obj) + + + w_p1 = space.appexec([], '''(): + class Pathlike(): + def __fspath__(self): + return 'test' + return Pathlike()''') + + w_p2 = space.appexec([], '''(): + class UnPathlike(): + def __fspath__(self): + return 42 + return UnPathlike()''') + + w_ret = api.PyOS_FSPath(w_p1) + assert space.eq_w(w_ret, space.newtext('test')) + + with pytest.raises(OperationError): + w_ret = api.PyOS_FSPath(w_p2) diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -156,12 +156,12 @@ Return the floor of x as an int. This is the largest integral value <= x. 
""" - from pypy.objspace.std.longobject import newlong_from_float + from pypy.objspace.std.floatobject import newint_from_float w_descr = space.lookup(w_x, '__floor__') if w_descr is not None: return space.get_and_call_function(w_descr, w_x) x = _get_double(space, w_x) - return newlong_from_float(space, math.floor(x)) + return newint_from_float(space, math.floor(x)) def sqrt(space, w_x): """sqrt(x) @@ -259,11 +259,11 @@ Return the ceiling of x as an int. This is the smallest integral value >= x. """ - from pypy.objspace.std.longobject import newlong_from_float + from pypy.objspace.std.floatobject import newint_from_float w_descr = space.lookup(w_x, '__ceil__') if w_descr is not None: return space.get_and_call_function(w_descr, w_x) - return newlong_from_float(space, math1_w(space, math.ceil, w_x)) + return newint_from_float(space, math1_w(space, math.ceil, w_x)) def sinh(space, w_x): """sinh(x) diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -30,7 +30,7 @@ # called from the rffi-generated wrapper). The gc_thread_run() # operation will automatically notice that the current thread id was # not seen before, and (in shadowstack) it will allocate and use a -# fresh new stack. Again, this has no effect in asmgcc. +# fresh new stack. # # * Only then does bootstrap() really run. The first thing it does # is grab the start-up information (app-level callable and args) @@ -43,7 +43,7 @@ # thread. # # * Just before a thread finishes, gc_thread_die() is called to free -# its shadow stack. This has no effect in asmgcc. +# its shadow stack. 
class Bootstrapper(object): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -145,6 +145,15 @@ return space.w_NotImplemented return func_with_new_name(_compare, 'descr_' + opname) +def newint_from_float(space, floatval): + """This is also used from module/math/interp_math.py""" + try: + value = ovfcheck_float_to_int(floatval) + except OverflowError: + return newlong_from_float(space, floatval) + else: + return space.newint(value) + class W_FloatObject(W_Root): """This is a implementation of the app-level 'float' type. @@ -440,12 +449,7 @@ return W_FloatObject(a) def descr_trunc(self, space): - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return newlong_from_float(space, self.floatval) - else: - return space.newint(value) + return newint_from_float(space, self.floatval) def descr_neg(self, space): return W_FloatObject(-self.floatval) @@ -935,7 +939,7 @@ if math.fabs(x - rounded) == 0.5: # halfway case: round to even rounded = 2.0 * rfloat.round_away(x / 2.0) - return newlong_from_float(space, rounded) + return newint_from_float(space, rounded) # interpret 2nd argument as a Py_ssize_t; clip on overflow ndigits = space.getindex_w(w_ndigits, None) diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -5,6 +5,7 @@ import py from pypy.objspace.std.floatobject import W_FloatObject, _remove_underscores +from pypy.objspace.std.intobject import W_IntObject class TestW_FloatObject: @@ -127,6 +128,10 @@ for s in invalid: pytest.raises(ValueError, _remove_underscores, s) +def test_avoid_bigints(space): + w_f = space.newfloat(123.456) + assert isinstance(w_f.descr_trunc(space), W_IntObject) + assert isinstance(w_f.descr___round__(space), W_IntObject) class AppTestAppFloatTest: diff --git 
a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,8 +2,8 @@ pmaj=2 # python main version: 2 or 3 pmin=7 # python minor version maj=7 -min=2 -rev=0rc2 +min=3 +rev=0rc1 case $pmaj in "2") exe=pypy;; diff --git a/rpython/config/test/test_translationoption.py b/rpython/config/test/test_translationoption.py --- a/rpython/config/test/test_translationoption.py +++ b/rpython/config/test/test_translationoption.py @@ -13,13 +13,6 @@ config.translation.gcrootfinder = "shadowstack" py.test.raises(ConflictConfigError, set_opt_level, config, '0') -if compiler.name == 'msvc' or sys.platform == 'darwin': - def test_no_asmgcrot_on_msvc(): - config = get_combined_translation_config() - config.translation.gcrootfinder = "asmgcc" - py.test.raises(ConfigError, set_opt_level, config, 'jit') - - def test_get_translation_config(): from rpython.translator.interactive import Translation from rpython.config import config diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -18,10 +18,6 @@ DEFL_GC = "incminimark" # XXX DEFL_ROOTFINDER_WITHJIT = "shadowstack" -## if sys.platform.startswith("linux"): -## _mach = os.popen('uname -m', 'r').read().strip() -## if _mach.startswith('x86') or _mach in ['i386', 'i486', 'i586', 'i686']: -## DEFL_ROOTFINDER_WITHJIT = "asmgcc" # only for Linux on x86 / x86-64 IS_64_BITS = sys.maxint > 2147483647 @@ -100,13 +96,11 @@ default=IS_64_BITS, cmdline="--gcremovetypeptr"), ChoiceOption("gcrootfinder", "Strategy for finding GC Roots (framework GCs only)", - ["n/a", "shadowstack", "asmgcc"], + ["n/a", "shadowstack"], "shadowstack", cmdline="--gcrootfinder", requires={ "shadowstack": [("translation.gctransformer", "framework")], - "asmgcc": [("translation.gctransformer", "framework"), - ("translation.backend", "c")], }), # other noticeable options @@ 
-402,10 +396,6 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc - # disallow asmgcc on OS/X and on Win32 - if config.translation.gcrootfinder == "asmgcc": - if sys.platform == "darwin" or sys.platform =="win32": - raise ConfigError("'asmgcc' not supported on this platform") # ---------------------------------------------------------------- diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -438,51 +438,8 @@ @staticmethod @rgc.no_collect - def _reacquire_gil_asmgcc(css, old_rpy_fastgil): - # Before doing an external call, 'rpy_fastgil' is initialized to - # be equal to css. This function is called if we find out after - # the call that it is no longer equal to css. See description - # in translator/c/src/thread_pthread.c. - - # XXX some duplicated logic here, but note that rgil.acquire() - # does more than just RPyGilAcquire() - if old_rpy_fastgil == 0: - # this case occurs if some other thread stole the GIL but - # released it again. What occurred here is that we changed - # 'rpy_fastgil' from 0 to 1, thus successfully reaquiring the - # GIL. - pass - - elif old_rpy_fastgil == 1: - # 'rpy_fastgil' was (and still is) locked by someone else. - # We need to wait for the regular mutex. - from rpython.rlib import rgil - rgil.acquire() - else: - # stole the GIL from a different thread that is also - # currently in an external call from the jit. Attach - # the 'old_rpy_fastgil' into the chained list. 
- from rpython.memory.gctransform import asmgcroot - oth = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, old_rpy_fastgil) - next = asmgcroot.gcrootanchor.next - oth.next = next - oth.prev = asmgcroot.gcrootanchor - asmgcroot.gcrootanchor.next = oth - next.prev = oth - - # similar to trackgcroot.py:pypy_asm_stackwalk, second part: - # detach the 'css' from the chained list - from rpython.memory.gctransform import asmgcroot - old = rffi.cast(asmgcroot.ASM_FRAMEDATA_HEAD_PTR, css) - prev = old.prev - next = old.next - prev.next = next - next.prev = prev - - @staticmethod - @rgc.no_collect def _reacquire_gil_shadowstack(): - # Simplified version of _reacquire_gil_asmgcc(): in shadowstack mode, + # This used to be more complex for asmgcc. In shadowstack mode, # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) @@ -499,13 +456,10 @@ self._reacquire_gil_shadowstack) self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) else: - reacqgil_func = llhelper(self._REACQGIL2_FUNC, - self._reacquire_gil_asmgcc) - self.reacqgil_addr = self.cpu.cast_ptr_to_int(reacqgil_func) + raise AssertionError("!is_shadow_stack") def _is_asmgcc(self): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - return bool(gcrootmap) and not gcrootmap.is_shadow_stack + return False # legacy def debug_bridge(descr_number, rawstart, codeendpos): diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -21,7 +21,6 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler -from rpython.memory.gctransform import asmgcroot from rpython.jit.codewriter.effectinfo import EffectInfo # 
____________________________________________________________ @@ -117,7 +116,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(jitframe.JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth', + 'jf_frame_info', 'jf_gcmap', 'jf_savedata', 'jf_forward']: setattr(descrs, name, cpu.fielddescrof(jitframe.JITFRAME, name)) descrs.jfi_frame_size = cpu.fielddescrof(jitframe.JITFRAMEINFO, @@ -244,15 +243,6 @@ # ____________________________________________________________ # All code below is for the hybrid or minimark GC -class GcRootMap_asmgcc(object): - is_shadow_stack = False - - def __init__(self, gcdescr): - pass - - def register_asm_addr(self, start, mark): - pass - class GcRootMap_shadowstack(object): is_shadow_stack = True diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -49,7 +49,6 @@ rgc.register_custom_trace_hook(JITFRAME, lambda_jitframe_trace) frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info - frame.jf_extra_stack_depth = 0 return frame def jitframe_resolve(frame): @@ -71,8 +70,6 @@ ('jf_force_descr', llmemory.GCREF), # a map of GC pointers ('jf_gcmap', lltype.Ptr(GCMAP)), - # how much we decrease stack pointer. 
Used around calls and malloc slowpath - ('jf_extra_stack_depth', lltype.Signed), # For the front-end: a GCREF for the savedata ('jf_savedata', llmemory.GCREF), # For GUARD_(NO)_EXCEPTION and GUARD_NOT_FORCED: the exception we @@ -103,7 +100,6 @@ LENGTHOFS = llmemory.arraylengthoffset(JITFRAME.jf_frame) SIGN_SIZE = llmemory.sizeof(lltype.Signed) UNSIGN_SIZE = llmemory.sizeof(lltype.Unsigned) -STACK_DEPTH_OFS = getofs('jf_extra_stack_depth') def jitframe_trace(gc, obj_addr, callback, arg): gc._trace_callback(callback, arg, obj_addr + getofs('jf_descr')) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -584,8 +584,6 @@ length = self.emit_getfield(ConstInt(frame_info), descr=descrs.jfi_frame_depth, raw=True) - self.emit_setfield(frame, self.c_zero, - descr=descrs.jf_extra_stack_depth) self.emit_setfield(frame, self.c_null, descr=descrs.jf_savedata) self.emit_setfield(frame, self.c_null, diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -110,7 +110,7 @@ class config_(object): class translation(object): gc = self.gc - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False class FakeTranslator(object): diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -507,7 +507,6 @@ ('jf_frame_info', lltype.Ptr(jitframe.JITFRAMEINFO)), ('jf_descr', llmemory.GCREF), ('jf_force_descr', llmemory.GCREF), - ('jf_extra_stack_depth', lltype.Signed), ('jf_guard_exc', llmemory.GCREF), ('jf_gcmap', lltype.Ptr(jitframe.GCMAP)), 
('jf_gc_trace_state', lltype.Signed), @@ -594,7 +593,7 @@ descrs = JitFrameDescrs() descrs.arraydescr = cpu.arraydescrof(JITFRAME) for name in ['jf_descr', 'jf_guard_exc', 'jf_force_descr', - 'jf_frame_info', 'jf_gcmap', 'jf_extra_stack_depth']: + 'jf_frame_info', 'jf_gcmap']: setattr(descrs, name, cpu.fielddescrof(JITFRAME, name)) descrs.jfi_frame_depth = cpu.fielddescrof(jitframe.JITFRAMEINFO, 'jfi_frame_depth') diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -170,7 +170,6 @@ jf_descr = framedescrs.jf_descr jf_guard_exc = framedescrs.jf_guard_exc jf_forward = framedescrs.jf_forward - jf_extra_stack_depth = framedescrs.jf_extra_stack_depth signedframedescr = self.cpu.signedframedescr floatframedescr = self.cpu.floatframedescr casmdescr.compiled_loop_token = clt @@ -386,7 +385,7 @@ class config_(object): class translation(object): gc = 'minimark' - gcrootfinder = 'asmgcc' + gcrootfinder = 'shadowstack' gctransformer = 'framework' gcremovetypeptr = False gcdescr = get_description(config_) @@ -1102,7 +1101,6 @@ p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) - %(setfield('p1', 0, jf_extra_stack_depth))s %(setfield('p1', 'NULL', jf_savedata))s %(setfield('p1', 'NULL', jf_force_descr))s %(setfield('p1', 'NULL', jf_descr))s diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -176,9 +176,6 @@ cls.cbuilder = compile(get_entry(allfuncs), cls.gc, gcrootfinder=cls.gcrootfinder, jit=True, thread=True) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - 
py.test.skip('asmgcc not supported') finally: GcLLDescr_framework.DEBUG = OLD_DEBUG diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -331,9 +331,6 @@ try: res = self.meta_interp(main, [400]) assert res == main(400) - except ConfigError as e: - assert str(e).startswith('invalid value asmgcc') - py.test.skip('asmgcc not supported') finally: del os.environ['PYPYLOG'] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4725,6 +4725,7 @@ def test_raw_load_int(self): from rpython.rlib import rawstorage + from rpython.rlib.rarithmetic import r_longlong for T in [rffi.UCHAR, rffi.SIGNEDCHAR, rffi.USHORT, rffi.SHORT, rffi.UINT, rffi.INT, @@ -4738,7 +4739,7 @@ p = rawstorage.alloc_raw_storage(31) for i in range(31): p[i] = '\xDD' - value = rffi.cast(T, -0x4243444546474849) + value = rffi.cast(T, r_longlong(-0x4243444546474849)) rawstorage.raw_storage_setitem(p, 16, value) got = self.cpu.bh_raw_load_i(rffi.cast(lltype.Signed, p), 16, arraydescr) diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -35,9 +35,7 @@ PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, - # and it can be left here for when it is needed. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and it can be left here for when it is needed. 
THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 @@ -45,12 +43,10 @@ PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, - # and is moved into this frame location. As an additional hack, - # with asmgcc, it is made odd-valued to mean "already seen this frame - # during the previous minor collection". + # and is moved into this frame location. THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD -assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 +assert PASS_ON_MY_FRAME >= 12 # return address, followed by FRAME_FIXED_SIZE words DEFAULT_FRAME_BYTES = (1 + FRAME_FIXED_SIZE) * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -137,11 +137,6 @@ self.expand_byte_mask_addr = float_constants + 64 self.element_ones = [float_constants + 80 + 16*i for i in range(4)] - def set_extra_stack_depth(self, mc, value): - if self._is_asmgcc(): - extra_ofs = self.cpu.get_ofs_of_frame_field('jf_extra_stack_depth') - mc.MOV_bi(extra_ofs, value) - def build_frame_realloc_slowpath(self): mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats) @@ -161,14 +156,20 @@ mc.MOV_sr(0, ebp.value) # align - self.set_extra_stack_depth(mc, align * WORD) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. 
+ # + #self.set_extra_stack_depth(mc, align * WORD) + self._store_and_reset_exception(mc, None, ebx, ecx) mc.CALL(imm(self.cpu.realloc_frame)) mc.MOV_rr(ebp.value, eax.value) self._restore_exception(mc, None, ebx, ecx) mc.ADD_ri(esp.value, (align - 1) * WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: @@ -196,12 +197,12 @@ # the caller already did push_gcmap(store=True) if IS_X86_64: mc.SUB(esp, imm(WORD)) # alignment - self.set_extra_stack_depth(mc, 2 * WORD) + #self.set_extra_stack_depth(mc, 2 * WORD) # the arguments are already in the correct registers else: # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 8 * WORD) + #self.set_extra_stack_depth(mc, 8 * WORD) # store the arguments at the correct place in the stack for i in range(4): mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) @@ -211,7 +212,7 @@ mc.ADD(esp, imm(WORD)) else: mc.ADD(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller self._pop_all_regs_from_frame(mc, [eax], supports_floats, callee_only) mc.RET() @@ -275,11 +276,11 @@ # (already in edx) # length mc.MOV_rr(esi.value, ecx.value) # tid mc.MOV_rs(edi.value, WORD * 3) # load the itemsize - self.set_extra_stack_depth(mc, 16) + #self.set_extra_stack_depth(mc, 16) mc.CALL(imm(follow_jump(addr))) self._reload_frame_if_necessary(mc) mc.ADD_ri(esp.value, 16 - WORD) - self.set_extra_stack_depth(mc, 0) + #self.set_extra_stack_depth(mc, 0) # mc.TEST_rr(eax.value, eax.value) # common case: not taken @@ -1018,8 +1019,6 @@ from rpython.rlib.rvmprof.rvmprof import cintf # edx = address of pypy_threadlocal_s self.mc.MOV_rs(edx.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(edx.value, ~1) # eax = (our local vmprof_tl_stack).next 
self.mc.MOV_rs(eax.value, (FRAME_FIXED_SIZE - 4 + 0) * WORD) # save in vmprof_tl_stack the value eax @@ -2236,25 +2235,6 @@ def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) - if self._is_asmgcc(): - # We need to remove the bit "already seen during the - # previous minor collection" instead of passing this - # value directly. - if IS_X86_64: - tmploc = esi # already the correct place - if argloc is tmploc: - # this case is theoretical only so far: in practice, - # argloc is always eax, never esi - self.mc.MOV_rr(edi.value, esi.value) - argloc = edi - else: - tmploc = eax - if tmploc is argloc: - tmploc = edx - self.mc.MOV(tmploc, threadlocal_loc) - self.mc.AND_ri(tmploc.value, ~1) - threadlocal_loc = tmploc - # self.simple_call(addr, [argloc, threadlocal_loc]) def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): @@ -2672,8 +2652,6 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) - if self._is_asmgcc(): - self.mc.AND_ri(resloc.value, ~1) self.load_from_mem(resloc, addr_add_const(resloc, offset), imm(size), imm(sign)) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -61,13 +61,6 @@ self.arglocs = arglocs + [fnloc] self.start_frame_size = self.mc._frame_size - def select_call_release_gil_mode(self): - AbstractCallBuilder.select_call_release_gil_mode(self) - if self.asm._is_asmgcc(): - from rpython.memory.gctransform import asmgcroot - self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS - assert self.stack_max >= 3 - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -103,9 +96,14 @@ # value eax, if necessary assert not self.is_call_release_gil current_esp = self.get_current_esp() - self.change_extra_stack_depth = (current_esp != 0) - if 
self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, -current_esp) + # + # * Note: these commented-out pieces of code about 'extra_stack_depth' + # * are not necessary any more, but they are kept around in case we + # * need in the future again to track the exact stack depth. + # + #self.change_extra_stack_depth = (current_esp != 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, -current_esp) noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() gcmap = self.asm._regalloc.get_gcmap([eax], noregs=noregs) self.asm.push_gcmap(self.mc, gcmap, store=True) @@ -119,13 +117,14 @@ # top at this point, so reuse it instead of loading it again ssreg = ebx self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) - if self.change_extra_stack_depth: - self.asm.set_extra_stack_depth(self.mc, 0) + #if self.change_extra_stack_depth: + # self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap assert self.is_call_release_gil + assert not self.asm._is_asmgcc() # # Save this thread's shadowstack pointer into 'ebx', # for later comparison @@ -135,38 +134,12 @@ rst = gcrootmap.get_root_stack_top_addr() self.mc.MOV(ebx, heap(rst)) # - if not self.asm._is_asmgcc(): - # shadowstack: change 'rpy_fastgil' to 0 (it should be - # non-zero right now). - self.change_extra_stack_depth = False - # ^^ note that set_extra_stack_depth() in this case is a no-op - css_value = imm(0) - else: - from rpython.memory.gctransform import asmgcroot - # build a 'css' structure on the stack: 2 words for the linkage, - # and 5/7 words as described for asmgcroot.ASM_FRAMEDATA, for a - # total size of JIT_USE_WORDS. This structure is found at - # [ESP+css]. 
- css = -self.get_current_esp() + ( - WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS)) - assert css >= 2 * WORD - # Save ebp - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - self.mc.MOV_sr(index_of_ebp, ebp.value) # MOV [css.ebp], EBP - # Save the "return address": we pretend that it's css - self.mc.LEA_rs(eax.value, css) # LEA eax, [css] - frame_ptr = css + WORD * (2+asmgcroot.FRAME_PTR) - self.mc.MOV_sr(frame_ptr, eax.value) # MOV [css.frame], eax - # Set up jf_extra_stack_depth to pretend that the return address - # was at css, and so our stack frame is supposedly shorter by - # (PASS_ON_MY_FRAME-JIT_USE_WORDS+1) words - delta = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS + 1 - self.change_extra_stack_depth = True - self.asm.set_extra_stack_depth(self.mc, -delta * WORD) - css_value = eax + # shadowstack: change 'rpy_fastgil' to 0 (it should be + # non-zero right now). + #self.change_extra_stack_depth = False # # <--here--> would come a memory fence, if the CPU needed one. 
- self.mc.MOV(heap(fastgil), css_value) + self.mc.MOV(heap(fastgil), imm(0)) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more @@ -184,8 +157,6 @@ self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, THREADLOCAL_OFS - self.get_current_esp()) - if self.asm._is_asmgcc(): - self.mc.AND_ri(self.tlofs_reg.value, ~1) return self.tlofs_reg def save_stack_position(self): @@ -318,13 +289,6 @@ cb = self.callbuilder if not cb.result_value_saved_early: cb.save_result_value(save_edx=False) - if assembler._is_asmgcc(): - if IS_X86_32: - css_value = edx - old_value = ecx - mc.MOV_sr(4, old_value.value) - mc.MOV_sr(0, css_value.value) - # on X86_64, they are already in the right registers mc.CALL(imm(follow_jump(assembler.reacqgil_addr))) if not cb.result_value_saved_early: cb.restore_result_value(save_edx=False) @@ -333,29 +297,10 @@ from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not - # (to acquiring the GIL, remove the asmgcc head from - # the chained list, etc.) + # (to acquiring the GIL) mc = self.mc restore_edx = False - if not self.asm._is_asmgcc(): - css = 0 - css_value = imm(0) - old_value = ecx - else: - from rpython.memory.gctransform import asmgcroot - css = WORD * (PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS) - if IS_X86_32: - assert css >= 16 - if self.restype == 'L': # long long result: eax/edx - if not self.result_value_saved_early: - mc.MOV_sr(12, edx.value) - restore_edx = True - css_value = edx # note: duplicated in ReacqGilSlowPath - old_value = ecx # - elif IS_X86_64: - css_value = edi - old_value = esi - mc.LEA_rs(css_value.value, css) + old_value = ecx # # Use XCHG as an atomic test-and-set-lock. It also implicitly # does a memory barrier. 
@@ -365,11 +310,12 @@ else: mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) - mc.CMP(old_value, css_value) + mc.CMP(old_value, imm(0)) # gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap - if bool(gcrootmap) and gcrootmap.is_shadow_stack: + if bool(gcrootmap): from rpython.jit.backend.x86.assembler import heap + assert gcrootmap.is_shadow_stack # # When doing a call_release_gil with shadowstack, there # is the risk that the 'rpy_fastgil' was free but the @@ -406,14 +352,8 @@ if not we_are_translated(): # for testing: now we can accesss mc.SUB(ebp, imm(1)) # ebp again # - # Now that we required the GIL, we can reload a possibly modified ebp - if self.asm._is_asmgcc(): - # special-case: reload ebp from the css - from rpython.memory.gctransform import asmgcroot - index_of_ebp = css + WORD * (2+asmgcroot.INDEX_OF_EBP) - mc.MOV_rs(ebp.value, index_of_ebp) # MOV EBP, [css.ebp] - #else: - # for shadowstack, done for us by _reload_frame_if_necessary() + # Now that we required the GIL, we will reload a possibly modified ebp: + # this done for us by _reload_frame_if_necessary() def save_result_value(self, save_edx): """Overridden in CallBuilder32 and CallBuilder64""" diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -829,10 +829,7 @@ self.xrm.before_call(save_all_regs=save_all_regs) if gc_level == SAVE_GCREF_REGS: gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap - # we save all the GCREF registers for shadowstack and asmgcc for now - # --- for asmgcc too: we can't say "register x is a gc ref" - # without distinguishing call sites, which we don't do any - # more for now. 
+ # we save all the GCREF registers for shadowstack if gcrootmap: # and gcrootmap.is_shadow_stack: save_all_regs = SAVE_GCREF_REGS self.rm.before_call(save_all_regs=save_all_regs) @@ -940,15 +937,6 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): - # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' - # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. - # We must make sure that edi and esi do not contain GC pointers. - if IS_X86_32 and self.assembler._is_asmgcc(): - for box, loc in self.rm.reg_bindings.items(): - if (loc == edi or loc == esi) and box.type == REF: - self.rm.force_spill_var(box) - assert box not in self.rm.reg_bindings - # args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments v_func = args[1] diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -285,12 +285,15 @@ cases = [8, 16, 24] if WORD == 8: cases.append(32) + bigvalue = 0xAAAAAAAAAAAA + else: + bigvalue = 0xAAAAAAA for i in cases: - box = InputArgInt(0xAAAAAAAAAAAA) + box = InputArgInt(bigvalue) res = self.execute_operation(rop.INT_AND, [box, ConstInt(2 ** i - 1)], 'int') - assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) + assert res == bigvalue & (2 ** i - 1) def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] diff --git a/rpython/jit/backend/x86/test/test_rx86.py b/rpython/jit/backend/x86/test/test_rx86.py --- a/rpython/jit/backend/x86/test/test_rx86.py +++ b/rpython/jit/backend/x86/test/test_rx86.py @@ -1,4 +1,5 @@ import py, struct +from rpython.rlib.rarithmetic import r_longlong from rpython.jit.backend.x86.rx86 import * globals().update(R.__dict__) @@ -210,8 +211,8 @@ s.MOV_ri(ebx, -0x80000003) s.MOV_ri(r13, -0x80000002) s.MOV_ri(ecx, 42) - s.MOV_ri(r12, 0x80000042) - s.MOV_ri(r12, 0x100000007) + s.MOV_ri(r12, 
r_longlong(0x80000042)) + s.MOV_ri(r12, r_longlong(0x100000007)) assert s.getvalue() == ('\x48\xC7\xC1\xFE\xFF\xFF\xFF' + '\x49\xC7\xC7\xFD\xFF\xFF\xFF' + '\x48\xBB\xFD\xFF\xFF\x7F\xFF\xFF\xFF\xFF' + diff --git a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -1,7 +1,11 @@ +import sys, py import random from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.test import test_rx86_32_auto_encoding +if sys.maxint <= 2**32: + py.test.skip("skipping this test on x86-32") + class TestRx86_64(test_rx86_32_auto_encoding.TestRx86_32): WORD = 8 diff --git a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py b/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py deleted file mode 100644 --- a/rpython/jit/backend/x86/test/test_zrpy_gcasmgcc.py +++ /dev/null @@ -1,9 +0,0 @@ -import py -from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests -from rpython.translator.platform import platform as compiler - -if compiler.name == 'msvc': - py.test.skip('asmgcc buggy on msvc') - -class TestAsmGcc(CompileFrameworkTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py --- a/rpython/jit/backend/x86/test/test_zrpy_releasegil.py +++ b/rpython/jit/backend/x86/test/test_zrpy_releasegil.py @@ -1,11 +1,5 @@ from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests -from rpython.translator.platform import platform as compiler class TestShadowStack(ReleaseGILTests): gcrootfinder = "shadowstack" - - -if compiler.name != 'msvc': - class TestAsmGcc(ReleaseGILTests): - gcrootfinder = "asmgcc" diff --git a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py 
b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py --- a/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py +++ b/rpython/jit/backend/x86/test/test_ztranslation_external_exception.py @@ -1,19 +1,12 @@ from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest from rpython.translator.translator import TranslationContext from rpython.config.translationoption import DEFL_GC -from rpython.translator.platform import platform as compiler -if compiler.name == 'msvc': - _MSVC = True -else: - _MSVC = False class TestTranslationRemoveTypePtrX86(TranslationRemoveTypePtrTest): def _get_TranslationContext(self): t = TranslationContext() t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark' - if not _MSVC: - t.config.translation.gcrootfinder = 'asmgcc' t.config.translation.list_comprehension_operations = True t.config.translation.gcremovetypeptr = True return t diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1156,8 +1156,7 @@ 'CALL_ASSEMBLER/*d/rfin', # call already compiled assembler 'CALL_MAY_FORCE/*d/rfin', 'CALL_LOOPINVARIANT/*d/rfin', - 'CALL_RELEASE_GIL/*d/fin', - # release the GIL and "close the stack" for asmgcc + 'CALL_RELEASE_GIL/*d/fin', # release the GIL around the call 'CALL_PURE/*d/rfin', # removed before it's passed to the backend 'CHECK_MEMORY_ERROR/1/n', # after a CALL: NULL => propagate MemoryError 'CALL_MALLOC_NURSERY/1/r', # nursery malloc, const number of bytes, zeroed diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py deleted file mode 100644 --- a/rpython/memory/gctransform/asmgcroot.py +++ /dev/null @@ -1,870 +0,0 @@ -from rpython.flowspace.model import (Constant, Variable, Block, Link, - copygraph, SpaceOperation, checkgraph) -from rpython.rlib.debug import ll_assert -from rpython.rlib.nonconst 
import NonConstant -from rpython.rlib import rgil -from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import ( - BaseFrameworkGCTransformer, BaseRootWalker) -from rpython.rtyper.llannotation import SomeAddress -from rpython.rtyper.rbuiltin import gen_cast -from rpython.translator.unsimplify import varoftype -from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys - - -# -# This transformer avoids the use of a shadow stack in a completely -# platform-specific way, by directing genc to insert asm() special -# instructions in the C source, which are recognized by GCC. -# The .s file produced by GCC is then parsed by trackgcroot.py. -# - -IS_64_BITS = sys.maxint > 2147483647 - -class AsmGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer): - _asmgcc_save_restore_arguments = None - - def push_roots(self, hop, keep_current_args=False): - livevars = self.get_livevars_for_roots(hop, keep_current_args) - self.num_pushs += len(livevars) - return livevars - - def pop_roots(self, hop, livevars): - if not livevars: - return - # mark the values as gc roots - for var in livevars: - v_adr = gen_cast(hop.llops, llmemory.Address, var) - v_newaddr = hop.genop("direct_call", [c_asm_gcroot, v_adr], - resulttype=llmemory.Address) - hop.genop("gc_reload_possibly_moved", [v_newaddr, var]) - - def build_root_walker(self): - return AsmStackRootWalker(self) - - def mark_call_cannotcollect(self, hop, name): - hop.genop("direct_call", [c_asm_nocollect, name]) - - def gct_direct_call(self, hop): - # just a sanity check: if we find a fnptr with the hint on the - # _callable, then we'd also find the hint by looking only at the - # graph. We'll actually change this graph only later, in - # start_transforming_graph(). 
- fnptr = hop.spaceop.args[0].value - try: - close_stack = fnptr._obj._callable._gctransformer_hint_close_stack_ - except AttributeError: - pass - else: - assert fnptr._obj.graph.func is fnptr._obj._callable - BaseFrameworkGCTransformer.gct_direct_call(self, hop) - - def start_transforming_graph(self, graph): - try: - close_stack = graph.func._gctransformer_hint_close_stack_ - except AttributeError: - close_stack = False - if close_stack: - self._transform_hint_close_stack(graph) - - def _transform_hint_close_stack(self, graph): - # We cannot easily pass variable amount of arguments of the call - # across the call to the pypy_asm_stackwalk helper. So we store - # them away and restore them. More precisely, we need to - # replace 'graph' with code that saves the arguments, and make - # a new graph that starts with restoring the arguments. - if self._asmgcc_save_restore_arguments is None: - self._asmgcc_save_restore_arguments = {} - sradict = self._asmgcc_save_restore_arguments - sra = [] # list of pointers to raw-malloced containers for args - seen = {} - ARGS = [v.concretetype for v in graph.getargs()] - for TYPE in ARGS: - if isinstance(TYPE, lltype.Ptr): - TYPE = llmemory.Address - num = seen.get(TYPE, 0) - seen[TYPE] = num + 1 - key = (TYPE, num) - if key not in sradict: - CONTAINER = lltype.FixedSizeArray(TYPE, 1) - p = lltype.malloc(CONTAINER, flavor='raw', zero=True, - immortal=True) - sradict[key] = Constant(p, lltype.Ptr(CONTAINER)) - sra.append(sradict[key]) - # - # make a copy of the graph that will reload the values - graph2 = copygraph(graph) - del graph2.func # otherwise, start_transforming_graph() will - # again transform graph2, and we get an - # infinite loop - # - # edit the original graph to only store the value of the arguments - block = Block(graph.startblock.inputargs) - c_item0 = Constant('item0', lltype.Void) - assert len(block.inputargs) == len(sra) - for v_arg, c_p in zip(block.inputargs, sra): - if isinstance(v_arg.concretetype, 
lltype.Ptr): - v_adr = varoftype(llmemory.Address) - block.operations.append( - SpaceOperation("cast_ptr_to_adr", [v_arg], v_adr)) - v_arg = v_adr - v_void = varoftype(lltype.Void) - block.operations.append( - SpaceOperation("bare_setfield", [c_p, c_item0, v_arg], v_void)) - # - # call asm_stackwalk(graph2) - RESULT = graph.getreturnvar().concretetype - FUNC2 = lltype.FuncType([], RESULT) - fnptr2 = lltype.functionptr(FUNC2, - graph.name + '_reload', - graph=graph2) - c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2)) - HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2), - ASM_FRAMEDATA_HEAD_PTR], RESULT) - v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk") - block.operations.append( - SpaceOperation("cast_pointer", [c_asm_stackwalk], v_asm_stackwalk)) - v_result = varoftype(RESULT) - block.operations.append( - SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2, - c_gcrootanchor, - Constant(None, lltype.Void)], - v_result)) - block.closeblock(Link([v_result], graph.returnblock)) - graph.startblock = block - # - # edit the copy of the graph to reload the values - block2 = graph2.startblock - block1 = Block([]) - reloadedvars = [] - for v, c_p in zip(block2.inputargs, sra): - v = v.copy() - if isinstance(v.concretetype, lltype.Ptr): - w = varoftype(llmemory.Address) - else: - w = v - block1.operations.append(SpaceOperation('getfield', - [c_p, c_item0], w)) - if w is not v: - block1.operations.append(SpaceOperation('cast_adr_to_ptr', - [w], v)) - reloadedvars.append(v) - block1.closeblock(Link(reloadedvars, block2)) - graph2.startblock = block1 - # - checkgraph(graph) - checkgraph(graph2) - - -class AsmStackRootWalker(BaseRootWalker): - - def __init__(self, gctransformer): - BaseRootWalker.__init__(self, gctransformer) - - def _asm_callback(): - self.walk_stack_from() - self._asm_callback = _asm_callback - self._shape_decompressor = ShapeDecompressor() - self._with_jit = hasattr(gctransformer.translator, '_jit2gc') - if self._with_jit: - jit2gc = 
gctransformer.translator._jit2gc - self.frame_tid = jit2gc['frame_tid'] - self.gctransformer = gctransformer - # - # unless overridden in need_thread_support(): - self.belongs_to_current_thread = lambda framedata: True - - def need_stacklet_support(self, gctransformer, getfn): - from rpython.annotator import model as annmodel - from rpython.rlib import _stacklet_asmgcc - # stacklet support: BIG HACK for rlib.rstacklet - _stacklet_asmgcc._asmstackrootwalker = self # as a global! argh - _stacklet_asmgcc.complete_destrptr(gctransformer) - # - def gc_detach_callback_pieces(): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - result = llmemory.NULL - framedata = anchor.address[1] - while framedata != anchor: - next = framedata.address[1] - if self.belongs_to_current_thread(framedata): - # detach it - prev = framedata.address[0] - prev.address[1] = next - next.address[0] = prev - # update the global stack counter - rffi.stackcounter.stacks_counter -= 1 - # reattach framedata into the singly-linked list 'result' - framedata.address[0] = rffi.cast(llmemory.Address, -1) - framedata.address[1] = result - result = framedata - framedata = next - return result - # - def gc_reattach_callback_pieces(pieces): - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - while pieces != llmemory.NULL: - framedata = pieces - pieces = pieces.address[1] - # attach 'framedata' into the normal doubly-linked list - following = anchor.address[1] - following.address[0] = framedata - framedata.address[1] = following - anchor.address[1] = framedata - framedata.address[0] = anchor - # update the global stack counter - rffi.stackcounter.stacks_counter += 1 - # - s_addr = SomeAddress() - s_None = annmodel.s_None - self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, - [], s_addr) - self.gc_reattach_callback_pieces_ptr=getfn(gc_reattach_callback_pieces, - [s_addr], s_None) - - def need_thread_support(self, gctransformer, getfn): - # Threads supported "out of the box" by the rest of the 
code. - # The whole code in this function is only there to support - # fork()ing in a multithreaded process :-( - # For this, we need to handle gc_thread_start and gc_thread_die - # to record the mapping {thread_id: stack_start}, and - # gc_thread_before_fork and gc_thread_after_fork to get rid of - # all ASM_FRAMEDATA structures that do no belong to the current - # thread after a fork(). - from rpython.rlib import rthread - from rpython.memory.support import AddressDict - from rpython.memory.support import copy_without_null_values - from rpython.annotator import model as annmodel - gcdata = self.gcdata - - def get_aid(): - """Return the thread identifier, cast to an (opaque) address.""" - return llmemory.cast_int_to_adr(rthread.get_ident()) - - def thread_start(): - value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed)) - gcdata.aid2stack.setitem(get_aid(), value) - thread_start._always_inline_ = True - - def thread_setup(): - gcdata.aid2stack = AddressDict() - gcdata.dead_threads_count = 0 - # to also register the main thread's stack - thread_start() - thread_setup._always_inline_ = True - - def thread_die(): - gcdata.aid2stack.setitem(get_aid(), llmemory.NULL) - # from time to time, rehash the dictionary to remove - # old NULL entries - gcdata.dead_threads_count += 1 - if (gcdata.dead_threads_count & 511) == 0: - copy = copy_without_null_values(gcdata.aid2stack) - gcdata.aid2stack.delete() - gcdata.aid2stack = copy - - def belongs_to_current_thread(framedata): - # xxx obscure: the answer is Yes if, as a pointer, framedata - # lies between the start of the current stack and the top of it. 
- stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL) - ll_assert(stack_start != llmemory.NULL, - "current thread not found in gcdata.aid2stack!") - stack_stop = llmemory.cast_int_to_adr( - llop.stack_current(lltype.Signed)) - return (stack_start <= framedata <= stack_stop or - stack_start >= framedata >= stack_stop) - self.belongs_to_current_thread = belongs_to_current_thread - - def thread_before_fork(): - # before fork(): collect all ASM_FRAMEDATA structures that do - # not belong to the current thread, and move them out of the - # way, i.e. out of the main circular doubly linked list. - detached_pieces = llmemory.NULL - anchor = llmemory.cast_ptr_to_adr(gcrootanchor) - initialframedata = anchor.address[1] - while initialframedata != anchor: # while we have not looped back - if not belongs_to_current_thread(initialframedata): - # Unlink it - prev = initialframedata.address[0] - next = initialframedata.address[1] - prev.address[1] = next - next.address[0] = prev - # Link it to the singly linked list 'detached_pieces' - initialframedata.address[0] = detached_pieces - detached_pieces = initialframedata - rffi.stackcounter.stacks_counter -= 1 - # Then proceed to the next piece of stack - initialframedata = initialframedata.address[1] - return detached_pieces - - def thread_after_fork(result_of_fork, detached_pieces): - if result_of_fork == 0: - # We are in the child process. Assumes that only the - # current thread survived. All the detached_pieces - # are pointers in other stacks, so have likely been - # freed already by the multithreaded library. - # Nothing more for us to do. - pass - else: - # We are still in the parent process. 
The fork() may From pypy.commits at gmail.com Mon Dec 16 00:37:05 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 21:37:05 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v7.3.0rc2 for changeset 285307a0f5a7 Message-ID: <5df71801.1c69fb81.2165e.648a@mx.google.com> Author: Matti Picus Branch: Changeset: r98296:5b24dd282506 Date: 2019-12-16 07:35 +0200 http://bitbucket.org/pypy/pypy/changeset/5b24dd282506/ Log: Added tag release-pypy2.7-v7.3.0rc2 for changeset 285307a0f5a7 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -59,3 +59,4 @@ 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 +285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 From pypy.commits at gmail.com Mon Dec 16 00:37:07 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 21:37:07 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy3.6-v7.3.0rc2 for changeset 008914050bae Message-ID: <5df71803.1c69fb81.f3344.8433@mx.google.com> Author: Matti Picus Branch: Changeset: r98297:8f12789e7b8b Date: 2019-12-16 07:35 +0200 http://bitbucket.org/pypy/pypy/changeset/8f12789e7b8b/ Log: Added tag release-pypy3.6-v7.3.0rc2 for changeset 008914050bae diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -60,3 +60,4 @@ e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 +008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 From pypy.commits at gmail.com Mon Dec 16 00:53:38 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 21:53:38 -0800 (PST) Subject: [pypy-commit] pypy py3.6: fix bad merge Message-ID: <5df71be2.1c69fb81.c44dd.06a9@mx.google.com> Author: Matti Picus Branch: 
py3.6 Changeset: r98298:4d6be0690edb Date: 2019-12-16 07:51 +0200 http://bitbucket.org/pypy/pypy/changeset/4d6be0690edb/ Log: fix bad merge diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -76,7 +76,6 @@ static const long SSL_OP_SINGLE_DH_USE; static const long SSL_OP_EPHEMERAL_RSA; static const long SSL_OP_MICROSOFT_SESS_ID_BUG; -static const long SSL_OP_ENABLE_MIDDLEBOX_COMPAT; static const long SSL_OP_NETSCAPE_CHALLENGE_BUG; static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG; From pypy.commits at gmail.com Mon Dec 16 00:53:39 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 15 Dec 2019 21:53:39 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into branch Message-ID: <5df71be3.1c69fb81.97a6b.746c@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98299:16b2e4be93b9 Date: 2019-12-16 07:52 +0200 http://bitbucket.org/pypy/pypy/changeset/16b2e4be93b9/ Log: merge py3.6 into branch diff --git a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py --- a/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py +++ b/lib_pypy/_cffi_ssl/_cffi_src/openssl/ssl.py @@ -76,7 +76,6 @@ static const long SSL_OP_SINGLE_DH_USE; static const long SSL_OP_EPHEMERAL_RSA; static const long SSL_OP_MICROSOFT_SESS_ID_BUG; -static const long SSL_OP_ENABLE_MIDDLEBOX_COMPAT; static const long SSL_OP_NETSCAPE_CHALLENGE_BUG; static const long SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG; static const long SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG; From pypy.commits at gmail.com Mon Dec 16 13:09:38 2019 From: pypy.commits at gmail.com (arigo) Date: Mon, 16 Dec 2019 10:09:38 -0800 (PST) Subject: [pypy-commit] pypy py3.6: hg merge default Message-ID: <5df7c862.1c69fb81.8b393.17d0@mx.google.com> Author: Armin Rigo 
Branch: py3.6 Changeset: r98301:ae244c1519cf Date: 2019-12-16 19:09 +0100 http://bitbucket.org/pypy/pypy/changeset/ae244c1519cf/ Log: hg merge default diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -59,3 +59,5 @@ 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 +285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 +008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -83,6 +83,8 @@ self.space = space self.all_primitives = [None] * cffi_opcode._NUM_PRIM self.file_struct = None + self.lock = None + self.lock_owner = 0 self.rec_level = 0 def get_file_struct(self): @@ -90,6 +92,33 @@ self.file_struct = ctypestruct.W_CTypeStruct(self.space, "FILE") return self.file_struct + def __enter__(self): + # This is a simple recursive lock implementation + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + if tid != self.lock_owner: + if self.lock is None: + self.lock = self.space.allocate_lock() + self.lock.acquire(True) + assert self.lock_owner == 0 + assert self.rec_level == 0 + self.lock_owner = tid + self.rec_level += 1 + + def __exit__(self, *args): + assert self.rec_level > 0 + self.rec_level -= 1 + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + assert tid == self.lock_owner + if self.rec_level == 0: + self.lock_owner = 0 + self.lock.release() + def get_primitive_type(ffi, num): space = ffi.space @@ -408,17 +437,20 @@ return ffi.cached_types[index] realize_cache = ffi.space.fromcache(RealizeCache) - if realize_cache.rec_level >= 1000: - raise 
oefmt(ffi.space.w_RuntimeError, - "type-building recursion too deep or infinite. " - "This is known to occur e.g. in ``struct s { void(*callable)" - "(struct s); }''. Please report if you get this error and " - "really need support for your case.") - realize_cache.rec_level += 1 - try: + with realize_cache: + # + # check again cached_types, which might have been filled while + # we were waiting for the recursive lock + if from_ffi and ffi.cached_types[index] is not None: + return ffi.cached_types[index] + + if realize_cache.rec_level > 1000: + raise oefmt(ffi.space.w_RuntimeError, + "type-building recursion too deep or infinite. " + "This is known to occur e.g. in ``struct s { void(*callable)" + "(struct s); }''. Please report if you get this error and " + "really need support for your case.") x = realize_c_type_or_func_now(ffi, op, opcodes, index) - finally: - realize_cache.rec_level -= 1 if from_ffi: assert ffi.cached_types[index] is None or ffi.cached_types[index] is x From pypy.commits at gmail.com Mon Dec 16 13:25:55 2019 From: pypy.commits at gmail.com (arigo) Date: Mon, 16 Dec 2019 10:25:55 -0800 (PST) Subject: [pypy-commit] pypy default: oops, fix for 853267f08f60: the final update of cached_types should occur while Message-ID: <5df7cc33.1c69fb81.45561.1e4c@mx.google.com> Author: Armin Rigo Branch: Changeset: r98302:36cf5ebfa3ec Date: 2019-12-16 19:25 +0100 http://bitbucket.org/pypy/pypy/changeset/36cf5ebfa3ec/ Log: oops, fix for 853267f08f60: the final update of cached_types should occur while we still have the lock (maybe it works anyway because of the GIL, but better safe than sorry) diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -452,9 +452,10 @@ "really need support for your case.") x = realize_c_type_or_func_now(ffi, op, opcodes, index) - if from_ffi: - assert ffi.cached_types[index] is None or 
ffi.cached_types[index] is x - ffi.cached_types[index] = x + if from_ffi: + old = ffi.cached_types[index] + assert old is None or old is x + ffi.cached_types[index] = x return x From pypy.commits at gmail.com Mon Dec 16 13:26:31 2019 From: pypy.commits at gmail.com (arigo) Date: Mon, 16 Dec 2019 10:26:31 -0800 (PST) Subject: [pypy-commit] pypy py3.6: hg merge default Message-ID: <5df7cc57.1c69fb81.49139.3f75@mx.google.com> Author: Armin Rigo Branch: py3.6 Changeset: r98303:15207108964c Date: 2019-12-16 19:26 +0100 http://bitbucket.org/pypy/pypy/changeset/15207108964c/ Log: hg merge default diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -452,9 +452,10 @@ "really need support for your case.") x = realize_c_type_or_func_now(ffi, op, opcodes, index) - if from_ffi: - assert ffi.cached_types[index] is None or ffi.cached_types[index] is x - ffi.cached_types[index] = x + if from_ffi: + old = ffi.cached_types[index] + assert old is None or old is x + ffi.cached_types[index] = x return x From pypy.commits at gmail.com Tue Dec 17 07:27:20 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 17 Dec 2019 04:27:20 -0800 (PST) Subject: [pypy-commit] pypy meth_fastcall: add python3.6 version of METH_FASTCALL, only becomes official in 3.8 Message-ID: <5df8c9a8.1c69fb81.ac5f4.d693@mx.google.com> Author: Matti Picus Branch: meth_fastcall Changeset: r98304:10a5fb96ac84 Date: 2019-12-17 14:26 +0200 http://bitbucket.org/pypy/pypy/changeset/10a5fb96ac84/ Log: add python3.6 version of METH_FASTCALL, only becomes official in 3.8 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -128,7 +128,7 @@ constant_names = """ Py_TPFLAGS_READY Py_TPFLAGS_READYING METH_COEXIST METH_STATIC METH_CLASS Py_TPFLAGS_BASETYPE Py_MAX_FMT -METH_NOARGS METH_VARARGS 
METH_KEYWORDS METH_O +METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O METH_FASTCALL Py_TPFLAGS_HEAPTYPE Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_MAX_NDIMS Py_CLEANUP_SUPPORTED diff --git a/pypy/module/cpyext/include/methodobject.h b/pypy/module/cpyext/include/methodobject.h --- a/pypy/module/cpyext/include/methodobject.h +++ b/pypy/module/cpyext/include/methodobject.h @@ -27,6 +27,11 @@ #define METH_COEXIST 0x0040 +/* In python3.7 this is equivalent to METH_FASTCALL | METH_KEYWORDS, + and is used with _PyCFunctionFast which becomes in 3.7 + _PyCFunctionFastWithKeywords*/ +#define METH_FASTCALL 0x0080 + #define PyCFunction_New(ml, self) PyCFunction_NewEx((ml), (self), NULL) /* Macros for direct access to these values. Type checks are *not* diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.typeobject import W_TypeObject from pypy.module.cpyext.api import ( CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O, - METH_STATIC, METH_VARARGS, PyObject, bootstrap_function, + METH_STATIC, METH_VARARGS, METH_FASTCALL, PyObject, bootstrap_function, cpython_api, generic_cpy_call, CANNOT_FAIL, slot_function, cts, build_type_checkers) from pypy.module.cpyext.pyobject import ( @@ -21,6 +21,7 @@ PyMethodDef = cts.gettype('PyMethodDef') PyCFunction = cts.gettype('PyCFunction') PyCFunctionKwArgs = cts.gettype('PyCFunctionWithKeywords') +_PyCFunctionFast = cts.gettype('_PyCFunctionFast') PyCFunctionObject = cts.gettype('PyCFunctionObject*') @bootstrap_function @@ -57,6 +58,11 @@ space.setitem(w_kwargs, space.newtext(key), w_obj) return w_kwargs +def w_names_from_args(space, __args__): + if __args__.keywords is None: + return [] + return [space.newtext(x) for x in __args__.keywords] + def undotted_name(name): """Return the last component of a dotted name""" dotpos = name.rfind('.') @@ -106,12 +112,14 @@ def 
call(self, space, w_self, __args__): flags = self.flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST) length = len(__args__.arguments_w) - if not flags & METH_KEYWORDS and __args__.keywords: + if flags & METH_FASTCALL: + return self.call_fastcall(space, w_self, __args__) + elif flags & METH_KEYWORDS: + return self.call_keywords(space, w_self, __args__) + elif __args__.keywords: raise oefmt(space.w_TypeError, "%s() takes no keyword arguments", self.name) - if flags & METH_KEYWORDS: - return self.call_keywords(space, w_self, __args__) - elif flags & METH_NOARGS: + if flags & METH_NOARGS: if length == 0: return self.call_noargs(space, w_self, __args__) raise oefmt(space.w_TypeError, @@ -154,6 +162,30 @@ finally: decref(space, py_args) + def call_fastcall(self, space, w_self, __args__): + func = rffi.cast(_PyCFunctionFast, self.ml.c_ml_meth) + args_w = __args__.arguments_w + names = w_names_from_args(space, __args__) + nargs = len(__args__.arguments_w) + with lltype.scoped_alloc(rffi.CArray(PyObject), nargs + len(names)) as args: + i = 0 + py_names = None + for w_arg in args_w: + args[i] = make_ref(space, w_arg) + i += 1 + if names: + for w_val in __args__.keywords_w: + args[i] = make_ref(space, w_val) + i += 1 + py_names = tuple_from_args_w(space, names) + try: + return generic_cpy_call(space, func, w_self, args, nargs, py_names) + finally: + for arg in args: + decref(space, arg) + if py_names: + decref(space, py_names) + def get_doc(self, space): c_doc = self.ml.c_ml_doc if c_doc: diff --git a/pypy/module/cpyext/parse/cpyext_object.h b/pypy/module/cpyext/parse/cpyext_object.h --- a/pypy/module/cpyext/parse/cpyext_object.h +++ b/pypy/module/cpyext/parse/cpyext_object.h @@ -176,6 +176,8 @@ /* from methodobject.h */ typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); +typedef PyObject *(*_PyCFunctionFast) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *, PyObject *); 
typedef PyObject *(*PyNoArgsFunction)(PyObject *); diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -89,6 +89,45 @@ assert mod.getarg_KW.__name__ == "getarg_KW" assert mod.getarg_KW(*(), **{}) == ((), {}) + def test_call_METH_FAST(self): + import sys + if sys.version_info[:2] != (3, 6): + skip('Python 3.6 only test, Python 3.7+ is different') + mod = self.import_extension('foo', [ + ('getarg_FAST', 'METH_FASTCALL', + ''' + int kwlen, i; + PyObject *pyargs; + if (kwnames == NULL) { + kwlen = 0; + } else { + kwlen = PySequence_Size(kwnames); + } + fprintf(stderr, "got %ld args, %d kwnames\\n", nargs, kwlen); + pyargs = PyTuple_New(nargs + kwlen); + for (i=0; iob_refcnt); + } + if (kwnames == NULL) { + return Py_BuildValue("Oi", pyargs, nargs); + } else { + fprintf(stderr, "got kwnames\\n"); + return Py_BuildValue("OiO", pyargs, nargs, kwnames); + } + ''' + ), + ]) + assert mod.getarg_FAST(1) == ((1,), 1) + assert mod.getarg_FAST(1, 2) == ((1, 2), 2) + assert mod.getarg_FAST(a=3, b=4) == ((3, 4), 0, ('a', 'b')) + assert mod.getarg_FAST(1, 2, a=3, b=4) == ((1, 2, 3, 4), 2, ('a', 'b')) + assert mod.getarg_FAST.__name__ == "getarg_FAST" + assert mod.getarg_FAST(*(), **{}) == ((), 0) def test_func_attributes(self): mod = self.import_extension('MyModule', [ diff --git a/pypy/tool/cpyext/extbuild.py b/pypy/tool/cpyext/extbuild.py --- a/pypy/tool/cpyext/extbuild.py +++ b/pypy/tool/cpyext/extbuild.py @@ -118,7 +118,9 @@ codes = [] for funcname, flags, code in functions: cfuncname = "%s_%s" % (modname, funcname) - if 'METH_KEYWORDS' in flags: + if 'METH_FASTCALL' in flags: + signature = '(PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames)' + elif 'METH_KEYWORDS' in flags: signature = '(PyObject *self, PyObject *args, PyObject *kwargs)' else: signature = '(PyObject *self, PyObject *args)' From 
pypy.commits at gmail.com Tue Dec 17 10:42:10 2019 From: pypy.commits at gmail.com (antocuni) Date: Tue, 17 Dec 2019 07:42:10 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: start a blog post about the hpy sprint Message-ID: <5df8f752.1c69fb81.dec1c.16e1@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5966:48e199e316c7 Date: 2019-12-17 16:41 +0100 http://bitbucket.org/pypy/extradoc/changeset/48e199e316c7/ Log: start a blog post about the hpy sprint diff --git a/blog/draft/2019-12-hpy-sprint.rst b/blog/draft/2019-12-hpy-sprint.rst new file mode 100644 --- /dev/null +++ b/blog/draft/2019-12-hpy-sprint.rst @@ -0,0 +1,165 @@ +HPy kick-off sprint report +=========================== + +Recently Antonio, Armin and Ronan had a small internal sprint in the beautiful +city of Gdańsk to kick-off the development of HPy. Here is a brief report of +what it has been accomplished during the sprint. + +What is HPy? +------------ + +The TL;DR answer is "a better way to write C extensions for Python". + +The idea of HPy was born during EuroPython 2019 in Basel, where there was an +informal meeting which included core developers of PyPy, CPython (Victor +Stinner and Mark Shannon) and Cython (Stefan Behnel). + +All of us agreeded that the current design of the CPython C API is problematic +for various reasons and, in particular, because it is too tied to the current +internal design of CPython. The end result is that: + + - alternative implementations of Python (such as PyPy, but not only) have a + `hard time` to load and execute existing C extensions; + + - CPython itself is unable to change some of its internal implementation + details without breaking the world. For example, as of today it would be + impossible to switch from using reference counting to using a real GC, + which in turns make it hard for example to remove the GIL, as gilectomy_ + attempted. + +HPy tries to address these issues by following two major design guidelines: + + 1. 
objects are referenced and passed around using opaque handles, which are + similar to e.g. file descriptors in spirit. Multiple, different handles + can point to the same underlying object, handles can be duplicated and + each handle must be closed independently. + + 2. The internal data structures and C-level layout of objects are not + visible nor accessible using the API, so each implementation if free to + use what fits best. + +The other major design goal of HPy is to allow an incremental +transition/porting, so existing module can migrate their codebase one method +at a time. Moreover, Cython eventually will generate HPy code, so extension +module written in Cython will be able to benefit from HPy automatically. + +More details can be found in the README of the official `HPy repository`_. + +.. _`hard time`: https://morepypy.blogspot.com/2018/09/inside-cpyext-why-emulating-cpython-c.html +.. _gilectomy: https://pythoncapi.readthedocs.io/gilectomy.html +.. _`HPy repository`: https://github.com/pyhandle/hpy + + +CPython and universal target ABI +--------------------------------- + +When compiling an HPy extension you can choose two different target ABI: + + - **CPython ABI**: in this case, ``hpy.h`` contains a set of macros and + static inline functions which translates at compilation time the HPy API + into the standard C-API: the compiled module will have no performance + penalty and it will have an filename like + ``foo.cpython-37m-x86_64-linux-gnu.so``. + + - **Universal HPy ABI**: as the name implies, extension modules compiled + this way are "universal" and can be loaded unmodified by multiple Python + interpreters and version. Moreover, it will be possible to dynamically + enable a special debug mode which will make it easy to find e.g. unclosed + handles or memory leaks, **without having to recompile the extension**. 
+ + +Universal modules can be loaded **also** on CPython, thanks to the +``hpy_universal`` module which is under development: because of an extra layer +of indirection, extensions compiled with the universal ABI will face a small +performance penalty compared to the ones using the CPython ABI. + +This setup gives several benefits: + + - extension developers can use the extra debug features given by the + Universal ABI with no need to use a special debug version of Python + + - projects which need the maximum level of performance can compile their + extension for each relevant version of CPython, as they are doing now + + - projects for which runtime speed is less important will have the choice of + distributing a single binary which will work on any version of Python + + + +A simple example +----------------- + +The HPy repo contains a `proof of concept`_ module. Here is a simplified +version which illustrates how an HPy module looks like: + +.. sourcecode:: C + + #include "hpy.h" + + HPy_DEF_METH_VARARGS(add_ints) + static HPy add_ints_impl(HPyContext ctx, HPy self, HPy *args, HPy_ssize_t nargs) + { + long a, b; + if (!HPyArg_Parse(ctx, args, nargs, "ll", &a, &b)) + return HPy_NULL; + return HPyLong_FromLong(ctx, a+b); + } + + + static HPyMethodDef PofMethods[] = { + {"add_ints", add_ints, HPy_METH_VARARGS, ""}, + {NULL, NULL, 0, NULL} + }; + + static HPyModuleDef moduledef = { + HPyModuleDef_HEAD_INIT, + .m_name = "pof", + .m_doc = "HPy Proof of Concept", + .m_size = -1, + .m_methods = PofMethods + }; + + + HPy_MODINIT(pof) + static HPy init_pof_impl(HPyContext ctx) + { + HPy m; + m = HPyModule_Create(ctx, &moduledef); + if (HPy_IsNull(m)) + return HPy_NULL; + return m; + } + + +People who are familiar with the current C-API will surely notice lots of +similarities. The biggest differences are: + + - Instead of ``PyObject *``, objects have the type ``HPy``, which as + explained above represents a handle. 
+ + - You need to explicitly pass an ``HPyContext`` around: the intent is + primary to be future-proof and make it easier to implement things like + sub- interpreters. + + - ``HPy_METH_VARARGS`` is implemented differently than CPython's + ``METH_VARARGS``: in particular, these methods receive an array of ``HPy`` + and its length, instead of a fully constructed tuple: passing a tuple + makes sense on CPython where you have it anyway, but it might be an + unnecessary burden for alternate implementations. Note that this is + similar to the new `METH_FASTCALL` which was introduced in CPython. + + - HPy relies a lot on C macros, which most of the time are needed to support + the CPython ABI compilation mode. For example, ``HPy_DEF_METH_VARARGS`` + expands into a trampoline which has the correct C signature that CPython + expects (i.e., ``PyObject (*)(PyObject *self, *PyObject *args)``) and + which calls ``add_ints_impl``. + + +.. _`proof of concept`: https://github.com/pyhandle/hpy/blob/master/proof-of-concept/pof.c +.. _`METH_FASTCALL`: https://www.python.org/dev/peps/pep-0580/ + + +Sprint report and current status +--------------------------------- + +XXX finish me From pypy.commits at gmail.com Tue Dec 17 12:49:15 2019 From: pypy.commits at gmail.com (antocuni) Date: Tue, 17 Dec 2019 09:49:15 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: finish the draft Message-ID: <5df9151b.1c69fb81.f594a.5d61@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5968:1634e069aabb Date: 2019-12-17 18:43 +0100 http://bitbucket.org/pypy/extradoc/changeset/1634e069aabb/ Log: finish the draft diff --git a/blog/draft/2019-12-hpy-sprint.rst b/blog/draft/2019-12-hpy-sprint.rst --- a/blog/draft/2019-12-hpy-sprint.rst +++ b/blog/draft/2019-12-hpy-sprint.rst @@ -50,12 +50,12 @@ .. 
_`HPy repository`: https://github.com/pyhandle/hpy -CPython and universal target ABI ---------------------------------- +Target ABI +----------- When compiling an HPy extension you can choose two different target ABI: - - **CPython ABI**: in this case, ``hpy.h`` contains a set of macros and + - **HPy/CPython ABI**: in this case, ``hpy.h`` contains a set of macros and static inline functions which translates at compilation time the HPy API into the standard C-API: the compiled module will have no performance penalty and it will have an filename like @@ -71,7 +71,7 @@ Universal modules can be loaded **also** on CPython, thanks to the ``hpy_universal`` module which is under development: because of an extra layer of indirection, extensions compiled with the universal ABI will face a small -performance penalty compared to the ones using the CPython ABI. +performance penalty compared to the ones using the HPy/CPython ABI. This setup gives several benefits: @@ -149,7 +149,7 @@ similar to the new `METH_FASTCALL` which was introduced in CPython. - HPy relies a lot on C macros, which most of the time are needed to support - the CPython ABI compilation mode. For example, ``HPy_DEF_METH_VARARGS`` + the HPy/CPython ABI compilation mode. For example, ``HPy_DEF_METH_VARARGS`` expands into a trampoline which has the correct C signature that CPython expects (i.e., ``PyObject (*)(PyObject *self, *PyObject *args)``) and which calls ``add_ints_impl``. @@ -162,4 +162,157 @@ Sprint report and current status --------------------------------- -XXX finish me +After this long preamble, here is a rough list of what we accomplished during +the week-long sprint and the days immediatly after. + +On the HPy side, We kicked-off the code in the repo: at the moment of writing +the layout of the directories is a bit messy because we moved things around +several times, but identified several main sections: + + 1. 
A specification of the API which serves both as documentation and as an + input for parts of the projects which are automatically + generated. Currently, this lives `public_api.h`_. + + 2. A set of header files which can be used to compile extension module: + depending on whether the flag ``-DHPY_UNIVERSAL_ABI`` is passed to the + compiler, the extension can target the `HPy/CPython ABI`_ or the `HPy + Universal ABI`_ + + 3. A `CPython extension module`_ called ``hpy_universal`` which makes it + possible to import universal modules on CPython + + 4. A set of tests_ which are independent of the implementation and are meant + to be an "executable specification" of the semantics. Currently, these + tests are run against three different implementations of the HPy API: + + - the headers which implements the "HPy/CPython ABI" + + - the ``hpy_universal`` module for CPython + + - the ``hpy_universal`` module for PyPy (these tests are run in the PyPy repo) + +Moreover, we started a `PyPy branch`_ in which to implement the +``hpy_univeral`` module: at the moment of writing PyPy can pass all the HPy +tests apart the ones which allows to convert to and from ``PyObject *``. +Among the other things, this means that it is already possible to load the +very same binary module in both CPython and PyPy, which is impressive on its +own :). + +Finally, we wanted a real-life use case to show how to port a module to HPy +and to do benchmarks. After some searching, we choose ultrajson_, for the +following reasons: + + - it is a real-world extension module which was written with performance in + mind + + - when parsing a JSON file it does a lot of calls to the Python API to + construct the various parts of the result message + + - it uses only a small subset of the Python API + +This repo contains the `HPy port of ultrajson`. This commit_ shows an example +of how the porting looks like. 
+ +``ujson_hpy`` is also a very good example of incremental migration: so far +only ``ujson.loads`` is implemented using the HPy API, while ``ujson.dumps`` +is still implemented using the old C-API, and both can coexist nicely in the +same compiled module. + + +.. _`public_api.h`: https://github.com/pyhandle/hpy/blob/9aa8a2738af3fd2eda69d4773b319d10a9a5373f/tools/public_api.h +.. _`CPython extension module`: https://github.com/pyhandle/hpy/tree/9aa8a2738af3fd2eda69d4773b319d10a9a5373f/cpython-universal/src +.. _`HPy/CPython ABI`: https://github.com/pyhandle/hpy/blob/9aa8a2738af3fd2eda69d4773b319d10a9a5373f/hpy-api/hpy_devel/include/cpython/hpy.h +.. _`HPy Universal ABI`: https://github.com/pyhandle/hpy/blob/9aa8a2738af3fd2eda69d4773b319d10a9a5373f/hpy-api/hpy_devel/include/universal/hpy.h +.. _tests: https://github.com/pyhandle/hpy/tree/9aa8a2738af3fd2eda69d4773b319d10a9a5373f/test + +.. _`PyPy branch`: https://bitbucket.org/pypy/pypy/src/hpy/pypy/module/hpy_universal/ + +.. _ultrajson: https://github.com/esnme/ultrajson +.. _`HPy port of ultrajson`: https://github.com/pyhandle/ultrajson-hpy +.. _commit: https://github.com/pyhandle/ultrajson-hpy/commit/efb35807afa8cf57db5df6a3dfd4b64c289fe907 + + +Benchmarks +----------- + +Once we have a fully working ``ujson_hpy`` module, we can finally run +benchmarks! We tested several different versions of the module: + + - ``ujson``: this is the vanilla implementation of ultrajson using the + C-API. On PyPy this is executed by the infamous ``cpyext`` compatibility + layer, so we expect it to be much slower than on CPython + + - ``ujson_hpy``: our HPy port compiled to target the HPy/CPython ABI. We + expect it to be as fast as ``ujson`` + + - ``ujson_hpy_universal``: same as above but compiled to target the + Universal HPy ABI. We expect it to be slightly slower than ``ujson`` on + CPython, and much faster on PyPy. + +Finally, we also ran the benchmark using the builtin ``json`` module. 
This is +not really relevant to HPy, but it might still be an interesting as a +reference data point. + +The benchmark_ is very simple and consists of parsing a `big JSON file`_ 100 +times. Here is the average time per iteration (in milliseconds) using the +various versions of the module, CPython 3.7 and the latest version of the hpy +PyPy branch: + ++---------------------+---------+--------+ +| | CPython | PyPy | ++---------------------+---------+--------+ +| ujson | 154.32 | 633.97 | ++---------------------+---------+--------+ +| ujson_hpy | 152.19 | | ++---------------------+---------+--------+ +| ujson_hpy_universal | 168.78 | 207.68 | ++---------------------+---------+--------+ +| json | 224.59 | 135.43 | ++---------------------+---------+--------+ + +As expected, the benchmark proves that when targeting the HPy/CPython ABI, HPy +doesn't impose any performance penalty on CPython. The universal version is +~10% slower on CPython, but gives an impressive 3x speedup on PyPy! It it +worth noting that the PyPy hpy module is not fully optimized yet, and we +expect to be able to reach the same performance as CPython for this particular +example (or even more, thanks to our better GC). + +All in all, not a bad result for two weeks of intense hacking :) + +It is also worth noting than PyPy's builtin ``json`` module does **really** +well in this benchmark, thanks to the recent optimizations that were described +in an `earlier blog post`_. + + +.. _benchmark: https://github.com/pyhandle/ultrajson-hpy/blob/hpy/benchmark/main.py +.. _`big JSON file`: https://github.com/pyhandle/ultrajson-hpy/blob/hpy/benchmark/download_data.sh +.. _`earlier blog post`: https://morepypy.blogspot.com/2019/10/pypys-new-json-parser.html + + +Conclusion and future directions +--------------------------------- + +We think we can be very satisfied about what we have got so far. 
The +development of HPy just started but these early results seem to indicate that +we are on the right track to bring Python extensions into the future. + +At the moment, we can anticipate some of the next steps in the development of +HPy: + + - think about a proper API design: what we have done so far has + been a "dumb" translation of the API we needed to run ``ujson``. However, + one of the declared goal of HPy is to improve the design of the API. There + will be a trade-off between the desire of having a clean, fresh new API + and the need to be not too different than the old one, to make porting + easier. Finding the sweet spot will not be easy! + + - implement the "debug" mode, which will help developers to find + bugs such as leaking handles or using invalid handles + + - instruct Cython to emit HPy code on request + + - eventually, we will also want to try to port parts of ``numpy`` to HPy to + finally solve the long-standing problem of sub-optimal ``numpy`` + performance in PyPy + +Stay tuned! From pypy.commits at gmail.com Tue Dec 17 12:49:18 2019 From: pypy.commits at gmail.com (antocuni) Date: Tue, 17 Dec 2019 09:49:18 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: merge heads Message-ID: <5df9151e.1c69fb81.85225.9452@mx.google.com> Author: Antonio Cuni Branch: extradoc Changeset: r5969:f70d7fc3055f Date: 2019-12-17 18:49 +0100 http://bitbucket.org/pypy/extradoc/changeset/f70d7fc3055f/ Log: merge heads diff --git a/blog/draft/2019-12-hpy-sprint.rst b/blog/draft/2019-12-hpy-sprint.rst --- a/blog/draft/2019-12-hpy-sprint.rst +++ b/blog/draft/2019-12-hpy-sprint.rst @@ -3,7 +3,7 @@ Recently Antonio, Armin and Ronan had a small internal sprint in the beautiful city of Gdańsk to kick-off the development of HPy. Here is a brief report of -what it has been accomplished during the sprint. +what was accomplished during the sprint. What is HPy? 
------------ @@ -14,12 +14,12 @@ informal meeting which included core developers of PyPy, CPython (Victor Stinner and Mark Shannon) and Cython (Stefan Behnel). -All of us agreeded that the current design of the CPython C API is problematic +All of us agreed that the current design of the CPython C API is problematic for various reasons and, in particular, because it is too tied to the current internal design of CPython. The end result is that: - alternative implementations of Python (such as PyPy, but not only) have a - `hard time` to load and execute existing C extensions; + `hard time`_ loading and executing existing C extensions; - CPython itself is unable to change some of its internal implementation details without breaking the world. For example, as of today it would be @@ -30,16 +30,16 @@ HPy tries to address these issues by following two major design guidelines: 1. objects are referenced and passed around using opaque handles, which are - similar to e.g. file descriptors in spirit. Multiple, different handles + similar to e.g., file descriptors in spirit. Multiple, different handles can point to the same underlying object, handles can be duplicated and - each handle must be closed independently. + each handle must be released independently of any other duplicate. 2. The internal data structures and C-level layout of objects are not visible nor accessible using the API, so each implementation if free to use what fits best. The other major design goal of HPy is to allow an incremental -transition/porting, so existing module can migrate their codebase one method +transition/porting, so existing modules can migrate their codebase one method at a time. Moreover, Cython eventually will generate HPy code, so extension module written in Cython will be able to benefit from HPy automatically. 
@@ -53,10 +53,10 @@ Target ABI ----------- -When compiling an HPy extension you can choose two different target ABI: +When compiling an HPy extension you can choose two different target ABIs: - **HPy/CPython ABI**: in this case, ``hpy.h`` contains a set of macros and - static inline functions which translates at compilation time the HPy API + static inline functions which at compilation time translates the HPy API into the standard C-API: the compiled module will have no performance penalty and it will have an filename like ``foo.cpython-37m-x86_64-linux-gnu.so``. @@ -64,14 +64,15 @@ - **Universal HPy ABI**: as the name implies, extension modules compiled this way are "universal" and can be loaded unmodified by multiple Python interpreters and version. Moreover, it will be possible to dynamically - enable a special debug mode which will make it easy to find e.g. unclosed + enable a special debug mode which will make it easy to find e.g., open handles or memory leaks, **without having to recompile the extension**. Universal modules can be loaded **also** on CPython, thanks to the -``hpy_universal`` module which is under development: because of an extra layer -of indirection, extensions compiled with the universal ABI will face a small -performance penalty compared to the ones using the HPy/CPython ABI. +``hpy_universal`` module which is under development. An extra layer of +indirection enables loading extensions compiled with the universal ABI. Users +of ``hpy_universal`` will face a small performance penalty compared to the ones +using the HPy/CPython ABI. 
This setup gives several benefits: @@ -82,15 +83,15 @@ extension for each relevant version of CPython, as they are doing now - projects for which runtime speed is less important will have the choice of - distributing a single binary which will work on any version of Python - + distributing a single binary which will work on any version and + implementation of Python A simple example ----------------- The HPy repo contains a `proof of concept`_ module. Here is a simplified -version which illustrates how an HPy module looks like: +version which illustrates what a HPy module looks like: .. sourcecode:: C From pypy.commits at gmail.com Tue Dec 17 13:08:50 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 17 Dec 2019 10:08:50 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <5df919b2.1c69fb81.7f345.e365@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5970:128bb83a6384 Date: 2019-12-17 20:08 +0200 http://bitbucket.org/pypy/extradoc/changeset/128bb83a6384/ Log: tweaks diff --git a/blog/draft/2019-12-hpy-sprint.rst b/blog/draft/2019-12-hpy-sprint.rst --- a/blog/draft/2019-12-hpy-sprint.rst +++ b/blog/draft/2019-12-hpy-sprint.rst @@ -38,10 +38,10 @@ visible nor accessible using the API, so each implementation if free to use what fits best. -The other major design goal of HPy is to allow an incremental -transition/porting, so existing modules can migrate their codebase one method -at a time. Moreover, Cython eventually will generate HPy code, so extension -module written in Cython will be able to benefit from HPy automatically. +The other major design goal of HPy is to allow incremental transition and +porting, so existing modules can migrate their codebase one method at a time. +Moreover, Cython eventually will generate HPy code, so extension module written +in Cython will be able to benefit from HPy automatically. More details can be found in the README of the official `HPy repository`_. 
@@ -53,22 +53,22 @@ Target ABI ----------- -When compiling an HPy extension you can choose two different target ABIs: +When compiling an HPy extension you can choose one of two different target ABIs: - **HPy/CPython ABI**: in this case, ``hpy.h`` contains a set of macros and - static inline functions which at compilation time translates the HPy API - into the standard C-API: the compiled module will have no performance - penalty and it will have an filename like + static inline functions. At compilation time this translates the HPy API + into the standard C-API. The compiled module will have no performance + penalty, and it will have a "standard" filename like ``foo.cpython-37m-x86_64-linux-gnu.so``. - **Universal HPy ABI**: as the name implies, extension modules compiled this way are "universal" and can be loaded unmodified by multiple Python - interpreters and version. Moreover, it will be possible to dynamically + interpreters and versions. Moreover, it will be possible to dynamically enable a special debug mode which will make it easy to find e.g., open handles or memory leaks, **without having to recompile the extension**. -Universal modules can be loaded **also** on CPython, thanks to the +Universal modules can **also** be loaded on CPython, thanks to the ``hpy_universal`` module which is under development. An extra layer of indirection enables loading extensions compiled with the universal ABI. Users of ``hpy_universal`` will face a small performance penalty compared to the ones @@ -76,15 +76,15 @@ This setup gives several benefits: - - extension developers can use the extra debug features given by the - Universal ABI with no need to use a special debug version of Python + - Extension developers can use the extra debug features given by the + Universal ABI with no need to use a special debug version of Python. 
- - projects which need the maximum level of performance can compile their - extension for each relevant version of CPython, as they are doing now + - Projects which need the maximum level of performance can compile their + extension for each relevant version of CPython, as they are doing now. - - projects for which runtime speed is less important will have the choice of + - Projects for which runtime speed is less important will have the choice of distributing a single binary which will work on any version and - implementation of Python + implementation of Python. A simple example @@ -132,7 +132,7 @@ } -People who are familiar with the current C-API will surely notice lots of +People who are familiar with the current C-API will surely notice many similarities. The biggest differences are: - Instead of ``PyObject *``, objects have the type ``HPy``, which as @@ -174,7 +174,7 @@ input for parts of the projects which are automatically generated. Currently, this lives `public_api.h`_. - 2. A set of header files which can be used to compile extension module: + 2. A set of header files which can be used to compile extension modules: depending on whether the flag ``-DHPY_UNIVERSAL_ABI`` is passed to the compiler, the extension can target the `HPy/CPython ABI`_ or the `HPy Universal ABI`_ @@ -194,7 +194,7 @@ Moreover, we started a `PyPy branch`_ in which to implement the ``hpy_univeral`` module: at the moment of writing PyPy can pass all the HPy -tests apart the ones which allows to convert to and from ``PyObject *``. +tests apart the ones which allow conversion to and from ``PyObject *``. Among the other things, this means that it is already possible to load the very same binary module in both CPython and PyPy, which is impressive on its own :). @@ -212,7 +212,7 @@ - it uses only a small subset of the Python API This repo contains the `HPy port of ultrajson`. This commit_ shows an example -of how the porting looks like. +of what the porting looks like. 
``ujson_hpy`` is also a very good example of incremental migration: so far only ``ujson.loads`` is implemented using the HPy API, while ``ujson.dumps`` @@ -294,26 +294,26 @@ --------------------------------- We think we can be very satisfied about what we have got so far. The -development of HPy just started but these early results seem to indicate that +development of HPy is quite new, but these early results seem to indicate that we are on the right track to bring Python extensions into the future. At the moment, we can anticipate some of the next steps in the development of HPy: - - think about a proper API design: what we have done so far has + - Think about a proper API design: what we have done so far has been a "dumb" translation of the API we needed to run ``ujson``. However, one of the declared goal of HPy is to improve the design of the API. There will be a trade-off between the desire of having a clean, fresh new API and the need to be not too different than the old one, to make porting easier. Finding the sweet spot will not be easy! - - implement the "debug" mode, which will help developers to find - bugs such as leaking handles or using invalid handles + - Implement the "debug" mode, which will help developers to find + bugs such as leaking handles or using invalid handles. - - instruct Cython to emit HPy code on request + - Instruct Cython to emit HPy code on request. - - eventually, we will also want to try to port parts of ``numpy`` to HPy to + - Eventually, we will also want to try to port parts of ``numpy`` to HPy to finally solve the long-standing problem of sub-optimal ``numpy`` - performance in PyPy + performance in PyPy. Stay tuned! 
From pypy.commits at gmail.com Tue Dec 17 13:12:56 2019 From: pypy.commits at gmail.com (rlamy) Date: Tue, 17 Dec 2019 10:12:56 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Merged in olliemath/pypy/py3.6 (pull request #687) Message-ID: <5df91aa8.1c69fb81.5e60c.4800@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98308:efd0ab3f167c Date: 2019-12-17 18:12 +0000 http://bitbucket.org/pypy/pypy/changeset/efd0ab3f167c/ Log: Merged in olliemath/pypy/py3.6 (pull request #687) Py3.6 diff --git a/extra_tests/test_datetime.py b/extra_tests/test_datetime.py --- a/extra_tests/test_datetime.py +++ b/extra_tests/test_datetime.py @@ -350,3 +350,31 @@ d2 = d.replace(hour=7) assert type(d2) is MyDatetime assert d2 == datetime.datetime(2016, 4, 5, 7, 2, 3) + +def test_normalize_pair(): + normalize = datetime._normalize_pair + + assert normalize(1, 59, 60) == (1, 59) + assert normalize(1, 60, 60) == (2, 0) + assert normalize(1, 95, 60) == (2, 35) + +def test_normalize_date(): + normalize = datetime._normalize_date + + # Huge year is caught correctly + with pytest.raises(OverflowError): + normalize(1000 * 1000, 1, 1) + # Normal dates should be unchanged + assert normalize(3000, 1, 1) == (3000, 1, 1) + # Month overflows year boundary + assert normalize(2001, 24, 1) == (2002, 12, 1) + # Day overflows month boundary + assert normalize(2001, 14, 31) == (2002, 3, 3) + # Leap years? 
:S + assert normalize(2001, 1, 61) == (2001, 3, 2) + assert normalize(2000, 1, 61) == (2000, 3, 1) + +def test_normalize_datetime(): + normalize = datetime._normalize_datetime + abnormal = (2002, 13, 35, 30, 95, 75, 1000001) + assert normalize(*abnormal) == (2003, 2, 5, 7, 36, 16, 1) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1416,9 +1416,13 @@ self.__setstate(year, month) self._hashcode = -1 return self - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) + elif isinstance(year, tuple) and len(year) == 7: + # Internal operation - numbers guaranteed to be valid + year, month, day, hour, minute, second, microsecond = year + else: + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) _check_tzinfo_arg(tzinfo) self = dateinterop.__new__(cls) self._year = int(year) @@ -1890,20 +1894,18 @@ "Add a datetime and a timedelta." 
if not isinstance(other, timedelta): return NotImplemented - delta = timedelta(self.toordinal(), - hours=self._hour, - minutes=self._minute, - seconds=self._second, - microseconds=self._microsecond) - delta += other - hour, rem = divmod(delta.seconds, 3600) - minute, second = divmod(rem, 60) - if 0 < delta.days <= _MAXORDINAL: - return datetime.combine(date.fromordinal(delta.days), - time(hour, minute, second, - delta.microseconds, - tzinfo=self._tzinfo)) - raise OverflowError("result out of range") + + result = _normalize_datetime( + self._year, + self._month, + self._day + other.days, + self._hour, + self._minute, + self._second + other.seconds, + self._microsecond + other.microseconds, + ) + + return datetime(result, tzinfo=self._tzinfo) __radd__ = __add__ @@ -2000,6 +2002,65 @@ datetime.resolution = timedelta(microseconds=1) +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo + + +def _normalize_datetime(y, m, d, hh, mm, ss, us): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d) + return y, m, d, hh, mm, ss, us + + +def _normalize_date(year, month, day): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 + + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). 
+ # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) + + if not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day + + def _isoweek1monday(year): # Helper to calculate the day number of the Monday starting week 1 # XXX This could be done more efficiently From pypy.commits at gmail.com Tue Dec 17 13:13:08 2019 From: pypy.commits at gmail.com (olliemath) Date: Tue, 17 Dec 2019 10:13:08 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Speed up py3 datetime __add__ method. Message-ID: <5df91ab4.1c69fb81.ae3f4.92b7@mx.google.com> Author: olliemath Branch: py3.6 Changeset: r98305:513c179fed54 Date: 2019-12-13 23:44 +0000 http://bitbucket.org/pypy/pypy/changeset/513c179fed54/ Log: Speed up py3 datetime __add__ method. 
diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1416,9 +1416,13 @@ self.__setstate(year, month) self._hashcode = -1 return self - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) + elif isinstance(year, tuple) and len(year) == 7: + # Internal operation - numbers guaranteed to be valid + year, month, day, hour, minute, second, microsecond = year + else: + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) _check_tzinfo_arg(tzinfo) self = dateinterop.__new__(cls) self._year = int(year) @@ -1890,20 +1894,18 @@ "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented - delta = timedelta(self.toordinal(), - hours=self._hour, - minutes=self._minute, - seconds=self._second, - microseconds=self._microsecond) - delta += other - hour, rem = divmod(delta.seconds, 3600) - minute, second = divmod(rem, 60) - if 0 < delta.days <= _MAXORDINAL: - return datetime.combine(date.fromordinal(delta.days), - time(hour, minute, second, - delta.microseconds, - tzinfo=self._tzinfo)) - raise OverflowError("result out of range") + + result = _normalize_datetime( + self._year, + self._month, + self._day + other.days, + self._hour, + self._minute, + self._second + other.seconds, + self._microsecond + other.microseconds, + ) + + return datetime(result, tzinfo=self._tzinfo) __radd__ = __add__ @@ -2000,6 +2002,65 @@ datetime.resolution = timedelta(microseconds=1) +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo + + +def _normalize_datetime(y, m, d, hh, mm, ss, us): + # Normalize all the inputs, and store the normalized values. 
+ ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d) + return y, m, d, hh, mm, ss, us + + +def _normalize_date(year, month, day): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 + + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). + # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). 
+ if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) + + if not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day + + def _isoweek1monday(year): # Helper to calculate the day number of the Monday starting week 1 # XXX This could be done more efficiently From pypy.commits at gmail.com Tue Dec 17 13:13:10 2019 From: pypy.commits at gmail.com (olliemath) Date: Tue, 17 Dec 2019 10:13:10 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Add tests for datetime normalize functions. Message-ID: <5df91ab6.1c69fb81.db2c9.4de7@mx.google.com> Author: olliemath Branch: py3.6 Changeset: r98306:8fe5d09e905c Date: 2019-12-13 23:59 +0000 http://bitbucket.org/pypy/pypy/changeset/8fe5d09e905c/ Log: Add tests for datetime normalize functions. 
diff --git a/lib-python/3/test/datetimetester.py b/lib-python/3/test/datetimetester.py --- a/lib-python/3/test/datetimetester.py +++ b/lib-python/3/test/datetimetester.py @@ -100,6 +100,34 @@ self.assertEqual(dar(6, -4), -2) self.assertEqual(dar(-6, -4), 2) + def test_normalize_pair(self): + normalize = datetime_module._normalize_pair + + self.assertEqual(normalize(1, 59, 60), (1, 59)) + self.assertEqual(normalize(1, 60, 60), (2, 0)) + self.assertEqual(normalize(1, 95, 60), (2, 35)) + + def test_normalize_date(self): + normalize = datetime_module._normalize_date + + # Huge year is caught correctly + with self.assertRaises(OverflowError): + normalize(1000 * 1000, 1, 1) + # Normal dates should be unchanged + self.assertEqual(normalize(3000, 1, 1), (3000, 1, 1)) + # Month overflows year boundary + self.assertEqual(normalize(2001, 24, 1), (2002, 12, 1)) + # Day overflows month boundary + self.assertEqual(normalize(2001, 14, 31), (2002, 3, 3)) + # Leap years? :S + self.assertEqual(normalize(2001, 1, 61), (2001, 3, 2)) + self.assertEqual(normalize(2000, 1, 61), (2000, 3, 1)) + + def test_normalize_datetime(self): + normalize = datetime_module._normalize_datetime + abnormal = (2002, 13, 35, 30, 95, 75, 1000001) + self.assertEqual(normalize(*abnormal), (2003, 2, 5, 7, 36, 16, 1)) + ############################################################################# # tzinfo tests From pypy.commits at gmail.com Tue Dec 17 13:13:15 2019 From: pypy.commits at gmail.com (olliemath) Date: Tue, 17 Dec 2019 10:13:15 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Move normalize datetime tests to extra_tests Message-ID: <5df91abb.1c69fb81.356f2.a4b9@mx.google.com> Author: olliemath Branch: py3.6 Changeset: r98307:5a886620335c Date: 2019-12-17 15:24 +0000 http://bitbucket.org/pypy/pypy/changeset/5a886620335c/ Log: Move normalize datetime tests to extra_tests diff --git a/extra_tests/test_datetime.py b/extra_tests/test_datetime.py --- a/extra_tests/test_datetime.py +++ 
b/extra_tests/test_datetime.py @@ -350,3 +350,31 @@ d2 = d.replace(hour=7) assert type(d2) is MyDatetime assert d2 == datetime.datetime(2016, 4, 5, 7, 2, 3) + +def test_normalize_pair(): + normalize = datetime._normalize_pair + + assert normalize(1, 59, 60) == (1, 59) + assert normalize(1, 60, 60) == (2, 0) + assert normalize(1, 95, 60) == (2, 35) + +def test_normalize_date(): + normalize = datetime._normalize_date + + # Huge year is caught correctly + with pytest.raises(OverflowError): + normalize(1000 * 1000, 1, 1) + # Normal dates should be unchanged + assert normalize(3000, 1, 1) == (3000, 1, 1) + # Month overflows year boundary + assert normalize(2001, 24, 1) == (2002, 12, 1) + # Day overflows month boundary + assert normalize(2001, 14, 31) == (2002, 3, 3) + # Leap years? :S + assert normalize(2001, 1, 61) == (2001, 3, 2) + assert normalize(2000, 1, 61) == (2000, 3, 1) + +def test_normalize_datetime(): + normalize = datetime._normalize_datetime + abnormal = (2002, 13, 35, 30, 95, 75, 1000001) + assert normalize(*abnormal) == (2003, 2, 5, 7, 36, 16, 1) diff --git a/lib-python/3/test/datetimetester.py b/lib-python/3/test/datetimetester.py --- a/lib-python/3/test/datetimetester.py +++ b/lib-python/3/test/datetimetester.py @@ -100,34 +100,6 @@ self.assertEqual(dar(6, -4), -2) self.assertEqual(dar(-6, -4), 2) - def test_normalize_pair(self): - normalize = datetime_module._normalize_pair - - self.assertEqual(normalize(1, 59, 60), (1, 59)) - self.assertEqual(normalize(1, 60, 60), (2, 0)) - self.assertEqual(normalize(1, 95, 60), (2, 35)) - - def test_normalize_date(self): - normalize = datetime_module._normalize_date - - # Huge year is caught correctly - with self.assertRaises(OverflowError): - normalize(1000 * 1000, 1, 1) - # Normal dates should be unchanged - self.assertEqual(normalize(3000, 1, 1), (3000, 1, 1)) - # Month overflows year boundary - self.assertEqual(normalize(2001, 24, 1), (2002, 12, 1)) - # Day overflows month boundary - 
self.assertEqual(normalize(2001, 14, 31), (2002, 3, 3)) - # Leap years? :S - self.assertEqual(normalize(2001, 1, 61), (2001, 3, 2)) - self.assertEqual(normalize(2000, 1, 61), (2000, 3, 1)) - - def test_normalize_datetime(self): - normalize = datetime_module._normalize_datetime - abnormal = (2002, 13, 35, 30, 95, 75, 1000001) - self.assertEqual(normalize(*abnormal), (2003, 2, 5, 7, 36, 16, 1)) - ############################################################################# # tzinfo tests From pypy.commits at gmail.com Wed Dec 18 07:56:30 2019 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 18 Dec 2019 04:56:30 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: new plots Message-ID: <5dfa21fe.1c69fb81.2dca7.bff0@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: extradoc Changeset: r5971:0415a5f5f1ec Date: 2019-10-07 15:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/0415a5f5f1ec/ Log: new plots diff too long, truncating to 2000 out of 19079 lines diff --git a/blog/draft/2019_json_mem_languages.svg b/blog/draft/2019_json_mem_languages.svg new file mode 100644 --- /dev/null +++ b/blog/draft/2019_json_mem_languages.svg @@ -0,0 +1,5664 @@ + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Author: Carl Friedrich Bolz-Tereick Branch: extradoc Changeset: r5972:b927dc27a328 Date: 2019-12-18 13:54 +0100 http://bitbucket.org/pypy/extradoc/changeset/b927dc27a328/ Log: add source of diagram diff --git a/blog/draft/2019-10-json.rst b/blog/draft/2019-10-json.rst --- a/blog/draft/2019-10-json.rst +++ b/blog/draft/2019-10-json.rst @@ -355,7 +355,7 @@ the deserialized data-structure (Node uses two bytes per character in a string, [in CPython it depends](https://www.python.org/dev/peps/pep-0393/) but 4 bytes on my -machine, PyPyBaseline uses four bytes,PyPy and RapidJSON use utf-8). But +machine, PyPyBaseline uses four bytes, PyPy and RapidJSON use utf-8). But it's still interesting to get some ballpark numbers. The results are as follows: diff --git a/blog/draft/2019_json_nytimes.dot b/blog/draft/2019_json_nytimes.dot new file mode 100644 --- /dev/null +++ b/blog/draft/2019_json_nytimes.dot @@ -0,0 +1,192 @@ +digraph G { +node [fillcolor=white]; +94396518359136 [shape=box, label=base]; +94396518359136 -> 94396518465904 [label="copyright", color=blue]; +94396518465904 [shape=box, label="#1\nchildren: 1\n0/1 (0.000000%)", fillcolor=lightgray]; +94396518465904 -> 94396518465792 [label="response", color=blue]; +94396518465792 [shape=box, label="#1\nchildren: 0", fillcolor=lightslategray]; +94396518359136 -> 94396518466128 [label="meta"]; +94396518466128 [shape=box, label="#1\nchildren: 1", fillcolor=lightgray]; +94396518466128 -> 94396518465680 [label="docs", color=blue]; +94396518465680 [shape=box, label="#1\nchildren: 0", fillcolor=lightslategray]; +94396518359136 -> 94396518466016 [label="hits"]; +94396518466016 [shape=box, label="#1\nchildren: 0", fillcolor=lightgray]; +94396518359136 -> 94396518058880 [label="web_url"]; +94396518058880 [shape=box, label="#5902\nchildren: 1\n0/5902 (0.000000%)"]; +94396518058880 -> 94396518058768 
[label="snippet", color=blue]; +94396518058768 [shape=box, label="#5902\nchildren: 1\n2/5899 (0.000339%)"]; +94396518058768 -> 94396518465568 [label="lead_paragraph", color=blue]; +94396518465568 [shape=box, label="#5902\nchildren: 1\n2/5694 (0.000351%)"]; +94396518465568 -> 94396518468032 [label="abstract", color=blue]; +94396518468032 [shape=box, label="#5902\nchildren: 1\n0/2119 (0.000000%)"]; +94396518468032 -> 94396518467920 [label="print_page", color=blue]; +94396518467920 [shape=box, label="#5902\nchildren: 1\n3598/3717 (0.967985%)"]; +94396518467920 -> 94396518467808 [label="blog", color=blue]; +94396518467808 [shape=box, label="#5902\nchildren: 1"]; +94396518467808 -> 94396518467696 [label="source", color=blue]; +94396518467696 [shape=box, label="#5902\nchildren: 1\n5901/5902 (0.999831%)"]; +94396518467696 -> 94396518467584 [label="multimedia", color=blue]; +94396518467584 [shape=box, label="#5902\nchildren: 1"]; +94396518467584 -> 94396518467472 [label="headline", color=blue]; +94396518467472 [shape=box, label="#5902\nchildren: 1"]; +94396518467472 -> 94396518467360 [label="keywords", color=blue]; +94396518467360 [shape=box, label="#5902\nchildren: 1"]; +94396518467360 -> 94396518467248 [label="pub_date", color=blue]; +94396518467248 [shape=box, label="#5902\nchildren: 1\n4652/5902 (0.788207%)"]; +94396518467248 -> 94396518467136 [label="document_type", color=blue]; +94396518467136 [shape=box, label="#5902\nchildren: 1\n5899/5902 (0.999492%)"]; +94396518467136 -> 94396518467024 [label="news_desk", color=blue]; +94396518467024 [shape=box, label="#5902\nchildren: 1\n5657/5794 (0.976355%)"]; +94396518467024 -> 94396518466912 [label="section_name", color=blue]; +94396518466912 [shape=box, label="#5902\nchildren: 1\n5834/5902 (0.988478%)"]; +94396518466912 -> 94396518466800 [label="subsection_name", color=blue]; +94396518466800 [shape=box, label="#5902\nchildren: 1\n2750/2877 (0.955857%)"]; +94396518466800 -> 94396518466688 [label="byline", color=blue]; 
+94396518466688 [shape=box, label="#5902\nchildren: 1"]; +94396518466688 -> 94396518466576 [label="type_of_material", color=blue]; +94396518466576 [shape=box, label="#5902\nchildren: 1\n5877/5902 (0.995764%)"]; +94396518466576 -> 94396518466464 [label="_id", color=blue]; +94396518466464 [shape=box, label="#5902\nchildren: 1\n0/5902 (0.000000%)"]; +94396518466464 -> 94396518466352 [label="word_count", color=blue]; +94396518466352 [shape=box, label="#5902\nchildren: 1\n24/5662 (0.004239%)"]; +94396518466352 -> 94396518466240 [label="slideshow_credits", color=blue]; +94396518466240 [shape=box, label="#5902\nchildren: 0\n160/356 (0.449438%)"]; +94396518359136 -> 94396518471728 [label="width"]; +94396518471728 [shape=box, label="#15310\nchildren: 1"]; +94396518471728 -> 94396518471616 [label="url", color=blue]; +94396518471616 [shape=box, label="#15310\nchildren: 1\n3/15310 (0.000196%)"]; +94396518471616 -> 94396518471504 [label="height", color=blue]; +94396518471504 [shape=box, label="#15310\nchildren: 1"]; +94396518471504 -> 94396518471392 [label="subtype", color=blue]; +94396518471392 [shape=box, label="#15310\nchildren: 1\n15307/15310 (0.999804%)"]; +94396518471392 -> 94396518471280 [label="legacy", color=blue]; +94396518471280 [shape=box, label="#15310\nchildren: 1"]; +94396518471280 -> 94396518471168 [label="type", color=blue]; +94396518471168 [shape=box, label="#15310\nchildren: 0\n15309/15310 (0.999935%)"]; +94396518359136 -> 94396518472736 [label="wide"]; +94396518472736 [shape=box, label="#5128\nchildren: 1\n69/5128 (0.013456%)"]; +94396518472736 -> 94396518472624 [label="wideheight", color=blue]; +94396518472624 [shape=box, label="#5128\nchildren: 1\n5127/5128 (0.999805%)"]; +94396518472624 -> 94396518472512 [label="widewidth", color=blue]; +94396518472512 [shape=box, label="#5128\nchildren: 0\n5127/5128 (0.999805%)"]; +94396518359136 -> 94396518472400 [label="xlargewidth"]; +94396518472400 [shape=box, label="#5053\nchildren: 1\n5052/5053 (0.999802%)"]; 
+94396518472400 -> 94396518472288 [label="xlarge", color=blue]; +94396518472288 [shape=box, label="#5053\nchildren: 1\n67/5053 (0.013259%)"]; +94396518472288 -> 94396518472176 [label="xlargeheight", color=blue]; +94396518472176 [shape=box, label="#5053\nchildren: 0\n3862/5053 (0.764298%)"]; +94396518359136 -> 94396518472064 [label="thumbnailheight"]; +94396518472064 [shape=box, label="#5129\nchildren: 1\n5128/5129 (0.999805%)"]; +94396518472064 -> 94396518471952 [label="thumbnail", color=blue]; +94396518471952 [shape=box, label="#5129\nchildren: 1\n68/5129 (0.013258%)"]; +94396518471952 -> 94396518471840 [label="thumbnailwidth", color=blue]; +94396518471840 [shape=box, label="#5129\nchildren: 0\n5129/5129 (1.000000%)"]; +94396518359136 -> 94396518469936 [label="main"]; +94396518469936 [shape=box, label="#5901\nchildren: 5\n3/5901 (0.000508%)"]; +94396518469936 -> 94396518470496 [label="kicker"]; +94396518470496 [shape=box, label="#381\nchildren: 1\n345/381 (0.905512%)"]; +94396518470496 -> 94396518470608 [label="print_headline", color=blue]; +94396518470608 [shape=box, label="#23\nchildren: 1\n3/22 (0.136364%)"]; +94396518470608 -> 94396510855984 [label="content_kicker", color=blue]; +94396510855984 [shape=box, label="#1\nchildren: 0\n1/1 (1.000000%)", fillcolor=lightgray]; +94396518469936 -> 94396518470048 [label="content_kicker", color=blue]; +94396518470048 [shape=box, label="#1971\nchildren: 3\n1408/1971 (0.714358%)"]; +94396518470048 -> 94396518470160 [label="kicker", color=blue]; +94396518470160 [shape=box, label="#1642\nchildren: 1\n1310/1642 (0.797808%)"]; +94396518470160 -> 94396518470272 [label="print_headline", color=blue]; +94396518470272 [shape=box, label="#1242\nchildren: 0\n5/1242 (0.004026%)"]; +94396518470048 -> 94396518470384 [label="print_headline"]; +94396518470384 [shape=box, label="#110\nchildren: 0\n2/110 (0.018182%)", fillcolor=lightgray]; +94396518469936 -> 94396518469824 [label="print_headline"]; +94396518469824 [shape=box, 
label="#2350\nchildren: 0\n8/2350 (0.003404%)", fillcolor=lightgray]; +94396518469936 -> 94396521222112 [label="sub"]; +94396521222112 [shape=box, label="#3\nchildren: 1\n0/3 (0.000000%)", fillcolor=lightgray]; +94396521222112 -> 94396521222000 [label="print_headline", color=blue]; +94396521222000 [shape=box, label="#1\nchildren: 0\n0/1 (0.000000%)", fillcolor=lightslategray]; +94396518359136 -> 94396518469488 [label="rank"]; +94396518469488 [shape=box, label="#27623\nchildren: 3\n27612/27623 (0.999602%)"]; +94396518469488 -> 94396518469376 [label="is_major", color=blue]; +94396518469376 [shape=box, label="#26885\nchildren: 1\n26883/26885 (0.999926%)"]; +94396518469376 -> 94396518469264 [label="name", color=blue]; +94396518469264 [shape=box, label="#26885\nchildren: 1\n26878/26885 (0.999740%)"]; +94396518469264 -> 94396518469152 [label="value", color=blue]; +94396518469152 [shape=box, label="#26885\nchildren: 0\n41/26885 (0.001525%)"]; +94396518469488 -> 94396518469600 [label="name"]; +94396518469600 [shape=box, label="#738\nchildren: 1\n737/738 (0.998645%)"]; +94396518469600 -> 94396518469712 [label="value", color=blue]; +94396518469712 [shape=box, label="#738\nchildren: 0\n44/738 (0.059621%)"]; +94396518359136 -> 94396518471056 [label="person"]; +94396518471056 [shape=box, label="#4299\nchildren: 1"]; +94396518471056 -> 94396518470832 [label="original", color=blue]; +94396518470832 [shape=box, label="#4299\nchildren: 1\n33/4299 (0.007676%)"]; +94396518470832 -> 94396518470944 [label="organization", color=blue]; +94396518470944 [shape=box, label="#282\nchildren: 0\n277/282 (0.982270%)", fillcolor=lightgray]; +94396518359136 -> 94396518468928 [label="organization"]; +94396518468928 [shape=box, label="#4750\nchildren: 1"]; +94396518468928 -> 94396518468816 [label="role", color=blue]; +94396518468816 [shape=box, label="#4750\nchildren: 3\n4749/4750 (0.999789%)"]; +94396518468816 -> 94396518468704 [label="firstname", color=blue]; +94396518468704 [shape=box, 
label="#4734\nchildren: 1\n3133/4734 (0.661808%)"]; +94396518468704 -> 94396518468592 [label="rank", color=blue]; +94396518468592 [shape=box, label="#4734\nchildren: 1"]; +94396518468592 -> 94396518468480 [label="lastname", color=blue]; +94396518468480 [shape=box, label="#4630\nchildren: 0\n37/4630 (0.007991%)"]; +94396518468816 -> 94396518469040 [label="rank"]; +94396518469040 [shape=box, label="#16\nchildren: 0", fillcolor=lightgray]; +94396518359136 -> 94396518606400 [label="firstname"]; +94396518606400 [shape=box, label="#529\nchildren: 1\n430/529 (0.812854%)"]; +94396518606400 -> 94396518606288 [label="middlename", color=blue]; +94396518606288 [shape=box, label="#529\nchildren: 1\n436/529 (0.824197%)"]; +94396518606288 -> 94396518606176 [label="lastname", color=blue]; +94396518606176 [shape=box, label="#529\nchildren: 1\n345/529 (0.652174%)"]; +94396518606176 -> 94396518606064 [label="rank", color=blue]; +94396518606064 [shape=box, label="#529\nchildren: 1"]; +94396518606064 -> 94396518605952 [label="role", color=blue]; +94396518605952 [shape=box, label="#529\nchildren: 1\n529/529 (1.000000%)"]; +94396518605952 -> 94396518605840 [label="organization", color=blue]; +94396518605840 [shape=box, label="#529\nchildren: 0"]; +94396518359136 -> 94396518468368 [label="contributor"]; +94396518468368 [shape=box, label="#717\nchildren: 1\n21/441 (0.047619%)"]; +94396518468368 -> 94396518468256 [label="person", color=blue]; +94396518468256 [shape=box, label="#717\nchildren: 1"]; +94396518468256 -> 94396518468144 [label="original", color=blue]; +94396518468144 [shape=box, label="#717\nchildren: 1\n211/717 (0.294282%)"]; +94396518468144 -> 94396521222672 [label="organization", color=blue]; +94396521222672 [shape=box, label="#14\nchildren: 0\n14/14 (1.000000%)", fillcolor=lightgray]; +94396518359136 -> 94396518611104 [label="qualifier"]; +94396518611104 [shape=box, label="#38\nchildren: 1\n34/38 (0.894737%)"]; +94396518611104 -> 94396518610992 [label="firstname", 
color=blue]; +94396518610992 [shape=box, label="#38\nchildren: 3\n27/38 (0.710526%)"]; +94396518610992 -> 94396518610880 [label="lastname", color=blue]; +94396518610880 [shape=box, label="#19\nchildren: 1\n11/19 (0.578947%)"]; +94396518610880 -> 94396518610768 [label="rank", color=blue]; +94396518610768 [shape=box, label="#19\nchildren: 1"]; +94396518610768 -> 94396518610656 [label="role", color=blue]; +94396518610656 [shape=box, label="#19\nchildren: 1\n19/19 (1.000000%)"]; +94396518610656 -> 94396518610544 [label="organization", color=blue]; +94396518610544 [shape=box, label="#19\nchildren: 0"]; +94396518610992 -> 94396547805712 [label="middlename"]; +94396547805712 [shape=box, label="#19\nchildren: 1\n18/19 (0.947368%)"]; +94396547805712 -> 94396547805600 [label="lastname", color=blue]; +94396547805600 [shape=box, label="#19\nchildren: 1\n13/19 (0.684211%)"]; +94396547805600 -> 94396547805488 [label="rank", color=blue]; +94396547805488 [shape=box, label="#19\nchildren: 1"]; +94396547805488 -> 94396547805376 [label="role", color=blue]; +94396547805376 [shape=box, label="#19\nchildren: 1\n19/19 (1.000000%)"]; +94396547805376 -> 94396547805264 [label="organization", color=blue]; +94396547805264 [shape=box, label="#19\nchildren: 0"]; +94396518359136 -> 94396510856096 [label="isMajor"]; +94396510856096 [shape=box, label="#10\nchildren: 1\n10/10 (1.000000%)"]; +94396510856096 -> 94396510856208 [label="rank", color=blue]; +94396510856208 [shape=box, label="#10\nchildren: 1"]; +94396510856208 -> 94396510856320 [label="name", color=blue]; +94396510856320 [shape=box, label="#10\nchildren: 1\n10/10 (1.000000%)"]; +94396510856320 -> 94396510856432 [label="value", color=blue]; +94396510856432 [shape=box, label="#10\nchildren: 0\n3/10 (0.300000%)"]; +94396518359136 -> 94396532465232 [label="seo"]; +94396532465232 [shape=box, label="#1\nchildren: 1\n0/1 (0.000000%)", fillcolor=lightgray]; +94396532465232 -> 94396532465120 [label="main", color=blue]; +94396532465120 [shape=box, 
label="#1\nchildren: 1\n0/1 (0.000000%)", fillcolor=lightslategray]; +94396532465120 -> 94396532465008 [label="kicker", color=blue]; +94396532465008 [shape=box, label="#1\nchildren: 0\n0/1 (0.000000%)", fillcolor=lightslategray]; +} \ No newline at end of file diff --git a/blog/draft/2019_json_transition_tree.svg b/blog/draft/2019_json_transition_tree.svg new file mode 100644 --- /dev/null +++ b/blog/draft/2019_json_transition_tree.svg @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From pypy.commits at gmail.com Wed Dec 18 08:12:07 2019 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 18 Dec 2019 05:12:07 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: weaken claim about cython, mention Tim and Windel Message-ID: <5dfa25a7.1c69fb81.c060d.b9c0@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: extradoc Changeset: r5973:c14cba0877bb Date: 2019-12-18 14:11 +0100 http://bitbucket.org/pypy/extradoc/changeset/c14cba0877bb/ Log: weaken claim about cython, mention Tim and Windel diff --git a/blog/draft/2019-12-hpy-sprint.rst b/blog/draft/2019-12-hpy-sprint.rst --- a/blog/draft/2019-12-hpy-sprint.rst +++ b/blog/draft/2019-12-hpy-sprint.rst @@ -12,7 +12,13 @@ The idea of HPy was born during EuroPython 2019 in Basel, where there was an informal meeting which included core developers of PyPy, CPython (Victor -Stinner and Mark Shannon) and Cython (Stefan Behnel). 
+Stinner and Mark Shannon) and Cython (Stefan Behnel). The ideas were later also +discussed with Tim Felgentreff of GraalPython_, to make sure they would also be +applicable to this very different implementation, Windel Bouwman of RustPython_ +is following the project as well. + +.. _GraalPython: https://github.com/graalvm/graalpython +.. _RustPython: https://github.com/RustPython/RustPython All of us agreed that the current design of the CPython C API is problematic for various reasons and, in particular, because it is too tied to the current @@ -40,8 +46,8 @@ The other major design goal of HPy is to allow incremental transition and porting, so existing modules can migrate their codebase one method at a time. -Moreover, Cython eventually will generate HPy code, so extension module written -in Cython will be able to benefit from HPy automatically. +Moreover, Cython is considering to optionally generate HPy code, so extension +module written in Cython would be able to benefit from HPy automatically. More details can be found in the README of the official `HPy repository`_. 
From pypy.commits at gmail.com Wed Dec 18 16:09:48 2019 From: pypy.commits at gmail.com (rlamy) Date: Wed, 18 Dec 2019 13:09:48 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Simplify test and convert it to use IPv4 Message-ID: <5dfa959c.1c69fb81.dd8ea.4080@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98314:1dd8529bdaff Date: 2019-12-18 21:08 +0000 http://bitbucket.org/pypy/pypy/changeset/1dd8529bdaff/ Log: Simplify test and convert it to use IPv4 diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -610,19 +610,14 @@ def test_recvmsg_issue2649(self): import _socket as socket - listener = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) + listener = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - listener.bind(('::1', 1234)) + listener.bind(('127.0.0.1', 1234)) - s = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) - IPV6_RECVERR = 25 - s.setsockopt(socket.IPPROTO_IPV6, IPV6_RECVERR, 1) - - s.sendto(b'x', ('::1', 1234)) - try: + s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + s.sendto(b'x', ('127.0.0.1', 1234)) + with raises(BlockingIOError): queue = s.recvmsg(1024, 1024, socket.MSG_ERRQUEUE) - except BlockingIOError as e: - assert True def test_buffer(self): # Test that send/sendall/sendto accept a buffer as arg From pypy.commits at gmail.com Wed Dec 18 16:34:44 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 18 Dec 2019 13:34:44 -0800 (PST) Subject: [pypy-commit] pypy default: extend existing horrible hack with more hackiness to copy lib (portable builds) Message-ID: <5dfa9b74.1c69fb81.bcd98.3d9b@mx.google.com> Author: Matti Picus Branch: Changeset: r98315:739335fb037c Date: 2019-12-18 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/739335fb037c/ Log: extend existing 
horrible hack with more hackiness to copy lib (portable builds) diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1301,6 +1301,11 @@ src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): caller.f_globals['copyfile'](src_library, dest_library) + src_lib = os.path.join(src_dir, '../lib') + if os.path.exists(src_lib): + # portable build + import shutil + shutil.copytree(src_lib, os.path.join(dest_dir, '../lib')) def _demo_posix(): From pypy.commits at gmail.com Wed Dec 18 16:34:46 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 18 Dec 2019 13:34:46 -0800 (PST) Subject: [pypy-commit] pypy default: invalid escape sequences fixed on upstream github.com/pypy/pyrepl#21 Message-ID: <5dfa9b76.1c69fb81.45622.3374@mx.google.com> Author: Matti Picus Branch: Changeset: r98316:a484c74b2bbc Date: 2019-12-18 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/a484c74b2bbc/ Log: invalid escape sequences fixed on upstream github.com/pypy/pyrepl#21 diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py --- a/lib_pypy/pyrepl/completing_reader.py +++ b/lib_pypy/pyrepl/completing_reader.py @@ -266,7 +266,7 @@ reader.ps1 = "c**> " reader.ps2 = "c/*> " reader.ps3 = "c|*> " - reader.ps4 = "c\*> " + reader.ps4 = r"c\*> " while reader.readline(): pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -58,7 +58,7 @@ return u[c] else: if unicodedata_.category(c).startswith('C'): - return '\u%04x'%(ord(c),) + return br'\u%04x'%(ord(c),) else: return c @@ -630,7 +630,7 @@ reader.ps1 = "**> " reader.ps2 = "/*> " reader.ps3 = "|*> " - reader.ps4 = "\*> " + reader.ps4 = r"\*> " while reader.readline(): pass From pypy.commits at gmail.com Wed Dec 18 16:34:48 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 18 Dec 2019 13:34:48 -0800 (PST) Subject: 
[pypy-commit] pypy py3.6: extend existing horrible hack with more hackiness to copy lib (portable builds) Message-ID: <5dfa9b78.1c69fb81.f1cda.5b84@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98317:0b647f05b74f Date: 2019-12-18 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/0b647f05b74f/ Log: extend existing horrible hack with more hackiness to copy lib (portable builds) diff --git a/lib-python/3/subprocess.py b/lib-python/3/subprocess.py --- a/lib-python/3/subprocess.py +++ b/lib-python/3/subprocess.py @@ -1657,3 +1657,9 @@ src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): caller.f_globals['copyfile'](src_library, dest_library) + src_lib = os.path.join(src_dir, '../lib') + if os.path.exists(src_lib): + # portable build + import shutil + shutil.copytree(src_lib, os.path.join(dest_dir, '../lib')) + From pypy.commits at gmail.com Wed Dec 18 16:34:50 2019 From: pypy.commits at gmail.com (mattip) Date: Wed, 18 Dec 2019 13:34:50 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5dfa9b7a.1c69fb81.2bd95.4e9f@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98318:010bdf9668e6 Date: 2019-12-18 23:33 +0200 http://bitbucket.org/pypy/pypy/changeset/010bdf9668e6/ Log: merge default into py3.6 diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py --- a/lib_pypy/pyrepl/completing_reader.py +++ b/lib_pypy/pyrepl/completing_reader.py @@ -266,7 +266,7 @@ reader.ps1 = "c**> " reader.ps2 = "c/*> " reader.ps3 = "c|*> " - reader.ps4 = "c\*> " + reader.ps4 = r"c\*> " while reader.readline(): pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -648,7 +648,7 @@ reader.ps1 = "**> " reader.ps2 = "/*> " reader.ps3 = "|*> " - reader.ps4 = "\*> " + reader.ps4 = r"\*> " while reader.readline(): pass diff --git a/rpython/translator/c/test/test_standalone.py 
b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1146,6 +1146,33 @@ out = cbuilder.cmdexec('') assert out.strip() == 'ok' + def test_int_manipulation(self): + # Distilled from micronumpy.descriptor._compute_hash + # which, for some version of gcc8 compiler produced + # out1 == out2 + from rpython.rlib.rarithmetic import intmask + + def entry_point(argv): + if len(argv) < 4: + print 'need 3 arguments, not %s' % str(argv) + return -1 + flags = 0 + x = 0x345678 + y = 0x345678 + s = str(argv[1])[0] + y = intmask((1000003 * y) ^ ord(s)) + y = intmask((1000003 * y) ^ ord(str(argv[2])[0])) + y = (1000003 * y) + y = intmask(y ^ flags) + y = intmask((1000003 * y) ^ int(argv[3])) + print y + return 0 + + t, cbuilder = self.compile(entry_point) + out1 = cbuilder.cmdexec(args=['i', '>', '64']) + out2 = cbuilder.cmdexec(args=['f', '>', '64']) + assert out1 != out2 + class TestThread(object): gcrootfinder = 'shadowstack' From pypy.commits at gmail.com Thu Dec 19 01:53:33 2019 From: pypy.commits at gmail.com (arigo) Date: Wed, 18 Dec 2019 22:53:33 -0800 (PST) Subject: [pypy-commit] pypy default: Issue #3128 Message-ID: <5dfb1e6d.1c69fb81.57d2b.c470@mx.google.com> Author: Armin Rigo Branch: Changeset: r98324:96dad0986a20 Date: 2019-12-19 07:53 +0100 http://bitbucket.org/pypy/pypy/changeset/96dad0986a20/ Log: Issue #3128 Potential fix in rare-case JIT optimizer diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -683,7 +683,14 @@ elif constvalue == 1: opnum = rop.GUARD_TRUE else: - raise AssertionError("uh?") + # Issue #3128: there might be rare cases where strange + # code is produced. That issue hits the assert from + # OptUnroll.inline_short_preamble's send_extra_operation(). 
+ # Better just disable this optimization than crash with + # an AssertionError here. Note also that such code might + # trigger an InvalidLoop to be raised later---so we must + # not crash here. + return op newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) return newop return op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -698,6 +698,15 @@ """ self.optimize_loop(ops, expected, preamble) + def test_guard_value_on_boolean_but_not_zero_or_one(self): + ops = """ + [i] + i1 = int_lt(i, 3) + guard_value(i1, -1) [i] + jump(i) + """ + py.test.raises(InvalidLoop, self.optimize_loop, ops, ops, ops) + def test_int_is_true_of_bool(self): ops = """ [i0, i1] From pypy.commits at gmail.com Thu Dec 19 06:58:15 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 03:58:15 -0800 (PST) Subject: [pypy-commit] pypy py3.6: venv: also copy/link lib/* files for portable builds Message-ID: <5dfb65d7.1c69fb81.74be0.cf92@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98325:2a8d1e70cf74 Date: 2019-12-19 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/2a8d1e70cf74/ Log: venv: also copy/link lib/* files for portable builds diff --git a/lib-python/3/venv/__init__.py b/lib-python/3/venv/__init__.py --- a/lib-python/3/venv/__init__.py +++ b/lib-python/3/venv/__init__.py @@ -233,6 +233,16 @@ copier(src_library, dest_library) if not os.path.islink(dest_library): os.chmod(dest_library, 0o755) + libsrc = os.path.join(context.python_dir, '..', 'lib') + if os.path.exists(libsrc): + # PyPy: also copy lib/*.so* for portable builds + libdst = os.path.join(context.env_dir, 'lib') + if not os.path.exists(libdst): + os.mkdir(libdst) + for f in os.listdir(libsrc): + src = os.path.join(libsrc, f) + dst = os.path.join(libdst, f) + copier(src, 
dst) # else: subdir = 'DLLs' From pypy.commits at gmail.com Thu Dec 19 06:58:17 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 03:58:17 -0800 (PST) Subject: [pypy-commit] pypy default: update release note for recent changes Message-ID: <5dfb65d9.1c69fb81.1c23e.03e9@mx.google.com> Author: Matti Picus Branch: Changeset: r98326:d1af0bd1b3d3 Date: 2019-12-19 13:55 +0200 http://bitbucket.org/pypy/pypy/changeset/d1af0bd1b3d3/ Log: update release note for recent changes diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -138,6 +138,10 @@ * Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit platforms rather than automatically using a ``SignedLongLong``, require an explicit ``r_int64()`` call instead +* Fix multithread contention when creating an object in cffi (PyPy only) +* Copy lib/* shared objects in portable builds when creating virtual + environments with virtualenv and venv +* Potential fix in rare-case JIT optimizer (`issue 3128`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -226,6 +230,7 @@ .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 .. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 +.. _`issue 3128`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 .. 
_13617: https://bugs.python.org/issue13617 From pypy.commits at gmail.com Thu Dec 19 06:58:19 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 03:58:19 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5dfb65db.1c69fb81.5c10a.f131@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98327:513377efa4e8 Date: 2019-12-19 13:55 +0200 http://bitbucket.org/pypy/pypy/changeset/513377efa4e8/ Log: merge default into py3.6 diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -138,6 +138,10 @@ * Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit platforms rather than automatically using a ``SignedLongLong``, require an explicit ``r_int64()`` call instead +* Fix multithread contention when creating an object in cffi (PyPy only) +* Copy lib/* shared objects in portable builds when creating virtual + environments with virtualenv and venv +* Potential fix in rare-case JIT optimizer (`issue 3128`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -226,6 +230,7 @@ .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 .. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 +.. _`issue 3128`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 .. _13617: https://bugs.python.org/issue13617 diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -683,7 +683,14 @@ elif constvalue == 1: opnum = rop.GUARD_TRUE else: - raise AssertionError("uh?") + # Issue #3128: there might be rare cases where strange + # code is produced. That issue hits the assert from + # OptUnroll.inline_short_preamble's send_extra_operation(). 
+ # Better just disable this optimization than crash with + # an AssertionError here. Note also that such code might + # trigger an InvalidLoop to be raised later---so we must + # not crash here. + return op newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) return newop return op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -698,6 +698,15 @@ """ self.optimize_loop(ops, expected, preamble) + def test_guard_value_on_boolean_but_not_zero_or_one(self): + ops = """ + [i] + i1 = int_lt(i, 3) + guard_value(i1, -1) [i] + jump(i) + """ + py.test.raises(InvalidLoop, self.optimize_loop, ops, ops, ops) + def test_int_is_true_of_bool(self): ops = """ [i0, i1] From pypy.commits at gmail.com Thu Dec 19 06:58:21 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 03:58:21 -0800 (PST) Subject: [pypy-commit] pypy release-pypy2.7-v7.x: merge default into release Message-ID: <5dfb65dd.1c69fb81.a161f.dce9@mx.google.com> Author: Matti Picus Branch: release-pypy2.7-v7.x Changeset: r98328:c124c11a5921 Date: 2019-12-19 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/c124c11a5921/ Log: merge default into release diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -59,3 +59,5 @@ 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 +285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 +008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py --- a/lib-python/2.7/subprocess.py +++ b/lib-python/2.7/subprocess.py @@ -1301,6 +1301,11 @@ src_library = os.path.join(src_dir, libname) if 
os.path.exists(src_library): caller.f_globals['copyfile'](src_library, dest_library) + src_lib = os.path.join(src_dir, '../lib') + if os.path.exists(src_lib): + # portable build + import shutil + shutil.copytree(src_lib, os.path.join(dest_dir, '../lib')) def _demo_posix(): diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py --- a/lib_pypy/pyrepl/completing_reader.py +++ b/lib_pypy/pyrepl/completing_reader.py @@ -266,7 +266,7 @@ reader.ps1 = "c**> " reader.ps2 = "c/*> " reader.ps3 = "c|*> " - reader.ps4 = "c\*> " + reader.ps4 = r"c\*> " while reader.readline(): pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -58,7 +58,7 @@ return u[c] else: if unicodedata_.category(c).startswith('C'): - return '\u%04x'%(ord(c),) + return br'\u%04x'%(ord(c),) else: return c @@ -630,7 +630,7 @@ reader.ps1 = "**> " reader.ps2 = "/*> " reader.ps3 = "|*> " - reader.ps4 = "\*> " + reader.ps4 = r"\*> " while reader.readline(): pass diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -138,6 +138,10 @@ * Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit platforms rather than automatically using a ``SignedLongLong``, require an explicit ``r_int64()`` call instead +* Fix multithread contention when creating an object in cffi (PyPy only) +* Copy lib/* shared objects in portable builds when creating virtual + environments with virtualenv and venv +* Potential fix in rare-case JIT optimizer (`issue 3128`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -226,6 +230,7 @@ .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 .. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 +.. _`issue 3128`: https://bitbucket.com/pypy/pypy/issues/3120 .. 
_13312: https://bugs.python.org/issue13312 .. _13617: https://bugs.python.org/issue13617 diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -83,6 +83,8 @@ self.space = space self.all_primitives = [None] * cffi_opcode._NUM_PRIM self.file_struct = None + self.lock = None + self.lock_owner = 0 self.rec_level = 0 def get_file_struct(self): @@ -90,6 +92,33 @@ self.file_struct = ctypestruct.W_CTypeStruct(self.space, "FILE") return self.file_struct + def __enter__(self): + # This is a simple recursive lock implementation + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + if tid != self.lock_owner: + if self.lock is None: + self.lock = self.space.allocate_lock() + self.lock.acquire(True) + assert self.lock_owner == 0 + assert self.rec_level == 0 + self.lock_owner = tid + self.rec_level += 1 + + def __exit__(self, *args): + assert self.rec_level > 0 + self.rec_level -= 1 + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + assert tid == self.lock_owner + if self.rec_level == 0: + self.lock_owner = 0 + self.lock.release() + def get_primitive_type(ffi, num): space = ffi.space @@ -408,21 +437,25 @@ return ffi.cached_types[index] realize_cache = ffi.space.fromcache(RealizeCache) - if realize_cache.rec_level >= 1000: - raise oefmt(ffi.space.w_RuntimeError, - "type-building recursion too deep or infinite. " - "This is known to occur e.g. in ``struct s { void(*callable)" - "(struct s); }''. 
Please report if you get this error and " - "really need support for your case.") - realize_cache.rec_level += 1 - try: + with realize_cache: + # + # check again cached_types, which might have been filled while + # we were waiting for the recursive lock + if from_ffi and ffi.cached_types[index] is not None: + return ffi.cached_types[index] + + if realize_cache.rec_level > 1000: + raise oefmt(ffi.space.w_RuntimeError, + "type-building recursion too deep or infinite. " + "This is known to occur e.g. in ``struct s { void(*callable)" + "(struct s); }''. Please report if you get this error and " + "really need support for your case.") x = realize_c_type_or_func_now(ffi, op, opcodes, index) - finally: - realize_cache.rec_level -= 1 - if from_ffi: - assert ffi.cached_types[index] is None or ffi.cached_types[index] is x - ffi.cached_types[index] = x + if from_ffi: + old = ffi.cached_types[index] + assert old is None or old is x + ffi.cached_types[index] = x return x diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -683,7 +683,14 @@ elif constvalue == 1: opnum = rop.GUARD_TRUE else: - raise AssertionError("uh?") + # Issue #3128: there might be rare cases where strange + # code is produced. That issue hits the assert from + # OptUnroll.inline_short_preamble's send_extra_operation(). + # Better just disable this optimization than crash with + # an AssertionError here. Note also that such code might + # trigger an InvalidLoop to be raised later---so we must + # not crash here. 
+ return op newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) return newop return op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -698,6 +698,15 @@ """ self.optimize_loop(ops, expected, preamble) + def test_guard_value_on_boolean_but_not_zero_or_one(self): + ops = """ + [i] + i1 = int_lt(i, 3) + guard_value(i1, -1) [i] + jump(i) + """ + py.test.raises(InvalidLoop, self.optimize_loop, ops, ops, ops) + def test_int_is_true_of_bool(self): ops = """ [i0, i1] diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1146,6 +1146,33 @@ out = cbuilder.cmdexec('') assert out.strip() == 'ok' + def test_int_manipulation(self): + # Distilled from micronumpy.descriptor._compute_hash + # which, for some version of gcc8 compiler produced + # out1 == out2 + from rpython.rlib.rarithmetic import intmask + + def entry_point(argv): + if len(argv) < 4: + print 'need 3 arguments, not %s' % str(argv) + return -1 + flags = 0 + x = 0x345678 + y = 0x345678 + s = str(argv[1])[0] + y = intmask((1000003 * y) ^ ord(s)) + y = intmask((1000003 * y) ^ ord(str(argv[2])[0])) + y = (1000003 * y) + y = intmask(y ^ flags) + y = intmask((1000003 * y) ^ int(argv[3])) + print y + return 0 + + t, cbuilder = self.compile(entry_point) + out1 = cbuilder.cmdexec(args=['i', '>', '64']) + out2 = cbuilder.cmdexec(args=['f', '>', '64']) + assert out1 != out2 + class TestThread(object): gcrootfinder = 'shadowstack' From pypy.commits at gmail.com Thu Dec 19 06:58:23 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 03:58:23 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release 
Message-ID: <5dfb65df.1c69fb81.a3697.e44f@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98329:e7e02dccbd8c Date: 2019-12-19 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/e7e02dccbd8c/ Log: merge py3.6 into release diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -59,3 +59,5 @@ 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 +285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 +008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 diff --git a/extra_tests/test_datetime.py b/extra_tests/test_datetime.py --- a/extra_tests/test_datetime.py +++ b/extra_tests/test_datetime.py @@ -350,3 +350,31 @@ d2 = d.replace(hour=7) assert type(d2) is MyDatetime assert d2 == datetime.datetime(2016, 4, 5, 7, 2, 3) + +def test_normalize_pair(): + normalize = datetime._normalize_pair + + assert normalize(1, 59, 60) == (1, 59) + assert normalize(1, 60, 60) == (2, 0) + assert normalize(1, 95, 60) == (2, 35) + +def test_normalize_date(): + normalize = datetime._normalize_date + + # Huge year is caught correctly + with pytest.raises(OverflowError): + normalize(1000 * 1000, 1, 1) + # Normal dates should be unchanged + assert normalize(3000, 1, 1) == (3000, 1, 1) + # Month overflows year boundary + assert normalize(2001, 24, 1) == (2002, 12, 1) + # Day overflows month boundary + assert normalize(2001, 14, 31) == (2002, 3, 3) + # Leap years? 
:S + assert normalize(2001, 1, 61) == (2001, 3, 2) + assert normalize(2000, 1, 61) == (2000, 3, 1) + +def test_normalize_datetime(): + normalize = datetime._normalize_datetime + abnormal = (2002, 13, 35, 30, 95, 75, 1000001) + assert normalize(*abnormal) == (2003, 2, 5, 7, 36, 16, 1) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1416,9 +1416,13 @@ self.__setstate(year, month) self._hashcode = -1 return self - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) + elif isinstance(year, tuple) and len(year) == 7: + # Internal operation - numbers guaranteed to be valid + year, month, day, hour, minute, second, microsecond = year + else: + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) _check_tzinfo_arg(tzinfo) self = dateinterop.__new__(cls) self._year = int(year) @@ -1890,20 +1894,18 @@ "Add a datetime and a timedelta." 
if not isinstance(other, timedelta): return NotImplemented - delta = timedelta(self.toordinal(), - hours=self._hour, - minutes=self._minute, - seconds=self._second, - microseconds=self._microsecond) - delta += other - hour, rem = divmod(delta.seconds, 3600) - minute, second = divmod(rem, 60) - if 0 < delta.days <= _MAXORDINAL: - return datetime.combine(date.fromordinal(delta.days), - time(hour, minute, second, - delta.microseconds, - tzinfo=self._tzinfo)) - raise OverflowError("result out of range") + + result = _normalize_datetime( + self._year, + self._month, + self._day + other.days, + self._hour, + self._minute, + self._second + other.seconds, + self._microsecond + other.microseconds, + ) + + return datetime(result, tzinfo=self._tzinfo) __radd__ = __add__ @@ -2000,6 +2002,65 @@ datetime.resolution = timedelta(microseconds=1) +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo + + +def _normalize_datetime(y, m, d, hh, mm, ss, us): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d) + return y, m, d, hh, mm, ss, us + + +def _normalize_date(year, month, day): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 + + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). 
+ # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) + + if not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day + + def _isoweek1monday(year): # Helper to calculate the day number of the Monday starting week 1 # XXX This could be done more efficiently diff --git a/lib-python/3/subprocess.py b/lib-python/3/subprocess.py --- a/lib-python/3/subprocess.py +++ b/lib-python/3/subprocess.py @@ -1657,3 +1657,9 @@ src_library = os.path.join(src_dir, libname) if os.path.exists(src_library): caller.f_globals['copyfile'](src_library, dest_library) + src_lib = os.path.join(src_dir, '../lib') + if os.path.exists(src_lib): + # portable build + import shutil + shutil.copytree(src_lib, os.path.join(dest_dir, '../lib')) + diff --git a/lib-python/3/venv/__init__.py b/lib-python/3/venv/__init__.py --- a/lib-python/3/venv/__init__.py +++ b/lib-python/3/venv/__init__.py @@ -233,6 +233,16 @@ copier(src_library, dest_library) if not os.path.islink(dest_library): os.chmod(dest_library, 0o755) + libsrc = os.path.join(context.python_dir, '..', 'lib') + if os.path.exists(libsrc): + # PyPy: also copy lib/*.so* for portable builds + libdst = os.path.join(context.env_dir, 'lib') + if not os.path.exists(libdst): + os.mkdir(libdst) + for f in os.listdir(libsrc): + src = os.path.join(libsrc, f) + dst = 
os.path.join(libdst, f) + copier(src, dst) # else: subdir = 'DLLs' diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py --- a/lib_pypy/pyrepl/completing_reader.py +++ b/lib_pypy/pyrepl/completing_reader.py @@ -266,7 +266,7 @@ reader.ps1 = "c**> " reader.ps2 = "c/*> " reader.ps3 = "c|*> " - reader.ps4 = "c\*> " + reader.ps4 = r"c\*> " while reader.readline(): pass diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -648,7 +648,7 @@ reader.ps1 = "**> " reader.ps2 = "/*> " reader.ps3 = "|*> " - reader.ps4 = "\*> " + reader.ps4 = r"\*> " while reader.readline(): pass diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -138,6 +138,10 @@ * Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit platforms rather than automatically using a ``SignedLongLong``, require an explicit ``r_int64()`` call instead +* Fix multithread contention when creating an object in cffi (PyPy only) +* Copy lib/* shared objects in portable builds when creating virtual + environments with virtualenv and venv +* Potential fix in rare-case JIT optimizer (`issue 3128`_) C-API (cpyext) and c-extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -226,6 +230,7 @@ .. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 .. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 .. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 +.. _`issue 3128`: https://bitbucket.com/pypy/pypy/issues/3120 .. _13312: https://bugs.python.org/issue13312 .. 
_13617: https://bugs.python.org/issue13617 diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -83,6 +83,8 @@ self.space = space self.all_primitives = [None] * cffi_opcode._NUM_PRIM self.file_struct = None + self.lock = None + self.lock_owner = 0 self.rec_level = 0 def get_file_struct(self): @@ -90,6 +92,33 @@ self.file_struct = ctypestruct.W_CTypeStruct(self.space, "FILE") return self.file_struct + def __enter__(self): + # This is a simple recursive lock implementation + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + if tid != self.lock_owner: + if self.lock is None: + self.lock = self.space.allocate_lock() + self.lock.acquire(True) + assert self.lock_owner == 0 + assert self.rec_level == 0 + self.lock_owner = tid + self.rec_level += 1 + + def __exit__(self, *args): + assert self.rec_level > 0 + self.rec_level -= 1 + if self.space.config.objspace.usemodules.thread: + from rpython.rlib import rthread + # + tid = rthread.get_ident() + assert tid == self.lock_owner + if self.rec_level == 0: + self.lock_owner = 0 + self.lock.release() + def get_primitive_type(ffi, num): space = ffi.space @@ -408,21 +437,25 @@ return ffi.cached_types[index] realize_cache = ffi.space.fromcache(RealizeCache) - if realize_cache.rec_level >= 1000: - raise oefmt(ffi.space.w_RuntimeError, - "type-building recursion too deep or infinite. " - "This is known to occur e.g. in ``struct s { void(*callable)" - "(struct s); }''. 
Please report if you get this error and " - "really need support for your case.") - realize_cache.rec_level += 1 - try: + with realize_cache: + # + # check again cached_types, which might have been filled while + # we were waiting for the recursive lock + if from_ffi and ffi.cached_types[index] is not None: + return ffi.cached_types[index] + + if realize_cache.rec_level > 1000: + raise oefmt(ffi.space.w_RuntimeError, + "type-building recursion too deep or infinite. " + "This is known to occur e.g. in ``struct s { void(*callable)" + "(struct s); }''. Please report if you get this error and " + "really need support for your case.") x = realize_c_type_or_func_now(ffi, op, opcodes, index) - finally: - realize_cache.rec_level -= 1 - if from_ffi: - assert ffi.cached_types[index] is None or ffi.cached_types[index] is x - ffi.cached_types[index] = x + if from_ffi: + old = ffi.cached_types[index] + assert old is None or old is x + ffi.cached_types[index] = x return x diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -610,19 +610,14 @@ def test_recvmsg_issue2649(self): import _socket as socket - listener = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) + listener = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - listener.bind(('::1', 1234)) + listener.bind(('127.0.0.1', 1234)) - s = socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM) - IPV6_RECVERR = 25 - s.setsockopt(socket.IPPROTO_IPV6, IPV6_RECVERR, 1) - - s.sendto(b'x', ('::1', 1234)) - try: + s = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) + s.sendto(b'x', ('127.0.0.1', 1234)) + with raises(BlockingIOError): queue = s.recvmsg(1024, 1024, socket.MSG_ERRQUEUE) - except BlockingIOError as e: - assert True def test_buffer(self): # Test that send/sendall/sendto 
accept a buffer as arg diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -683,7 +683,14 @@ elif constvalue == 1: opnum = rop.GUARD_TRUE else: - raise AssertionError("uh?") + # Issue #3128: there might be rare cases where strange + # code is produced. That issue hits the assert from + # OptUnroll.inline_short_preamble's send_extra_operation(). + # Better just disable this optimization than crash with + # an AssertionError here. Note also that such code might + # trigger an InvalidLoop to be raised later---so we must + # not crash here. + return op newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) return newop return op diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -698,6 +698,15 @@ """ self.optimize_loop(ops, expected, preamble) + def test_guard_value_on_boolean_but_not_zero_or_one(self): + ops = """ + [i] + i1 = int_lt(i, 3) + guard_value(i1, -1) [i] + jump(i) + """ + py.test.raises(InvalidLoop, self.optimize_loop, ops, ops, ops) + def test_int_is_true_of_bool(self): ops = """ [i0, i1] diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -1146,6 +1146,33 @@ out = cbuilder.cmdexec('') assert out.strip() == 'ok' + def test_int_manipulation(self): + # Distilled from micronumpy.descriptor._compute_hash + # which, for some version of gcc8 compiler produced + # out1 == out2 + from rpython.rlib.rarithmetic import intmask + + def entry_point(argv): + if len(argv) < 4: + print 'need 3 arguments, not %s' % str(argv) + 
return -1 + flags = 0 + x = 0x345678 + y = 0x345678 + s = str(argv[1])[0] + y = intmask((1000003 * y) ^ ord(s)) + y = intmask((1000003 * y) ^ ord(str(argv[2])[0])) + y = (1000003 * y) + y = intmask(y ^ flags) + y = intmask((1000003 * y) ^ int(argv[3])) + print y + return 0 + + t, cbuilder = self.compile(entry_point) + out1 = cbuilder.cmdexec(args=['i', '>', '64']) + out2 = cbuilder.cmdexec(args=['f', '>', '64']) + assert out1 != out2 + class TestThread(object): gcrootfinder = 'shadowstack' From pypy.commits at gmail.com Thu Dec 19 11:39:38 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 08:39:38 -0800 (PST) Subject: [pypy-commit] buildbot default: typo Message-ID: <5dfba7ca.1c69fb81.c0efd.1c48@mx.google.com> Author: Matti Picus Branch: Changeset: r1123:8bbcdf048916 Date: 2019-12-19 18:38 +0200 http://bitbucket.org/pypy/buildbot/changeset/8bbcdf048916/ Log: typo diff --git a/slave/buildbot.tac b/slave/buildbot.tac --- a/slave/buildbot.tac +++ b/slave/buildbot.tac @@ -29,10 +29,10 @@ return 'default_password' -buildmaster_host = 'buildbot.pypy.org' +buildmaster_host = 'localhost' port = 10407 -slavename = 'benchmarker64' -passwd = "Qn9iYWPzCaCLvB1+CRE" +slavename = 'localhost' +passwd = find_passwd(slavename) keepalive = 600 usepty = 0 umask = None From pypy.commits at gmail.com Thu Dec 19 15:26:26 2019 From: pypy.commits at gmail.com (yodada) Date: Thu, 19 Dec 2019 12:26:26 -0800 (PST) Subject: [pypy-commit] pypy py3.6: pypy/interpreter/pyframe.py: if locals directory is created inside fast2locals, create a module dictionary Message-ID: <5dfbdcf2.1c69fb81.a5d30.4da6@mx.google.com> Author: Lin Cheng Branch: py3.6 Changeset: r98330:5ac2c72ee7ed Date: 2019-12-18 20:46 -0500 http://bitbucket.org/pypy/pypy/changeset/5ac2c72ee7ed/ Log: pypy/interpreter/pyframe.py: if locals directory is created inside fast2locals, create a module dictionary diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ 
b/pypy/interpreter/pyframe.py @@ -565,7 +565,7 @@ # Copy values from the fastlocals to self.w_locals d = self.getorcreatedebug() if d.w_locals is None: - d.w_locals = self.space.newdict() + d.w_locals = self.space.newdict(module=True) varnames = self.getcode().getvarnames() for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] From pypy.commits at gmail.com Thu Dec 19 15:26:28 2019 From: pypy.commits at gmail.com (yodada) Date: Thu, 19 Dec 2019 12:26:28 -0800 (PST) Subject: [pypy-commit] pypy py3.6: add a test of captured locals in pypyjit/test_pypy_c Message-ID: <5dfbdcf4.1c69fb81.c4263.6d0b@mx.google.com> Author: Lin Cheng Branch: py3.6 Changeset: r98331:574325fb0c26 Date: 2019-12-19 13:34 -0500 http://bitbucket.org/pypy/pypy/changeset/574325fb0c26/ Log: add a test of captured locals in pypyjit/test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py b/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py @@ -0,0 +1,41 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestCaptureLocals(BaseTestPyPyC): + def test_capture_locals(self): + def main(n): + num = 42 + i = 0 + acc = 0 + src = ''' +while i < n: + acc += num + i += 1 +''' + exec(src) + return acc + + log = self.run(main, [500]) + print (log.result) + assert log.result == 0 + loop, = log.loops_by_filename("") + print (loop) + assert loop.match(""" + i41 = instance_ptr_eq(ConstPtr(ptr18), p16) + guard_false(i41, descr=...) + guard_not_invalidated(descr=...) + i43 = int_lt(i35, 500) + guard_true(i43, descr=...) + i45 = getfield_gc_i(ConstPtr(ptr44), descr=...) + i47 = int_add_ovf(i45, 42) + guard_no_overflow(descr=...) + setfield_gc(ConstPtr(ptr48), i47, descr=...) + i50 = getfield_gc_i(ConstPtr(ptr49), descr=...) + i52 = int_add_ovf(i50, 1) + guard_no_overflow(descr=...) + i54 = getfield_raw_i(..., descr=...) 
+ setfield_gc(ConstPtr(ptr55), i52, descr=...) + i57 = int_lt(i54, 0) + guard_false(i57, descr=...) + jump(..., descr=...) + """) From pypy.commits at gmail.com Thu Dec 19 19:37:29 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:29 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib: make it possible to use __import__ directly, and use a dummy package to run tests instead of trying to import more expensive ones Message-ID: <5dfc17c9.1c69fb81.71a0d.a4d2@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib Changeset: r98332:c37f7da0a5ee Date: 2019-12-19 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/c37f7da0a5ee/ Log: make it possible to use __import__ directly, and use a dummy package to run tests instead of trying to import more expensive ones diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -6,7 +6,7 @@ @unwrap_spec(name='text0', level=int) def importhook(space, name, w_globals=None, - w_locals=None, w_fromlist=None, level=-1): + w_locals=None, w_fromlist=None, level=0): """ NOT_RPYTHON diff --git a/pypy/module/_dummy_importlib/test/dummypkg/__init__.py b/pypy/module/_dummy_importlib/test/dummypkg/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_dummy_importlib/test/dummypkg/__init__.py @@ -0,0 +1,1 @@ +FOO = 42 diff --git a/pypy/module/_dummy_importlib/test/dummypkg/mod.py b/pypy/module/_dummy_importlib/test/dummypkg/mod.py new file mode 100644 --- /dev/null +++ b/pypy/module/_dummy_importlib/test/dummypkg/mod.py @@ -0,0 +1,1 @@ +BAR = 43 diff --git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py --- a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -1,5 +1,9 @@ +import py from 
pypy.tool.pytest.objspace import gettestobjspace +THISDIR = py.path.local(__file__).dirpath() + + def test_default_is_dummy_importlib(): space = gettestobjspace() assert space.config.objspace.usemodules._dummy_importlib @@ -12,14 +16,36 @@ class AppTestDummyImportlib: + def setup_method(self, meth): + space = self.space + self.w_thisidr = space.newtext(str(THISDIR)) + space.appexec([self.w_thisidr], """(dir): + import sys + sys.path.append(dir) + """) + + + def teardown_method(self, meth): + space = self.space + space.appexec([self.w_thisidr], """(dir): + import sys + if dir in sys.path: + sys.path.remove(dir) + """) + def test_import_builtin(self): import sys assert sys.__name__ == 'sys' + def test___import__(self): + import sys + sys2 = __import__('sys') + assert sys is sys2 + def test_import_lib_pypy(self): import _structseq assert hasattr(_structseq, 'structseq_new') def test_import_package(self): - import collections - assert hasattr(collections, 'namedtuple') + import dummypkg + assert dummypkg.FOO == 42 From pypy.commits at gmail.com Thu Dec 19 19:37:31 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:31 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib: tweaks Message-ID: <5dfc17cb.1c69fb81.721f4.d420@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib Changeset: r98333:0deaee5041b4 Date: 2019-12-19 14:12 +0100 http://bitbucket.org/pypy/pypy/changeset/0deaee5041b4/ Log: tweaks diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -1,8 +1,20 @@ import py -from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError from pypy.module.imp.importing import add_module, check_sys_modules_w +def raise_ImportError(space, modname): + err = """ + You are using 
_dummy_importlib: this is not supposed to be a + fully-compatible importing library, but it contains just enough logic to + run most of the tests. If you are experiencing problems with it, consider + adding more logic, or to switch to the fully-working _frozen_importlib by + adding this line to your AppTest class: + + spaceconfig = {'usemodules': ['_frozen_importlib']} + """ + raise OperationError(space.w_ImportError, space.newtext(modname + '\n' + err)) + @unwrap_spec(name='text0', level=int) def importhook(space, name, w_globals=None, @@ -31,18 +43,7 @@ if pydir.check(dir=True) and pyinit.check(file=True): return import_pyfile(space, name, pyinit) - - err = """ - You are using _dummy_importlib: this is not supposed to be a - fully-compatible importing library, but it contains just enough logic to - run most of the tests. If you are experiencing problems with it, consider - adding more logic, or to switch to the fully-working _frozen_importlib by - adding this line to your AppTest class: - - spaceconfig = {'usemodules': ['_frozen_importlib']} - """ - raise OperationError(space.w_ImportError, space.newtext(name + err)) -importhook = interp2app(importhook, app_name='__import__') + raise_ImportError(name) def import_pyfile(space, modulename, pyfile): diff --git a/pypy/module/_dummy_importlib/moduledef.py b/pypy/module/_dummy_importlib/moduledef.py --- a/pypy/module/_dummy_importlib/moduledef.py +++ b/pypy/module/_dummy_importlib/moduledef.py @@ -1,6 +1,8 @@ from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.gateway import interp2app from pypy.module._dummy_importlib import interp_import + class Module(MixedModule): interpleveldefs = { } @@ -11,7 +13,8 @@ def install(self): """NOT_RPYTHON""" super(Module, self).install() - self.w_import = self.space.wrap(interp_import.importhook) + self.w_import = self.space.wrap(interp2app(interp_import.importhook, + app_name = '__dummy_import__')) def startup(self, space): """Copy our __import__ to 
builtins.""" From pypy.commits at gmail.com Thu Dec 19 19:37:32 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:32 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib: add enough logic to import packages and modules inside packages Message-ID: <5dfc17cc.1c69fb81.94473.6de9@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib Changeset: r98334:98132b9252a9 Date: 2019-12-19 14:47 +0100 http://bitbucket.org/pypy/pypy/changeset/98132b9252a9/ Log: add enough logic to import packages and modules inside packages diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -16,8 +16,8 @@ raise OperationError(space.w_ImportError, space.newtext(modname + '\n' + err)) - at unwrap_spec(name='text0', level=int) -def importhook(space, name, w_globals=None, + at unwrap_spec(modname='text0', level=int) +def importhook(space, modname, w_globals=None, w_locals=None, w_fromlist=None, level=0): """ NOT_RPYTHON @@ -26,33 +26,63 @@ non-rpython tricks to implement it :) """ assert level == 0 - if name in space.builtin_modules: - return space.getbuiltinmodule(name) + if modname in space.builtin_modules: + return space.getbuiltinmodule(modname) w_path = space.sys.get('path') + parts = modname.split('.') + if parts[-1] == '': + del parts[-1] + + ## if modname == 'dummypkg.mod': + ## import pdb;pdb.set_trace() + w_mod = None + w_firstmod = None + for part in parts: + w_mod = load_part(space, part, w_mod) + if w_mod and w_firstmod is None: + w_firstmod = w_mod + + if w_mod is None: + raise_ImportError(space, modname) + return w_firstmod + +def load_part(space, modname, w_parent): + if w_parent is None: + w_path = space.sys.get('path') + else: + w_path = space.getattr(w_parent, space.newtext('__path__')) + # + w_mod = load_part_in_path(space, modname, w_path) + if w_mod is not None and w_parent is not 
None: + space.setattr(w_parent, space.newtext(modname), w_mod) + return w_mod + +def load_part_in_path(space, modname, w_path): for w_item in space.unpackiterable(w_path): item = space.fsdecode_w(w_item) d = py.path.local(item) # - pyfile = d.join(name + '.py') + pyfile = d.join(modname + '.py') if pyfile.check(file=True): - return import_pyfile(space, name, pyfile) + return load_pyfile(space, modname, pyfile) # - pydir = d.join(name) + pydir = d.join(modname) pyinit = pydir.join('__init__.py') if pydir.check(dir=True) and pyinit.check(file=True): - return import_pyfile(space, name, pyinit) + return load_pyfile(space, modname, pyinit, modpath=str(pydir)) + return None - raise_ImportError(name) - - -def import_pyfile(space, modulename, pyfile): +def load_pyfile(space, modname, pyfile, modpath=None): ec = space.getexecutioncontext() source = pyfile.read() code_w = ec.compiler.compile(source, str(pyfile), 'exec', 0) - w_mod = add_module(space, space.newtext(modulename)) + w_mod = add_module(space, space.newtext(modname)) space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod) space.setitem(w_mod.w_dict, space.newtext('__name__'), w_mod.w_name) + if modpath: + w_path = space.newlist([space.newtext(modpath)]) + space.setitem(w_mod.w_dict, space.newtext('__path__'), w_path) code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict) - assert check_sys_modules_w(space, modulename) + assert check_sys_modules_w(space, modname) return w_mod diff --git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py --- a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -18,21 +18,23 @@ def setup_method(self, meth): space = self.space - self.w_thisidr = space.newtext(str(THISDIR)) - space.appexec([self.w_thisidr], """(dir): + self.w_thisdir = space.newtext(str(THISDIR)) + space.appexec([self.w_thisdir], """(dir): import sys sys.path.append(dir) 
""") - def teardown_method(self, meth): space = self.space - space.appexec([self.w_thisidr], """(dir): + space.appexec([self.w_thisdir], """(dir): import sys if dir in sys.path: sys.path.remove(dir) """) + def test_ImportError(self): + raises(ImportError, "import i_dont_exist") + def test_import_builtin(self): import sys assert sys.__name__ == 'sys' @@ -43,9 +45,21 @@ assert sys is sys2 def test_import_lib_pypy(self): + import sys import _structseq + assert _structseq.__name__ == '_structseq' assert hasattr(_structseq, 'structseq_new') + assert sys.modules['_structseq'] is _structseq def test_import_package(self): + import sys import dummypkg + assert dummypkg.__name__ == 'dummypkg' + assert dummypkg.__path__ == [self.thisdir + '/' + 'dummypkg'] assert dummypkg.FOO == 42 + assert sys.modules['dummypkg'] is dummypkg + + def test_import_package_dot_mod(self): + import dummypkg.mod + assert dummypkg.FOO == 42 + assert dummypkg.mod.BAR == 43 From pypy.commits at gmail.com Thu Dec 19 19:37:34 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:34 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib: import submodules with from ... import Message-ID: <5dfc17ce.1c69fb81.2e645.a1bc@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib Changeset: r98335:bc01efbafe27 Date: 2019-12-19 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/bc01efbafe27/ Log: import submodules with from ... import diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -15,6 +15,14 @@ """ raise OperationError(space.w_ImportError, space.newtext(modname + '\n' + err)) +def try_getattr(space, w_obj, w_name): + try: + return space.getattr(w_obj, w_name) + except OperationError: + # ugh, but blame CPython :-/ this is supposed to emulate + # hasattr, which eats all exceptions. 
+ return None + @unwrap_spec(modname='text0', level=int) def importhook(space, modname, w_globals=None, @@ -25,7 +33,17 @@ This module is not meant to be translated. As such, we can use all sort of non-rpython tricks to implement it :) """ - assert level == 0 + if level == 0: + w_mod = _absolute_import(space, modname, w_fromlist) + else: + assert False + + if w_mod is None: + raise_ImportError(space, modname) + return w_mod + + +def _absolute_import(space, modname, w_fromlist): if modname in space.builtin_modules: return space.getbuiltinmodule(modname) @@ -34,8 +52,6 @@ if parts[-1] == '': del parts[-1] - ## if modname == 'dummypkg.mod': - ## import pdb;pdb.set_trace() w_mod = None w_firstmod = None for part in parts: @@ -44,7 +60,15 @@ w_firstmod = w_mod if w_mod is None: - raise_ImportError(space, modname) + return None + + if w_fromlist is not None: + for w_name in space.unpackiterable(w_fromlist): + if not try_getattr(space, w_mod, w_name): + # w_name does not exists in w_mod, so it must be a submodule + submodname = space.text0_w(w_name) + load_part(space, submodname, w_mod) + return w_firstmod def load_part(space, modname, w_parent): diff --git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py --- a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -63,3 +63,11 @@ import dummypkg.mod assert dummypkg.FOO == 42 assert dummypkg.mod.BAR == 43 + + def test_from_import(self): + from dummypkg import FOO + assert FOO == 42 + + def test_from_import_mod(self): + from dummypkg import mod + assert mod.BAR == 43 From pypy.commits at gmail.com Thu Dec 19 19:37:36 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:36 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: try again with another approach Message-ID: <5dfc17d0.1c69fb81.a3697.92ab@mx.google.com> Author: Antonio Cuni Branch: 
dummy-importlib2 Changeset: r98336:8535b9830858 Date: 2019-12-20 01:02 +0100 http://bitbucket.org/pypy/pypy/changeset/8535b9830858/ Log: try again with another approach From pypy.commits at gmail.com Thu Dec 19 19:37:38 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:38 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: start a branch in which to introduce a _dummy_importlib module: currently, all applevel tests use _frozen_importlib for importing, but since it is written at applevel it's utterly slow. The goal is to write a minimal replacement which works for 90% of the cases and it's much faster, to speed up tests. For the remaining 10% of the cases, individual tests can specify to use _frozen_importlib explicitly Message-ID: <5dfc17d2.1c69fb81.ae2a.7810@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98337:ad9feca07a5b Date: 2019-12-18 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/ad9feca07a5b/ Log: start a branch in which to introduce a _dummy_importlib module: currently, all applevel tests use _frozen_importlib for importing, but since it is written at applevel it's utterly slow. The goal is to write a minimal replacement which works for 90% of the cases and it's much faster, to speed up tests. 
For the remaining 10% of the cases, individual tests can specify to use _frozen_importlib explicitly diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -14,9 +14,12 @@ and p.join('__init__.py').check() and not p.basename.startswith('test')] +# _dummy_importlib is automatically removed when you specify +# --importlib=_frozen_importlib, which is the default when translating essential_modules = set([ "exceptions", "_io", "sys", "builtins", "posix", "_warnings", - "itertools", "_frozen_importlib", "operator", "_locale", "struct", + "itertools", "operator", "_locale", "struct", + "_dummy_importlib", ]) if sys.platform == "win32": essential_modules.add("_winreg") @@ -103,6 +106,8 @@ 'cpyext': [('objspace.usemodules.array', True)], '_cppyy': [('objspace.usemodules.cpyext', True)], 'faulthandler': [('objspace.usemodules._vmprof', True)], + '_dummy_importlib': [('objspace.usemodules._frozen_importlib', False)], + '_frozen_importlib': [('objspace.usemodules._dummy_importlib', False)], } module_suggests = { # the reason you want _rawffi is for ctypes, which diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -268,6 +268,10 @@ # expose the following variables to ease debugging global space, entry_point + # the default is _dummy_importlib for tests, but we really want + # _frozen_importlib when translating + config.objspace.usemodules._frozen_importlib = True + if config.objspace.allworkingmodules: from pypy.config.pypyoption import enable_allworkingmodules enable_allworkingmodules(config) @@ -413,4 +417,3 @@ ns['get_gchooks'] = self.get_gchooks PyPyTarget().interface(globals()) - diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -671,7 +671,10 @@ 
self.fromcache(State).build_api() self.getbuiltinmodule('sys') self.getbuiltinmodule('_imp') - self.getbuiltinmodule('_frozen_importlib') + if self.config.objspace.usemodules._frozen_importlib: + self.getbuiltinmodule('_frozen_importlib') + if self.config.objspace.usemodules._dummy_importlib: + self.getbuiltinmodule('_dummy_importlib') self.getbuiltinmodule('builtins') for mod in self.builtin_modules.values(): mod.setup_after_space_initialization() diff --git a/pypy/module/_dummy_importlib/__init__.py b/pypy/module/_dummy_importlib/__init__.py new file mode 100644 diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py new file mode 100644 --- /dev/null +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -0,0 +1,10 @@ +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError + + + at unwrap_spec(name='text0', level=int) +def importhook(space, name, w_globals=None, + w_locals=None, w_fromlist=None, level=-1): + return space.w_None + +importhook = interp2app(importhook, app_name='__import__') diff --git a/pypy/module/_dummy_importlib/moduledef.py b/pypy/module/_dummy_importlib/moduledef.py new file mode 100644 --- /dev/null +++ b/pypy/module/_dummy_importlib/moduledef.py @@ -0,0 +1,20 @@ +from pypy.interpreter.mixedmodule import MixedModule +from pypy.module._dummy_importlib import interp_import + +class Module(MixedModule): + interpleveldefs = { + } + + appleveldefs = { + } + + def install(self): + """NOT_RPYTHON""" + super(Module, self).install() + self.w_import = self.space.wrap(interp_import.importhook) + + def startup(self, space): + """Copy our __import__ to builtins.""" + # use special module api to prevent a cell from being introduced + self.space.builtin.setdictvalue_dont_introduce_cell( + '__import__', self.w_import) diff --git a/pypy/module/_dummy_importlib/test/__init__.py b/pypy/module/_dummy_importlib/test/__init__.py new file mode 100644 diff 
--git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py new file mode 100644 --- /dev/null +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -0,0 +1,10 @@ +from pypy.tool.pytest.objspace import gettestobjspace + +def test_default_is_dummy_importlib(): + space = gettestobjspace() + assert space.config.objspace.usemodules._dummy_importlib + assert not space.config.objspace.usemodules._frozen_importlib + # + space = gettestobjspace(usemodules=['_frozen_importlib']) + assert not space.config.objspace.usemodules._dummy_importlib + assert space.config.objspace.usemodules._frozen_importlib From pypy.commits at gmail.com Thu Dec 19 19:37:39 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:39 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: fix comment Message-ID: <5dfc17d3.1c69fb81.ef3af.b35c@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98338:5e84006ee933 Date: 2019-12-20 01:03 +0100 http://bitbucket.org/pypy/pypy/changeset/5e84006ee933/ Log: fix comment diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -14,8 +14,8 @@ and p.join('__init__.py').check() and not p.basename.startswith('test')] -# _dummy_importlib is automatically removed when you specify -# --importlib=_frozen_importlib, which is the default when translating +# _dummy_importlib is automatically removed when you specify enable +# _fronzen_importlib, which is the default when translating essential_modules = set([ "exceptions", "_io", "sys", "builtins", "posix", "_warnings", "itertools", "operator", "_locale", "struct", From pypy.commits at gmail.com Thu Dec 19 19:37:41 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:41 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: WIP: remove this file Message-ID: 
<5dfc17d5.1c69fb81.3649d.a227@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98339:fdb244352422 Date: 2019-12-20 01:05 +0100 http://bitbucket.org/pypy/pypy/changeset/fdb244352422/ Log: WIP: remove this file diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -1,10 +0,0 @@ -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError - - - at unwrap_spec(name='text0', level=int) -def importhook(space, name, w_globals=None, - w_locals=None, w_fromlist=None, level=-1): - return space.w_None - -importhook = interp2app(importhook, app_name='__import__') From pypy.commits at gmail.com Thu Dec 19 19:37:43 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:43 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: copy the content of pypy/module/imp/importing.py from default (rev 2e8c4536e416) Message-ID: <5dfc17d7.1c69fb81.224cf.8a8c@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98340:e566fbc85941 Date: 2019-12-20 01:07 +0100 http://bitbucket.org/pypy/pypy/changeset/e566fbc85941/ Log: copy the content of pypy/module/imp/importing.py from default (rev 2e8c4536e416) diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -0,0 +1,1153 @@ +""" +Implementation of the interpreter-level default import logic. 
+""" + +import sys, os, stat + +from pypy.interpreter.module import Module +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef, generic_new_descr +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.baseobjspace import W_Root, CannotHaveLock +from pypy.interpreter.eval import Code +from pypy.interpreter.pycode import PyCode +from pypy.interpreter.streamutil import wrap_streamerror +from rpython.rlib import streamio, jit +from rpython.rlib.streamio import StreamErrors +from rpython.rlib.objectmodel import we_are_translated, specialize +from pypy.module.sys.version import PYPY_VERSION + +_WIN32 = sys.platform == 'win32' + +SEARCH_ERROR = 0 +PY_SOURCE = 1 +PY_COMPILED = 2 +C_EXTENSION = 3 +# PY_RESOURCE = 4 +PKG_DIRECTORY = 5 +C_BUILTIN = 6 +PY_FROZEN = 7 +# PY_CODERESOURCE = 8 +IMP_HOOK = 9 + +SO = '.pyd' if _WIN32 else '.so' + +# Be careful update when changing this: it is now used for both cpyext +# and cffi so's. If we do have to update it, we'd likely need a way to +# split the two usages again. +DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] + + at specialize.memo() +def get_so_extension(space): + if space.config.objspace.soabi is not None: + soabi = space.config.objspace.soabi + else: + soabi = DEFAULT_SOABI + + if not soabi: + return SO + + if not space.config.translating: + soabi += 'i' + + return '.' + soabi + SO + +def log_pyverbose(space, level, message): + if space.sys.w_initialdict is None: + return # sys module not initialised, avoid recursion + verbose = space.sys.get_flag('verbose') + if verbose >= level: + w_stderr = space.sys.get('stderr') + space.call_method(w_stderr, "write", space.newtext(message)) + +def file_exists(path): + "Test whether the given path is an existing regular file." + return os.path.isfile(path) and case_ok(path) + +def path_exists(path): + "Test whether the given path exists." 
+ return os.path.exists(path) and case_ok(path) + +def has_so_extension(space): + return (space.config.objspace.usemodules.cpyext or + space.config.objspace.usemodules._cffi_backend) + +def has_init_module(space, filepart): + "Return True if the directory filepart qualifies as a package." + init = os.path.join(filepart, "__init__") + if path_exists(init + ".py"): + return True + if space.config.objspace.lonepycfiles and path_exists(init + ".pyc"): + return True + return False + +def find_modtype(space, filepart): + """Check which kind of module to import for the given filepart, + which is a path without extension. Returns PY_SOURCE, PY_COMPILED or + SEARCH_ERROR. + """ + # check the .py file + pyfile = filepart + ".py" + if file_exists(pyfile): + return PY_SOURCE, ".py", "U" + + # on Windows, also check for a .pyw file + if _WIN32: + pyfile = filepart + ".pyw" + if file_exists(pyfile): + return PY_SOURCE, ".pyw", "U" + + # The .py file does not exist. By default on PyPy, lonepycfiles + # is False: if a .py file does not exist, we don't even try to + # look for a lone .pyc file. + # The "imp" module does not respect this, and is allowed to find + # lone .pyc files. 
+ # check the .pyc file + if space.config.objspace.lonepycfiles: + pycfile = filepart + ".pyc" + if file_exists(pycfile): + # existing .pyc file + return PY_COMPILED, ".pyc", "rb" + + if has_so_extension(space): + so_extension = get_so_extension(space) + pydfile = filepart + so_extension + if file_exists(pydfile): + return C_EXTENSION, so_extension, "rb" + + return SEARCH_ERROR, None, None + +if sys.platform.startswith('linux') or 'freebsd' in sys.platform: + def case_ok(filename): + return True +else: + # XXX that's slow + def case_ok(filename): + index = filename.rfind(os.sep) + if os.altsep is not None: + index2 = filename.rfind(os.altsep) + index = max(index, index2) + if index < 0: + directory = os.curdir + else: + directory = filename[:index+1] + filename = filename[index+1:] + try: + return filename in os.listdir(directory) + except OSError: + return False + +def try_getattr(space, w_obj, w_name): + try: + return space.getattr(w_obj, w_name) + except OperationError: + # ugh, but blame CPython :-/ this is supposed to emulate + # hasattr, which eats all exceptions. + return None + +def check_sys_modules(space, w_modulename): + return space.finditem(space.sys.get('modules'), w_modulename) + +def check_sys_modules_w(space, modulename): + return space.finditem_str(space.sys.get('modules'), modulename) + + at jit.elidable +def _get_dot_position(str, n): + # return the index in str of the '.' 
such that there are n '.'-separated + # strings after it + result = len(str) + while n > 0 and result >= 0: + n -= 1 + result = str.rfind('.', 0, result) + return result + +def _get_relative_name(space, modulename, level, w_globals): + ctxt_w_package = space.finditem_str(w_globals, '__package__') + ctxt_w_package = jit.promote(ctxt_w_package) + level = jit.promote(level) + + ctxt_package = None + if ctxt_w_package is not None and ctxt_w_package is not space.w_None: + try: + ctxt_package = space.text0_w(ctxt_w_package) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + raise oefmt(space.w_ValueError, "__package__ set to non-string") + + if ctxt_package is not None: + # __package__ is set, so use it + if ctxt_package == '' and level < 0: + return None, 0 + + dot_position = _get_dot_position(ctxt_package, level - 1) + if dot_position < 0: + if len(ctxt_package) == 0: + where = "in non-package" + else: + where = "beyond toplevel package" + raise oefmt(space.w_ValueError, + "Attempted relative import %s", where) + + # Try to import parent package + try: + absolute_import(space, ctxt_package, 0, None, tentative=False) + except OperationError as e: + if not e.match(space, space.w_ImportError): + raise + if level > 0: + raise oefmt(space.w_SystemError, + "Parent module '%s' not loaded, cannot perform " + "relative import", ctxt_package) + else: + msg = ("Parent module '%s' not found while handling absolute " + "import" % ctxt_package) + space.warn(space.newtext(msg), space.w_RuntimeWarning) + + rel_modulename = ctxt_package[:dot_position] + rel_level = rel_modulename.count('.') + 1 + if modulename: + rel_modulename += '.' 
+ modulename + else: + # __package__ not set, so figure it out and set it + ctxt_w_name = space.finditem_str(w_globals, '__name__') + ctxt_w_path = space.finditem_str(w_globals, '__path__') + + ctxt_w_name = jit.promote(ctxt_w_name) + ctxt_name = None + if ctxt_w_name is not None: + try: + ctxt_name = space.text0_w(ctxt_w_name) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + + if not ctxt_name: + return None, 0 + + m = max(level - 1, 0) + if ctxt_w_path is None: # plain module + m += 1 + dot_position = _get_dot_position(ctxt_name, m) + if dot_position < 0: + if level > 0: + raise oefmt(space.w_ValueError, + "Attempted relative import in non-package") + rel_modulename = '' + rel_level = 0 + else: + rel_modulename = ctxt_name[:dot_position] + rel_level = rel_modulename.count('.') + 1 + + if ctxt_w_path is not None: + # __path__ is set, so __name__ is already the package name + space.setitem(w_globals, space.newtext("__package__"), ctxt_w_name) + else: + # Normal module, so work out the package name if any + last_dot_position = ctxt_name.rfind('.') + if last_dot_position < 0: + space.setitem(w_globals, space.newtext("__package__"), space.w_None) + else: + space.setitem(w_globals, space.newtext("__package__"), + space.newtext(ctxt_name[:last_dot_position])) + + if modulename: + if rel_modulename: + rel_modulename += '.' 
+ modulename + else: + rel_modulename = modulename + + return rel_modulename, rel_level + + + at unwrap_spec(name='text0', level=int) +def importhook(space, name, w_globals=None, + w_locals=None, w_fromlist=None, level=-1): + modulename = name + if not modulename and level < 0: + raise oefmt(space.w_ValueError, "Empty module name") + + if w_fromlist is not None and not space.is_true(w_fromlist): + w_fromlist = None + + rel_modulename = None + if (level != 0 and w_globals is not None and + space.isinstance_w(w_globals, space.w_dict)): + rel_modulename, rel_level = _get_relative_name(space, modulename, level, + w_globals) + if rel_modulename: + # if no level was set, ignore import errors, and + # fall back to absolute import at the end of the + # function. + if level == -1: + # This check is a fast path to avoid redoing the + # following absolute_import() in the common case + w_mod = check_sys_modules_w(space, rel_modulename) + if w_mod is not None and space.is_w(w_mod, space.w_None): + # if we already find space.w_None, it means that we + # already tried and failed and fell back to the + # end of this function. + w_mod = None + else: + w_mod = absolute_import(space, rel_modulename, rel_level, + w_fromlist, tentative=True) + else: + w_mod = absolute_import(space, rel_modulename, rel_level, + w_fromlist, tentative=False) + if w_mod is not None: + return w_mod + + w_mod = absolute_import(space, modulename, 0, w_fromlist, tentative=0) + if rel_modulename is not None: + space.setitem(space.sys.get('modules'), space.newtext(rel_modulename), space.w_None) + return w_mod + +def absolute_import(space, modulename, baselevel, w_fromlist, tentative): + # Short path: check in sys.modules, but only if there is no conflict + # on the import lock. In the situation of 'import' statements + # inside tight loops, this should be true, and absolute_import_try() + # should be followed by the JIT and turned into not much code. 
But + # if the import lock is currently held by another thread, then we + # have to wait, and so shouldn't use the fast path. + if not getimportlock(space).lock_held_by_someone_else(): + w_mod = absolute_import_try(space, modulename, baselevel, w_fromlist) + if w_mod is not None and not space.is_w(w_mod, space.w_None): + return w_mod + return absolute_import_with_lock(space, modulename, baselevel, + w_fromlist, tentative) + + at jit.dont_look_inside +def absolute_import_with_lock(space, modulename, baselevel, + w_fromlist, tentative): + lock = getimportlock(space) + lock.acquire_lock() + try: + return _absolute_import(space, modulename, baselevel, + w_fromlist, tentative) + finally: + lock.release_lock(silent_after_fork=True) + + at jit.unroll_safe +def absolute_import_try(space, modulename, baselevel, w_fromlist): + """ Only look up sys.modules, not actually try to load anything + """ + w_path = None + last_dot = 0 + if '.' not in modulename: + w_mod = check_sys_modules_w(space, modulename) + first = w_mod + if w_fromlist is not None and w_mod is not None: + w_path = try_getattr(space, w_mod, space.newtext('__path__')) + else: + level = 0 + first = None + while last_dot >= 0: + last_dot = modulename.find('.', last_dot + 1) + if last_dot < 0: + w_mod = check_sys_modules_w(space, modulename) + else: + w_mod = check_sys_modules_w(space, modulename[:last_dot]) + if w_mod is None or space.is_w(w_mod, space.w_None): + return None + if level == baselevel: + first = w_mod + if w_fromlist is not None: + w_path = try_getattr(space, w_mod, space.newtext('__path__')) + level += 1 + if w_fromlist is not None: + # bit artificial code but important to not just unwrap w_fromlist + # to get a better trace. 
if it is unwrapped, the immutability of the + # tuple is lost + length = space.len_w(w_fromlist) + if w_path is not None: + if length == 1 and space.eq_w( + space.getitem(w_fromlist, space.newint(0)), + space.newtext('*')): + w_all = try_getattr(space, w_mod, space.newtext('__all__')) + if w_all is not None: + w_fromlist = w_all + length = space.len_w(w_fromlist) + else: + w_fromlist = None + # "from x import *" with x already imported and no x.__all__ + # always succeeds without doing more imports. It will + # just copy everything from x.__dict__ as it is now. + + if w_fromlist is not None: + for i in range(length): + w_name = space.getitem(w_fromlist, space.newint(i)) + if not space.isinstance_w(w_name, space.w_text): + raise oefmt(space.w_TypeError, + "'Item in ``fromlist'' must be str, not %T", w_name) + if try_getattr(space, w_mod, w_name) is None: + return None + return w_mod + return first + +def _absolute_import(space, modulename, baselevel, w_fromlist, tentative): + if '/' in modulename or '\\' in modulename: + raise oefmt(space.w_ImportError, + "Import by filename is not supported.") + + w_mod = None + parts = modulename.split('.') + if parts[-1] == '': + del parts[-1] + prefix = [] + w_path = None + + first = None + level = 0 + + for part in parts: + w_mod = load_part(space, w_path, prefix, part, w_mod, + tentative=tentative) + if w_mod is None: + return None + + if baselevel == level: + first = w_mod + tentative = 0 + prefix.append(part) + w_path = try_getattr(space, w_mod, space.newtext('__path__')) + level += 1 + + if w_fromlist is not None: + if w_path is not None: + length = space.len_w(w_fromlist) + if length == 1 and space.eq_w( + space.getitem(w_fromlist, space.newint(0)), + space.newtext('*')): + w_all = try_getattr(space, w_mod, space.newtext('__all__')) + if w_all is not None: + w_fromlist = w_all + length = space.len_w(w_fromlist) + else: + w_fromlist = None + if w_fromlist is not None: + for i in range(length): + w_name = 
space.getitem(w_fromlist, space.newint(i)) + if not space.isinstance_w(w_name, space.w_text): + raise oefmt(space.w_TypeError, + "'Item in ``fromlist'' must be str, not %T", w_name) + if try_getattr(space, w_mod, w_name) is None: + load_part(space, w_path, prefix, space.text0_w(w_name), + w_mod, tentative=1) + return w_mod + else: + return first + +def find_in_meta_path(space, w_modulename, w_path): + assert w_modulename is not None + if w_path is None: + w_path = space.w_None + for w_hook in space.unpackiterable(space.sys.get("meta_path")): + w_loader = space.call_method(w_hook, "find_module", + w_modulename, w_path) + if space.is_true(w_loader): + return w_loader + +def _getimporter(space, w_pathitem): + # 'imp._getimporter' is somewhat like CPython's get_path_importer + w_path_importer_cache = space.sys.get("path_importer_cache") + w_importer = space.finditem(w_path_importer_cache, w_pathitem) + if w_importer is None: + space.setitem(w_path_importer_cache, w_pathitem, space.w_None) + for w_hook in space.unpackiterable(space.sys.get("path_hooks")): + w_pathbytes = w_pathitem + if space.isinstance_w(w_pathitem, space.w_unicode): + from pypy.module.sys.interp_encoding import getfilesystemencoding + w_pathbytes = space.call_method(space.w_unicode, 'encode', + w_pathitem, getfilesystemencoding(space)) + try: + w_importer = space.call_function(w_hook, w_pathbytes) + except OperationError as e: + if not e.match(space, space.w_ImportError): + raise + else: + break + if w_importer is None: + try: + w_importer = space.call_function( + space.gettypefor(W_NullImporter), w_pathitem + ) + except OperationError as e: + if e.match(space, space.w_ImportError): + return None + raise + if space.is_true(w_importer): + space.setitem(w_path_importer_cache, w_pathitem, w_importer) + return w_importer + +def find_in_path_hooks(space, w_modulename, w_pathitem): + w_importer = _getimporter(space, w_pathitem) + if w_importer is not None and space.is_true(w_importer): + try: + w_loader = 
space.call_method(w_importer, "find_module", w_modulename) + except OperationError as e: + if e.match(space, space.w_ImportError): + return None + raise + if space.is_true(w_loader): + return w_loader + + +class W_NullImporter(W_Root): + def __init__(self, space): + pass + + @unwrap_spec(path='fsencode') + def descr_init(self, space, path): + if not path: + raise oefmt(space.w_ImportError, "empty pathname") + + # Directory should not exist + try: + st = os.stat(path) + except OSError: + pass + else: + if stat.S_ISDIR(st.st_mode): + raise oefmt(space.w_ImportError, "existing directory") + + def find_module_w(self, space, __args__): + return space.w_None + +W_NullImporter.typedef = TypeDef( + 'imp.NullImporter', + __new__=generic_new_descr(W_NullImporter), + __init__=interp2app(W_NullImporter.descr_init), + find_module=interp2app(W_NullImporter.find_module_w), + ) + +class FindInfo: + def __init__(self, modtype, filename, stream, + suffix="", filemode="", w_loader=None): + self.modtype = modtype + self.filename = filename + self.stream = stream + self.suffix = suffix + self.filemode = filemode + self.w_loader = w_loader + + @staticmethod + def fromLoader(w_loader): + return FindInfo(IMP_HOOK, '', None, w_loader=w_loader) + +def find_module(space, modulename, w_modulename, partname, w_path, + use_loader=True): + # Examin importhooks (PEP302) before doing the import + if use_loader: + w_loader = find_in_meta_path(space, w_modulename, w_path) + if w_loader: + return FindInfo.fromLoader(w_loader) + + # XXX Check for frozen modules? 
+ # when w_path is a string + + delayed_builtin = None + w_lib_extensions = None + + if w_path is None: + # check the builtin modules + if modulename in space.builtin_modules: + delayed_builtin = FindInfo(C_BUILTIN, modulename, None) + # a "real builtin module xx" shadows every file "xx.py" there + # could possibly be; a "pseudo-extension module" does not, and + # is only loaded at the point in sys.path where we find + # '.../lib_pypy/__extensions__'. + if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + return delayed_builtin + w_lib_extensions = space.sys.get_state(space).w_lib_extensions + w_path = space.sys.get('path') + + # XXX check frozen modules? + # when w_path is null + + if w_path is not None: + for w_pathitem in space.unpackiterable(w_path): + # sys.path_hooks import hook + if (w_lib_extensions is not None and + space.eq_w(w_pathitem, w_lib_extensions)): + return delayed_builtin + if use_loader: + w_loader = find_in_path_hooks(space, w_modulename, w_pathitem) + if w_loader: + return FindInfo.fromLoader(w_loader) + + path = space.fsencode_w(w_pathitem) + filepart = os.path.join(path, partname) + log_pyverbose(space, 2, "# trying %s\n" % (filepart,)) + if os.path.isdir(filepart) and case_ok(filepart): + if has_init_module(space, filepart): + return FindInfo(PKG_DIRECTORY, filepart, None) + else: + msg = ("Not importing directory '%s' missing __init__.py" % + (filepart,)) + space.warn(space.newtext(msg), space.w_ImportWarning) + modtype, suffix, filemode = find_modtype(space, filepart) + try: + if modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION): + assert suffix is not None + filename = filepart + suffix + stream = streamio.open_file_as_stream(filename, filemode) + try: + return FindInfo(modtype, filename, stream, suffix, filemode) + except: + stream.close() + raise + except StreamErrors: + pass # XXX! must not eat all exceptions, e.g. + # Out of file descriptors. 
+ + # not found + return delayed_builtin + +def _prepare_module(space, w_mod, filename, pkgdir): + space.sys.setmodule(w_mod) + space.setattr(w_mod, space.newtext('__file__'), space.newtext(filename)) + space.setattr(w_mod, space.newtext('__doc__'), space.w_None) + if pkgdir is not None: + space.setattr(w_mod, space.newtext('__path__'), space.newlist([space.newtext(pkgdir)])) + +def add_module(space, w_name): + w_mod = check_sys_modules(space, w_name) + if w_mod is None: + w_mod = Module(space, w_name) + space.sys.setmodule(w_mod) + return w_mod + +def load_c_extension(space, filename, modulename): + from pypy.module.cpyext.api import load_extension_module + log_pyverbose(space, 1, "import %s # from %s\n" % + (modulename, filename)) + return load_extension_module(space, filename, modulename) + # NB. cpyext.api.load_extension_module() can also delegate to _cffi_backend + + at jit.dont_look_inside +def load_module(space, w_modulename, find_info, reuse=False): + """Like load_module() in CPython's import.c, this will normally + make a module object, store it in sys.modules, execute code in it, + and then fetch it again from sys.modules. But this logic is not + used if we're calling a PEP302 loader. 
+ """ + if find_info is None: + return + + if find_info.w_loader: + return space.call_method(find_info.w_loader, "load_module", w_modulename) + + if find_info.modtype == C_BUILTIN: + return space.getbuiltinmodule(find_info.filename, force_init=True, + reuse=reuse) + + if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): + w_mod = None + if reuse: + try: + w_mod = space.getitem(space.sys.get('modules'), w_modulename) + except OperationError as oe: + if not oe.match(space, space.w_KeyError): + raise + if w_mod is None: + w_mod = Module(space, w_modulename) + if find_info.modtype == PKG_DIRECTORY: + pkgdir = find_info.filename + else: + pkgdir = None + _prepare_module(space, w_mod, find_info.filename, pkgdir) + + try: + if find_info.modtype == PY_SOURCE: + return load_source_module( + space, w_modulename, w_mod, + find_info.filename, _wrap_readall(space, find_info.stream), + find_info.stream.try_to_find_file_descriptor()) + elif find_info.modtype == PY_COMPILED: + magic = _wrap_r_long(space, find_info.stream) + timestamp = _wrap_r_long(space, find_info.stream) + return load_compiled_module(space, w_modulename, w_mod, find_info.filename, + magic, timestamp, + _wrap_readall(space, find_info.stream)) + elif find_info.modtype == PKG_DIRECTORY: + w_path = space.newlist([space.newtext(find_info.filename)]) + space.setattr(w_mod, space.newtext('__path__'), w_path) + find_info = find_module(space, "__init__", None, "__init__", + w_path, use_loader=False) + if find_info is None: + return w_mod + try: + w_mod = load_module(space, w_modulename, find_info, + reuse=True) + finally: + _close_ignore(find_info.stream) + return w_mod + elif find_info.modtype == C_EXTENSION and has_so_extension(space): + return load_c_extension(space, find_info.filename, + space.text_w(w_modulename)) + except OperationError: + w_mods = space.sys.get('modules') + space.call_method(w_mods, 'pop', w_modulename, space.w_None) + raise + +def load_part(space, w_path, prefix, 
partname, w_parent, tentative): + modulename = '.'.join(prefix + [partname]) + w_modulename = space.newtext(modulename) + w_mod = check_sys_modules(space, w_modulename) + + if w_mod is not None: + if not space.is_w(w_mod, space.w_None): + return w_mod + elif not prefix or w_path is not None: + find_info = find_module( + space, modulename, w_modulename, partname, w_path) + + try: + if find_info: + w_mod = load_module(space, w_modulename, find_info) + if w_parent is not None: + space.setattr(w_parent, space.newtext(partname), w_mod) + return w_mod + finally: + if find_info: + stream = find_info.stream + if stream: + _close_ignore(stream) + + if tentative: + return None + else: + # ImportError + raise oefmt(space.w_ImportError, "No module named %s", modulename) + + at jit.dont_look_inside +def reload(space, w_module): + """Reload the module. + The module must have been successfully imported before.""" + if not space.isinstance_w(w_module, space.type(space.sys)): + raise oefmt(space.w_TypeError, "reload() argument must be module") + + w_modulename = space.getattr(w_module, space.newtext("__name__")) + modulename = space.text0_w(w_modulename) + if not space.is_w(check_sys_modules(space, w_modulename), w_module): + raise oefmt(space.w_ImportError, + "reload(): module %s not in sys.modules", modulename) + + try: + w_mod = space.reloading_modules[modulename] + # Due to a recursive reload, this module is already being reloaded. 
+ return w_mod + except KeyError: + pass + + space.reloading_modules[modulename] = w_module + try: + namepath = modulename.split('.') + subname = namepath[-1] + parent_name = '.'.join(namepath[:-1]) + if parent_name: + w_parent = check_sys_modules_w(space, parent_name) + if w_parent is None: + raise oefmt(space.w_ImportError, + "reload(): parent %s not in sys.modules", + parent_name) + w_path = space.getattr(w_parent, space.newtext("__path__")) + else: + w_path = None + + find_info = find_module( + space, modulename, w_modulename, subname, w_path) + + if not find_info: + # ImportError + raise oefmt(space.w_ImportError, "No module named %s", modulename) + + try: + try: + return load_module(space, w_modulename, find_info, reuse=True) + finally: + if find_info.stream: + _wrap_close(space, find_info.stream) + except: + # load_module probably removed name from modules because of + # the error. Put back the original module object. + space.sys.setmodule(w_module) + raise + finally: + del space.reloading_modules[modulename] + + +# __________________________________________________________________ +# +# import lock, to prevent two threads from running module-level code in +# parallel. This behavior is more or less part of the language specs, +# as an attempt to avoid failure of 'from x import y' if module x is +# still being executed in another thread. + +# This logic is tested in pypy.module.thread.test.test_import_lock. 
+ +class ImportRLock: + + def __init__(self, space): + self.space = space + self.lock = None + self.lockowner = None + self.lockcounter = 0 + + def lock_held_by_someone_else(self): + me = self.space.getexecutioncontext() # used as thread ident + return self.lockowner is not None and self.lockowner is not me + + def lock_held_by_anyone(self): + return self.lockowner is not None + + def acquire_lock(self): + # this function runs with the GIL acquired so there is no race + # condition in the creation of the lock + if self.lock is None: + try: + self.lock = self.space.allocate_lock() + except CannotHaveLock: + return + me = self.space.getexecutioncontext() # used as thread ident + if self.lockowner is me: + pass # already acquired by the current thread + else: + self.lock.acquire(True) + assert self.lockowner is None + assert self.lockcounter == 0 + self.lockowner = me + self.lockcounter += 1 + + def release_lock(self, silent_after_fork): + me = self.space.getexecutioncontext() # used as thread ident + if self.lockowner is not me: + if self.lockowner is None and silent_after_fork: + # Too bad. This situation can occur if a fork() occurred + # with the import lock held, and we're the child. + return + if self.lock is None: # CannotHaveLock occurred + return + space = self.space + raise oefmt(space.w_RuntimeError, "not holding the import lock") + assert self.lockcounter > 0 + self.lockcounter -= 1 + if self.lockcounter == 0: + self.lockowner = None + self.lock.release() + + def reinit_lock(self): + # Called after fork() to ensure that newly created child + # processes do not share locks with the parent + self.lock = None + self.lockowner = None + self.lockcounter = 0 + +def getimportlock(space): + return space.fromcache(ImportRLock) + +# __________________________________________________________________ +# +# .pyc file support + +""" + Magic word to reject .pyc files generated by other Python versions. + It should change for each incompatible change to the bytecode. 
+ + The value of CR and LF is incorporated so if you ever read or write + a .pyc file in text mode the magic number will be wrong; also, the + Apple MPW compiler swaps their values, botching string constants. + + CPython uses values between 20121 - 62xxx + +""" + +# picking a magic number is a mess. So far it works because we +# have only one extra opcode which might or might not be present. +# CPython leaves a gap of 10 when it increases its own magic number. +# To avoid assigning exactly the same numbers as CPython, we can pick +# any number between CPython + 2 and CPython + 9. Right now, +# default_magic = CPython + 7. +# +# CPython + 0 -- used by CPython without the -U option +# CPython + 1 -- used by CPython with the -U option +# CPython + 7 = default_magic -- used by PyPy (incompatible!) +# +from pypy.interpreter.pycode import default_magic +MARSHAL_VERSION_FOR_PYC = 2 + +def get_pyc_magic(space): + # XXX CPython testing hack: delegate to the real imp.get_magic + if not we_are_translated(): + if '__pypy__' not in space.builtin_modules: + import struct + magic = __import__('imp').get_magic() + return struct.unpack('= 2: + code_w.remove_docstrings(space) + + update_code_filenames(space, code_w, pathname) + return exec_code_module(space, w_mod, code_w, w_modulename, + check_afterwards=check_afterwards) + +def update_code_filenames(space, code_w, pathname, oldname=None): + assert isinstance(code_w, PyCode) + if oldname is None: + oldname = code_w.co_filename + elif code_w.co_filename != oldname: + return + + code_w.co_filename = pathname + constants = code_w.co_consts_w + for const in constants: + if const is not None and isinstance(const, PyCode): + update_code_filenames(space, const, pathname, oldname) + +def _get_long(s): + a = ord(s[0]) + b = ord(s[1]) + c = ord(s[2]) + d = ord(s[3]) + if d >= 0x80: + d -= 0x100 + return a | (b<<8) | (c<<16) | (d<<24) + +def _read_n(stream, n): + buf = '' + while len(buf) < n: + data = stream.read(n - len(buf)) + if not data: 
+ raise streamio.StreamError("end of file") + buf += data + return buf + +def _r_long(stream): + s = _read_n(stream, 4) + return _get_long(s) + +def _w_long(stream, x): + a = x & 0xff + x >>= 8 + b = x & 0xff + x >>= 8 + c = x & 0xff + x >>= 8 + d = x & 0xff + stream.write(chr(a) + chr(b) + chr(c) + chr(d)) + +def _wrap_r_long(space, stream): + """like _r_long(), but raising app-level exceptions""" + try: + return _r_long(stream) + except StreamErrors as e: + raise wrap_streamerror(space, e) + +def _wrap_readall(space, stream): + """stream.readall(), but raising app-level exceptions""" + try: + return stream.readall() + except StreamErrors as e: + raise wrap_streamerror(space, e) + +def _wrap_close(space, stream): + """stream.close(), but raising app-level exceptions""" + try: + stream.close() + except StreamErrors as e: + raise wrap_streamerror(space, e) + +def _close_ignore(stream): + """stream.close(), but ignoring any stream exception""" + try: + stream.close() + except StreamErrors as e: + pass + + +def check_compiled_module(space, pycfilename, expected_mtime): + """ + Check if a pyc file's magic number and mtime match. + """ + stream = None + try: + stream = streamio.open_file_as_stream(pycfilename, "rb") + magic = _r_long(stream) + if magic != get_pyc_magic(space): + stream.close() + return None + pyc_mtime = _r_long(stream) + if pyc_mtime != expected_mtime: + stream.close() + return None + return stream + except StreamErrors: + if stream: + _close_ignore(stream) + return None # XXX! must not eat all exceptions, e.g. + # Out of file descriptors. 
+ +def read_compiled_module(space, cpathname, strbuf): + """ Read a code object from a file and check it for validity """ + + w_marshal = space.getbuiltinmodule('marshal') + w_code = space.call_method(w_marshal, 'loads', space.newbytes(strbuf)) + if not isinstance(w_code, Code): + raise oefmt(space.w_ImportError, "Non-code object in %s", cpathname) + return w_code + + at jit.dont_look_inside +def load_compiled_module(space, w_modulename, w_mod, cpathname, magic, + timestamp, source, check_afterwards=True): + """ + Load a module from a compiled file and execute it. Returns + 'sys.modules[modulename]', which must exist. + """ + log_pyverbose(space, 1, "import %s # compiled from %s\n" % + (space.text_w(w_modulename), cpathname)) + + if magic != get_pyc_magic(space): + raise oefmt(space.w_ImportError, "Bad magic number in %s", cpathname) + #print "loading pyc file:", cpathname + code_w = read_compiled_module(space, cpathname, source) + try: + optimize = space.sys.get_flag('optimize') + except RuntimeError: + # during bootstrapping + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + + return exec_code_module(space, w_mod, code_w, w_modulename, + check_afterwards=check_afterwards) + +def open_exclusive(space, cpathname, mode): + try: + os.unlink(cpathname) + except OSError: + pass + + flags = (os.O_EXCL|os.O_CREAT|os.O_WRONLY|os.O_TRUNC| + streamio.O_BINARY) + fd = os.open(cpathname, flags, mode) + return streamio.fdopen_as_stream(fd, "wb") + +def write_compiled_module(space, co, cpathname, src_mode, src_mtime): + """ + Write a compiled module to a file, placing the time of last + modification of its source into the header. + Errors are ignored, if a write error occurs an attempt is made to + remove the file. 
+ """ + w_marshal = space.getbuiltinmodule('marshal') + try: + w_str = space.call_method(w_marshal, 'dumps', co, + space.newint(MARSHAL_VERSION_FOR_PYC)) + strbuf = space.text_w(w_str) + except OperationError as e: + if e.async(space): + raise + #print "Problem while marshalling %s, skipping" % cpathname + return + # + # Careful here: we must not crash nor leave behind something that looks + # too much like a valid pyc file but really isn't one. + # + mode = src_mode & ~0111 + try: + stream = open_exclusive(space, cpathname, mode) + except (OSError, StreamErrors): + try: + os.unlink(cpathname) + except OSError: + pass + return + + try: + try: + # will patch the header later; write zeroes until we are sure that + # the rest of the file is valid + _w_long(stream, 0) # pyc_magic + _w_long(stream, 0) # mtime + stream.write(strbuf) + + # should be ok (XXX or should call os.fsync() to be sure?) + stream.seek(0, 0) + _w_long(stream, get_pyc_magic(space)) + _w_long(stream, src_mtime) + finally: + stream.close() + except StreamErrors: + try: + os.unlink(cpathname) + except OSError: + pass From pypy.commits at gmail.com Thu Dec 19 19:37:44 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:44 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: tweak the code until it works: remove the references to lonepycfiles and make MODULES_THAT_ALWAYS_SHADOW a global var since it's no longer on the space Message-ID: <5dfc17d8.1c69fb81.c0efd.8090@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98341:37239125314a Date: 2019-12-20 01:19 +0100 http://bitbucket.org/pypy/pypy/changeset/37239125314a/ Log: tweak the code until it works: remove the references to lonepycfiles and make MODULES_THAT_ALWAYS_SHADOW a global var since it's no longer on the space diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ 
b/pypy/module/_dummy_importlib/interp_import.py @@ -1,5 +1,10 @@ """ -Implementation of the interpreter-level default import logic. +This is mostly a copy&paste from pypy/module/imp/importing.py in the +default branch, adapted to work on pypy3. This module is NOT meant to be +translated and probably the logic is slightly different than the real logic +needed for Python3. However, since it is written at interp-level, it is much +faster than _frozen_importlib which is written at applevel, which makes +running tests much faster. """ import sys, os, stat @@ -37,6 +42,15 @@ # split the two usages again. DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] +# on default this is an attribute of space defined in baseobjspace.py: here +# it's no longer on the space, we we copied&pasted this from baseobjspace +MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([ + '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings', + '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal', + 'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport', +], None) + + @specialize.memo() def get_so_extension(space): if space.config.objspace.soabi is not None: @@ -77,8 +91,6 @@ init = os.path.join(filepart, "__init__") if path_exists(init + ".py"): return True - if space.config.objspace.lonepycfiles and path_exists(init + ".pyc"): - return True return False def find_modtype(space, filepart): @@ -97,18 +109,6 @@ if file_exists(pyfile): return PY_SOURCE, ".pyw", "U" - # The .py file does not exist. By default on PyPy, lonepycfiles - # is False: if a .py file does not exist, we don't even try to - # look for a lone .pyc file. - # The "imp" module does not respect this, and is allowed to find - # lone .pyc files. 
- # check the .pyc file - if space.config.objspace.lonepycfiles: - pycfile = filepart + ".pyc" - if file_exists(pycfile): - # existing .pyc file - return PY_COMPILED, ".pyc", "rb" - if has_so_extension(space): so_extension = get_so_extension(space) pydfile = filepart + so_extension @@ -520,6 +520,7 @@ find_module=interp2app(W_NullImporter.find_module_w), ) + class FindInfo: def __init__(self, modtype, filename, stream, suffix="", filemode="", w_loader=None): @@ -556,7 +557,7 @@ # could possibly be; a "pseudo-extension module" does not, and # is only loaded at the point in sys.path where we find # '.../lib_pypy/__extensions__'. - if modulename in space.MODULES_THAT_ALWAYS_SHADOW: + if modulename in MODULES_THAT_ALWAYS_SHADOW: return delayed_builtin w_lib_extensions = space.sys.get_state(space).w_lib_extensions w_path = space.sys.get('path') diff --git a/pypy/module/_dummy_importlib/moduledef.py b/pypy/module/_dummy_importlib/moduledef.py --- a/pypy/module/_dummy_importlib/moduledef.py +++ b/pypy/module/_dummy_importlib/moduledef.py @@ -1,4 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule +from pypy.interpreter.gateway import interp2app from pypy.module._dummy_importlib import interp_import class Module(MixedModule): @@ -11,7 +12,8 @@ def install(self): """NOT_RPYTHON""" super(Module, self).install() - self.w_import = self.space.wrap(interp_import.importhook) + self.w_import = self.space.wrap( + interp2app(interp_import.importhook, app_name='__dummy_import__')) def startup(self, space): """Copy our __import__ to builtins.""" diff --git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py --- a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -8,3 +8,16 @@ space = gettestobjspace(usemodules=['_frozen_importlib']) assert not space.config.objspace.usemodules._dummy_importlib assert 
space.config.objspace.usemodules._frozen_importlib + + +class AppTestDummyImportlib: + + def test_import_builtin(self): + import sys + import operator + assert sys.modules['operator'] is operator + assert operator.add(1, 2) == 3 + + def test_import_from_sys_path(self): + import keyword # this is a module from lib-python + assert keyword.iskeyword('def') From pypy.commits at gmail.com Thu Dec 19 19:37:46 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:46 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib2: add a nice error message in case something goes wrong. Add tests to check that we are actually using the importlib module which we expect Message-ID: <5dfc17da.1c69fb81.94473.6df2@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib2 Changeset: r98342:3c435619e333 Date: 2019-12-20 01:36 +0100 http://bitbucket.org/pypy/pypy/changeset/3c435619e333/ Log: add a nice error message in case something goes wrong. Add tests to check that we are actually using the importlib module which we expect diff --git a/pypy/module/_dummy_importlib/interp_import.py b/pypy/module/_dummy_importlib/interp_import.py --- a/pypy/module/_dummy_importlib/interp_import.py +++ b/pypy/module/_dummy_importlib/interp_import.py @@ -22,6 +22,32 @@ from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.module.sys.version import PYPY_VERSION + + at unwrap_spec(name='text0', level=int) +def dummy_importhook(space, name, w_globals=None, + w_locals=None, w_fromlist=None, level=-1): + try: + return importhook(space, name, w_globals, w_locals, w_fromlist, level) + except OperationError as e: + if not e.match(space, space.w_ImportError): + raise + w_value = e.get_w_value(space) + message = space.text_w(space.str(w_value)) + new_message = """%s + + You are using _dummy_importlib: this is not supposed to be a + fully-compatible importing library, but it contains just enough logic to + run most of the tests. 
If you are experiencing problems with it, consider + adding more logic, or to switch to the fully-working _frozen_importlib by + adding this line to your AppTest class: + + spaceconfig = {'usemodules': ['_frozen_importlib']} + """ % message + raise OperationError(space.w_ImportError, space.newtext(new_message)) + + +# the following code has been copied/pasted/adapted from default + _WIN32 = sys.platform == 'win32' SEARCH_ERROR = 0 diff --git a/pypy/module/_dummy_importlib/moduledef.py b/pypy/module/_dummy_importlib/moduledef.py --- a/pypy/module/_dummy_importlib/moduledef.py +++ b/pypy/module/_dummy_importlib/moduledef.py @@ -13,7 +13,7 @@ """NOT_RPYTHON""" super(Module, self).install() self.w_import = self.space.wrap( - interp2app(interp_import.importhook, app_name='__dummy_import__')) + interp2app(interp_import.dummy_importhook, app_name='__dummy_import__')) def startup(self, space): """Copy our __import__ to builtins.""" diff --git a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py --- a/pypy/module/_dummy_importlib/test/test__dummy_importlib.py +++ b/pypy/module/_dummy_importlib/test/test__dummy_importlib.py @@ -12,6 +12,9 @@ class AppTestDummyImportlib: + def test_no_frozen_importlib(self): + raises(ImportError, "import _frozen_importlib") + def test_import_builtin(self): import sys import operator @@ -21,3 +24,21 @@ def test_import_from_sys_path(self): import keyword # this is a module from lib-python assert keyword.iskeyword('def') + + def test_error_message_on_ImportError(self): + try: + import i_dont_exist + except ImportError as e: + message = str(e) + assert 'i_dont_exist' in message + assert 'spaceconfig' in message + + +class AppTestNoDummyImportlib: + spaceconfig = {'usemodules': ['_frozen_importlib']} + + def test_no_dummy_importlib(self): + try: + import _dummy_importlib + except ImportError as e: + assert 'spaceconfig' not in str(e) From pypy.commits at gmail.com Thu Dec 19 
19:37:48 2019 From: pypy.commits at gmail.com (antocuni) Date: Thu, 19 Dec 2019 16:37:48 -0800 (PST) Subject: [pypy-commit] pypy dummy-importlib: close dead branch Message-ID: <5dfc17dc.1c69fb81.aeb2d.3287@mx.google.com> Author: Antonio Cuni Branch: dummy-importlib Changeset: r98343:348abe425ff2 Date: 2019-12-20 01:36 +0100 http://bitbucket.org/pypy/pypy/changeset/348abe425ff2/ Log: close dead branch From pypy.commits at gmail.com Thu Dec 19 23:06:10 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 20:06:10 -0800 (PST) Subject: [pypy-commit] pypy default: Removed tag release-pypy3.6-v7.2.0rc3 Message-ID: <5dfc48b2.1c69fb81.8d066.a5bf@mx.google.com> Author: Matti Picus Branch: Changeset: r98347:f56dfbc9ded2 Date: 2019-12-20 06:05 +0200 http://bitbucket.org/pypy/pypy/changeset/f56dfbc9ded2/ Log: Removed tag release-pypy3.6-v7.2.0rc3 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -64,3 +64,5 @@ c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 e7e02dccbd8c14fa2d4880f6bd4c47362a8952f5 release-pypy3.6-v7.3.0rc3 c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 +0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 From pypy.commits at gmail.com Thu Dec 19 23:06:08 2019 From: pypy.commits at gmail.com (mattip) Date: Thu, 19 Dec 2019 20:06:08 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v7.3.0rc3 for changeset c124c11a5921 Message-ID: <5dfc48b0.1c69fb81.94473.8a43@mx.google.com> Author: Matti Picus Branch: Changeset: r98346:bc3c916d5dd1 Date: 2019-12-20 06:02 +0200 http://bitbucket.org/pypy/pypy/changeset/bc3c916d5dd1/ Log: Added tag release-pypy2.7-v7.3.0rc3 for changeset c124c11a5921 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -63,3 +63,4 @@ 008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 
e7e02dccbd8c14fa2d4880f6bd4c47362a8952f5 release-pypy3.6-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 From pypy.commits at gmail.com Fri Dec 20 13:33:58 2019 From: pypy.commits at gmail.com (rlamy) Date: Fri, 20 Dec 2019 10:33:58 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Fix some pypyjit tests Message-ID: <5dfd1416.1c69fb81.3be54.c1c6@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98350:67ecbe59924d Date: 2019-12-20 18:33 +0000 http://bitbucket.org/pypy/pypy/changeset/67ecbe59924d/ Log: Fix some pypyjit tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -82,7 +82,6 @@ assert entry_bridge.match_by_id('call', """ dummy_get_utf8? p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p99 = getfield_gc_r(p38, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() p41 = getfield_gc_r(p38, descr=) @@ -444,7 +443,6 @@ dummy_get_utf8? guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p99 = getfield_gc_r(p29, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() p32 = getfield_gc_r(p29, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -428,7 +428,6 @@ p156 = getfield_gc_r(p48, descr=...) i158 = getfield_raw_i(..., descr=...) setfield_gc(p48, p49, descr=...) - setfield_gc(p48, p50, descr=...) setfield_gc(p134, ConstPtr(null), descr=...) i159 = int_lt(i158, 0) guard_false(i159, descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- import sys +import pytest from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC # XXX review the descrs to replace some EF=5 with EF=4 (elidable) @@ -66,26 +67,20 @@ i97 = int_ge(i94, i53) guard_false(i97, descr=...) i98 = strgetitem(p52, i94) - p1 = force_token() p103 = newstr(1) strsetitem(p103, 0, i98) - setfield_gc(p0, p1, descr=) - p296 = call_may_force_r(ConstClass(str_decode_utf8), p103, ConstPtr(null), 1, _, 0, descr=) - guard_not_forced(descr=...) + i95 = call_i(ConstClass(_check_utf8), p103, 0, 0, -1, descr=) guard_no_exception(descr=...) - p116 = getfield_gc_r(p296, descr=) - i107 = getfield_gc_i(p296, descr=) - i109 = int_lt(i107, 0) - guard_false(i109, descr=...) - guard_not_invalidated(descr=...) + i98 = int_ge(i95, 0) + guard_true(i98, descr=...) i99 = int_ge(i94, i46) guard_false(i99, descr=...) i115 = int_add(i94, 1) i116 = int_gt(i115, i71) guard_false(i116, descr=...) - i120 = strgetitem(p45, i94) - i122 = call_i(ConstClass(_ll_2_str_eq_checknull_char__rpy_stringPtr_Char), p116, i120, descr=) - guard_true(i122, descr=...) + + i104 = call_i(ConstClass(_ll_4_str_eq_slice_char__rpy_stringPtr_Signed_Signed_Char), p65, i94, 1, i98, descr=) + guard_true(i104, descr=...) i124 = int_add(i83, 1) --TICK-- jump(..., descr=...) @@ -207,6 +202,7 @@ ''') assert loop.match_by_id('calltwo', '') # nothing + @pytest.mark.xfail def test_move_method_call_out_of_loop(self): # XXX this does not work: _lower_unicode() is found to be elidable, # but it can raise (because of 'raise StopIteration' in @@ -273,7 +269,6 @@ --TICK-- jump(..., descr=...) """) - # XXX remove the guard_nonnull above? 
def test_unicode_indexing_makes_no_bridges(self): log = self.run(r""" From pypy.commits at gmail.com Fri Dec 20 19:26:13 2019 From: pypy.commits at gmail.com (rlamy) Date: Fri, 20 Dec 2019 16:26:13 -0800 (PST) Subject: [pypy-commit] pypy py3.6: Fix tests that can't work on PyPy (and avoid a warning) Message-ID: <5dfd66a5.1c69fb81.94473.83d7@mx.google.com> Author: Ronan Lamy Branch: py3.6 Changeset: r98351:996be08dae82 Date: 2019-12-21 00:25 +0000 http://bitbucket.org/pypy/pypy/changeset/996be08dae82/ Log: Fix tests that can't work on PyPy (and avoid a warning) diff --git a/lib-python/3/test/test_coroutines.py b/lib-python/3/test/test_coroutines.py --- a/lib-python/3/test/test_coroutines.py +++ b/lib-python/3/test/test_coroutines.py @@ -890,11 +890,11 @@ def test_corotype_1(self): ct = types.CoroutineType - self.assert_('into coroutine' in ct.send.__doc__ or + self.assertTrue('into coroutine' in ct.send.__doc__ or 'into generator/coroutine' in ct.send.__doc__) - self.assert_('inside coroutine' in ct.close.__doc__ or + self.assertTrue('inside coroutine' in ct.close.__doc__ or 'inside generator/coroutine' in ct.close.__doc__) - self.assert_('in coroutine' in ct.throw.__doc__ or + self.assertTrue('in coroutine' in ct.throw.__doc__ or 'in generator/coroutine' in ct.throw.__doc__) self.assertIn('of the coroutine', ct.__dict__['__name__'].__doc__) self.assertIn('of the coroutine', ct.__dict__['__qualname__'].__doc__) @@ -1238,8 +1238,8 @@ with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aenter__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): # it's important that __aexit__ wasn't called run_async(foo()) @@ -1261,8 +1261,8 @@ except TypeError as exc: self.assertRegex( exc.args[0], - "'async with' received an object from __aexit__ " - "that does not implement __await__: int") + # XXX: PyPy change + "object int can't be used in 'await' expression") 
self.assertTrue(exc.__context__ is not None) self.assertTrue(isinstance(exc.__context__, ZeroDivisionError)) else: @@ -1286,8 +1286,8 @@ CNT += 1 with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 1) @@ -1300,8 +1300,8 @@ break with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 2) @@ -1314,8 +1314,8 @@ continue with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 3) @@ -1327,8 +1327,8 @@ return with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 4) From pypy.commits at gmail.com Sat Dec 21 16:11:15 2019 From: pypy.commits at gmail.com (mattip) Date: Sat, 21 Dec 2019 13:11:15 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: add pypy7.3.0rc3 hashes Message-ID: <5dfe8a73.1c69fb81.19355.96e0@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r961:d3d2835dab48 Date: 2019-12-21 22:59 +0200 http://bitbucket.org/pypy/pypy.org/changeset/d3d2835dab48/ Log: add pypy7.3.0rc3 hashes diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -403,6 +403,15 @@ pypy2.7-7.3.0 sha256:: + 615dda761d51b68895b633e99f442b061594a7a625cebbc6d5c05005587d33bd pypy2.7-v7.3.0rc3-aarch64.tar.bz2 + 
060a44b0def1c70f081f5d238570ca0bfcce8f52d8e9a0bcadaaf4da9c08f994 pypy2.7-v7.3.0rc3-linux32.tar.bz2 + 10603b360d52e824d7309978be605c8f144117c61e563e9d3a0206bd4fbadb18 pypy2.7-v7.3.0rc3-linux64.tar.bz2 + 96b77ed82b6dc79b157e970433c9f819b1c5ef40997160535d7f904288c5ac49 pypy2.7-v7.3.0rc3-osx64.tar.bz2 + 539ba0faa6b8edaf6d185c17616fbab3e2081707e130e1c124ebcaba50620487 pypy2.7-v7.3.0rc3-s390x.tar.bz2 + 55ba03b3dd9b2ea1379b63908619bc457f358c42a61dc37e62ce6779f4619565 pypy2.7-v7.3.0rc3-src.tar.bz2 + ca38900ed79aa0fe87d565ed6d2528a6863a41de24b160800b1959e6e206203d pypy2.7-v7.3.0rc3-src.zip + f01c89da3fc1b32b8e524eb1c54d83a1bb9825f539cd69ef1d3cb4a162303b70 pypy2.7-v7.3.0rc3-win32.zip + bf8df42c43e43af558f5c6a59c76e4b01cee718999d28d0820d17ba66f21125d pypy2.7-v7.3.0rc1-aarch64.tar.bz2 e8c08c5e6303d12656b3860a54788a581325202db1d9526477be27344c40d106 pypy2.7-v7.3.0rc1-linux32.tar.bz2 7b42389279b2ed00c2e9a66c9a2eb4fefb4ce62929da0d73880f041f37d37f17 pypy2.7-v7.3.0rc1-linux64.tar.bz2 @@ -414,6 +423,16 @@ pypy3.6-7.3.0 sha256:: + + 3d5b7e3b69bd7149f445f9ec947fd84bd3e7c884d6adea938e33cf7b9d86b8ae pypy3.6-v7.3.0rc3-aarch64.tar.bz2 + 819468637b02ad18a6d814ed310a8498d0d456283ef64c5b22ec853d81cef95b pypy3.6-v7.3.0rc3-linux32.tar.bz2 + fca54c97d39a5738faf6ec60f1826b4ccf7ea485a0a58d7950e653e6c8d52ecd pypy3.6-v7.3.0rc3-linux64.tar.bz2 + 6ca6abc145bfd2b0ea356272461b163752ed3d1a117f74cbcdcd16ea814ff5b9 pypy3.6-v7.3.0rc3-osx64.tar.bz2 + 797ab124cee1d2c2c09bf46bb5bbb7e59f832d26aad41df1e647f6ccb80b349e pypy3.6-v7.3.0rc3-s390x.tar.bz2 + 1067705f09fef54007da353b34780d782dcec583d11ae8d40df2bbab869770a6 pypy3.6-v7.3.0rc3-src.tar.bz2 + b8422d1781f63c5fd8caab454076d3dd64e13d082713555576e8f95b998a2de3 pypy3.6-v7.3.0rc3-src.zip + 48333c816963681b2261abf72bce009eca43bc4da59c9e25be878c0f235b65e5 pypy3.6-v7.3.0rc3-win32.zip + dc7c2a34920e13a2968f822291d1a85faec99f7c8708da15828ae3f4b142b284 pypy3.6-v7.3.0rc1-aarch64.tar.bz2 16d7ee8b6e031863fd958024d9d38dcb114484d4673db5f0ada60bedb5c2ed2c 
pypy3.6-v7.3.0rc1-linux32.tar.bz2 4b4d63d60746a8812a5a6524b5242425a41dbe2bcdb59435893d1212048a1f18 pypy3.6-v7.3.0rc1-linux64.tar.bz2 From pypy.commits at gmail.com Sun Dec 22 14:28:35 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 22 Dec 2019 11:28:35 -0800 (PST) Subject: [pypy-commit] pypy py3.6: test, fix for PyModule_AddFunctions, issue 3131 Message-ID: <5dffc3e3.1c69fb81.58689.54b1@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98352:0ddf60ed10d2 Date: 2019-12-22 21:27 +0200 http://bitbucket.org/pypy/pypy/changeset/0ddf60ed10d2/ Log: test, fix for PyModule_AddFunctions, issue 3131 diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -232,3 +232,14 @@ raise oefmt(space.w_SystemError, "PyModule_GetName(): not a module") from pypy.module.cpyext.unicodeobject import PyUnicode_AsUTF8 return PyUnicode_AsUTF8(space, as_pyobj(space, w_mod.w_name)) + + at cpython_api([PyObject, lltype.Ptr(PyMethodDef)], rffi.INT_real, error=-1) +def PyModule_AddFunctions(space, w_mod, methods): + if not isinstance(w_mod, Module): + raise oefmt(space.w_SystemError, "PyModule_AddFuntions(): not a module") + name = space.utf8_w(w_mod.w_name) + dict_w = {} + convert_method_defs(space, dict_w, methods, None, w_mod, name=name) + for key, w_value in dict_w.items(): + space.setattr(w_mod, space.newtext(key), w_value) + return 0 diff --git a/pypy/module/cpyext/test/_widechar.c b/pypy/module/cpyext/test/_widechar.c --- a/pypy/module/cpyext/test/_widechar.c +++ b/pypy/module/cpyext/test/_widechar.c @@ -36,11 +36,7 @@ "_widechar", NULL, -1, - TestMethods, NULL, - NULL, - NULL, - NULL }; PyMODINIT_FUNC @@ -50,5 +46,6 @@ m = PyModule_Create(&_testcapimodule); if (m == NULL) return NULL; + PyModule_AddFunctions(m, TestMethods); return m; } From pypy.commits at gmail.com Sun Dec 22 15:27:21 2019 From: pypy.commits at gmail.com (mattip) Date: Sun, 22 Dec 2019 12:27:21 
-0800 (PST) Subject: [pypy-commit] pypy default: package on portable builds, and contitionally use tk, tcl libraries Message-ID: <5dffd1a9.1c69fb81.53b6c.aae2@mx.google.com> Author: Matti Picus Branch: Changeset: r98353:ad2431a4d478 Date: 2019-12-22 22:24 +0200 http://bitbucket.org/pypy/pypy/changeset/ad2431a4d478/ Log: package on portable builds, and contitionally use tk, tcl libraries diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -32,6 +32,16 @@ def Tcl_AppInit(app): + # For portable builds, try to load a local version of the libraries + from os.path import join, dirname, exists + lib_path = join(dirname(dirname(dirname(__file__))), 'lib') + tcl_path = join(lib_path, 'tcl') + tk_path = join(lib_path, 'tk') + if exists(tcl_path): + tklib.Tcl_Eval(app.interp, 'set tcl_library "{0}"'.format(tcl_path).encode('utf-8')) + if exists(tk_path): + tklib.Tcl_Eval(app.interp, 'set tk_library "{0}"'.format(tk_path).encode('utf-8')) + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: app.raiseTclError() skip_tk_init = tklib.Tcl_GetVar( diff --git a/pypy/tool/release/make_portable.py b/pypy/tool/release/make_portable.py --- a/pypy/tool/release/make_portable.py +++ b/pypy/tool/release/make_portable.py @@ -5,7 +5,7 @@ import os from os.path import dirname, relpath, join, exists, basename, realpath -from shutil import copy2 +from shutil import copy2, copytree import sys from glob import glob from subprocess import check_output, check_call @@ -85,6 +85,9 @@ for path, item in copied.items(): print('Copied {0} to {1}'.format(path, item)) + copytree('/usr/share/tcl8.5', 'lib/tcl') + copytree('/usr/share/tk8.5', 'lib/tk') + binaries.extend(copied.values()) rpaths = rpath_binaries(binaries) From pypy.commits at gmail.com Mon Dec 23 05:48:04 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 02:48:04 -0800 (PST) Subject: [pypy-commit] pypy default: add tcl8, tk8 to bundle; make sure so 
is writable Message-ID: <5e009b64.1c69fb81.c5088.eb54@mx.google.com> Author: Matti Picus Branch: Changeset: r98354:c39c8c877848 Date: 2019-12-23 12:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c39c8c877848/ Log: add tcl8, tk8 to bundle; make sure so is writable diff --git a/pypy/tool/release/make_portable.py b/pypy/tool/release/make_portable.py --- a/pypy/tool/release/make_portable.py +++ b/pypy/tool/release/make_portable.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl', 'tk', 'gdbm', +bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl8', 'tk8', 'gdbm', 'lzma', 'tinfo', 'tinfow', 'ncursesw', 'panelw', 'ncurses', 'panel', 'panelw'] import os @@ -62,6 +62,7 @@ rpaths = {} for binary in binaries: + check_call(['chmod', 'a+w', binary]) rpath = join('$ORIGIN', relpath('lib', dirname(binary))) check_call(['patchelf', '--set-rpath', rpath, binary]) From pypy.commits at gmail.com Mon Dec 23 05:48:08 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 02:48:08 -0800 (PST) Subject: [pypy-commit] pypy release-pypy3.6-v7.x: merge py3.6 into release Message-ID: <5e009b68.1c69fb81.22ed5.1172@mx.google.com> Author: Matti Picus Branch: release-pypy3.6-v7.x Changeset: r98356:1608da62bfc7 Date: 2019-12-23 12:46 +0200 http://bitbucket.org/pypy/pypy/changeset/1608da62bfc7/ Log: merge py3.6 into release diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -61,3 +61,8 @@ 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 +e7e02dccbd8c14fa2d4880f6bd4c47362a8952f5 release-pypy3.6-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 +0000000000000000000000000000000000000000 
release-pypy3.6-v7.2.0rc3 diff --git a/lib-python/3/test/test_coroutines.py b/lib-python/3/test/test_coroutines.py --- a/lib-python/3/test/test_coroutines.py +++ b/lib-python/3/test/test_coroutines.py @@ -890,11 +890,11 @@ def test_corotype_1(self): ct = types.CoroutineType - self.assert_('into coroutine' in ct.send.__doc__ or + self.assertTrue('into coroutine' in ct.send.__doc__ or 'into generator/coroutine' in ct.send.__doc__) - self.assert_('inside coroutine' in ct.close.__doc__ or + self.assertTrue('inside coroutine' in ct.close.__doc__ or 'inside generator/coroutine' in ct.close.__doc__) - self.assert_('in coroutine' in ct.throw.__doc__ or + self.assertTrue('in coroutine' in ct.throw.__doc__ or 'in generator/coroutine' in ct.throw.__doc__) self.assertIn('of the coroutine', ct.__dict__['__name__'].__doc__) self.assertIn('of the coroutine', ct.__dict__['__qualname__'].__doc__) @@ -1238,8 +1238,8 @@ with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aenter__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): # it's important that __aexit__ wasn't called run_async(foo()) @@ -1261,8 +1261,8 @@ except TypeError as exc: self.assertRegex( exc.args[0], - "'async with' received an object from __aexit__ " - "that does not implement __await__: int") + # XXX: PyPy change + "object int can't be used in 'await' expression") self.assertTrue(exc.__context__ is not None) self.assertTrue(isinstance(exc.__context__, ZeroDivisionError)) else: @@ -1286,8 +1286,8 @@ CNT += 1 with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 1) @@ -1300,8 +1300,8 @@ break with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: 
int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 2) @@ -1314,8 +1314,8 @@ continue with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 3) @@ -1327,8 +1327,8 @@ return with self.assertRaisesRegex( TypeError, - "'async with' received an object from __aexit__ " - "that does not implement __await__: int"): + # XXX: PyPy change + "object int can't be used in 'await' expression"): run_async(foo()) self.assertEqual(CNT, 4) diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -35,6 +35,16 @@ def Tcl_AppInit(app): + # For portable builds, try to load a local version of the libraries + from os.path import join, dirname, exists + lib_path = join(dirname(dirname(dirname(__file__))), 'lib') + tcl_path = join(lib_path, 'tcl') + tk_path = join(lib_path, 'tk') + if exists(tcl_path): + tklib.Tcl_Eval(app.interp, 'set tcl_library "{0}"'.format(tcl_path).encode('utf-8')) + if exists(tk_path): + tklib.Tcl_Eval(app.interp, 'set tk_library "{0}"'.format(tk_path).encode('utf-8')) + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: app.raiseTclError() skip_tk_init = tklib.Tcl_GetVar( diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -565,7 +565,7 @@ # Copy values from the fastlocals to self.w_locals d = self.getorcreatedebug() if d.w_locals is None: - d.w_locals = self.space.newdict() + d.w_locals = self.space.newdict(module=True) varnames = self.getcode().getvarnames() for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- 
a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -232,3 +232,14 @@ raise oefmt(space.w_SystemError, "PyModule_GetName(): not a module") from pypy.module.cpyext.unicodeobject import PyUnicode_AsUTF8 return PyUnicode_AsUTF8(space, as_pyobj(space, w_mod.w_name)) + + at cpython_api([PyObject, lltype.Ptr(PyMethodDef)], rffi.INT_real, error=-1) +def PyModule_AddFunctions(space, w_mod, methods): + if not isinstance(w_mod, Module): + raise oefmt(space.w_SystemError, "PyModule_AddFuntions(): not a module") + name = space.utf8_w(w_mod.w_name) + dict_w = {} + convert_method_defs(space, dict_w, methods, None, w_mod, name=name) + for key, w_value in dict_w.items(): + space.setattr(w_mod, space.newtext(key), w_value) + return 0 diff --git a/pypy/module/cpyext/test/_widechar.c b/pypy/module/cpyext/test/_widechar.c --- a/pypy/module/cpyext/test/_widechar.c +++ b/pypy/module/cpyext/test/_widechar.c @@ -36,11 +36,7 @@ "_widechar", NULL, -1, - TestMethods, NULL, - NULL, - NULL, - NULL }; PyMODINIT_FUNC @@ -50,5 +46,6 @@ m = PyModule_Create(&_testcapimodule); if (m == NULL) return NULL; + PyModule_AddFunctions(m, TestMethods); return m; } diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -82,7 +82,6 @@ assert entry_bridge.match_by_id('call', """ dummy_get_utf8? p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p99 = getfield_gc_r(p38, descr=) p39 = getfield_gc_r(p38, descr=) i40 = force_token() p41 = getfield_gc_r(p38, descr=) @@ -444,7 +443,6 @@ dummy_get_utf8? guard_not_invalidated(descr=...) 
p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p99 = getfield_gc_r(p29, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() p32 = getfield_gc_r(p29, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py b/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_capture_locals.py @@ -0,0 +1,41 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestCaptureLocals(BaseTestPyPyC): + def test_capture_locals(self): + def main(n): + num = 42 + i = 0 + acc = 0 + src = ''' +while i < n: + acc += num + i += 1 +''' + exec(src) + return acc + + log = self.run(main, [500]) + print (log.result) + assert log.result == 0 + loop, = log.loops_by_filename("") + print (loop) + assert loop.match(""" + i41 = instance_ptr_eq(ConstPtr(ptr18), p16) + guard_false(i41, descr=...) + guard_not_invalidated(descr=...) + i43 = int_lt(i35, 500) + guard_true(i43, descr=...) + i45 = getfield_gc_i(ConstPtr(ptr44), descr=...) + i47 = int_add_ovf(i45, 42) + guard_no_overflow(descr=...) + setfield_gc(ConstPtr(ptr48), i47, descr=...) + i50 = getfield_gc_i(ConstPtr(ptr49), descr=...) + i52 = int_add_ovf(i50, 1) + guard_no_overflow(descr=...) + i54 = getfield_raw_i(..., descr=...) + setfield_gc(ConstPtr(ptr55), i52, descr=...) + i57 = int_lt(i54, 0) + guard_false(i57, descr=...) + jump(..., descr=...) + """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -428,7 +428,6 @@ p156 = getfield_gc_r(p48, descr=...) i158 = getfield_raw_i(..., descr=...) setfield_gc(p48, p49, descr=...) - setfield_gc(p48, p50, descr=...) setfield_gc(p134, ConstPtr(null), descr=...) i159 = int_lt(i158, 0) guard_false(i159, descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- import sys +import pytest from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC # XXX review the descrs to replace some EF=5 with EF=4 (elidable) @@ -66,26 +67,20 @@ i97 = int_ge(i94, i53) guard_false(i97, descr=...) i98 = strgetitem(p52, i94) - p1 = force_token() p103 = newstr(1) strsetitem(p103, 0, i98) - setfield_gc(p0, p1, descr=) - p296 = call_may_force_r(ConstClass(str_decode_utf8), p103, ConstPtr(null), 1, _, 0, descr=) - guard_not_forced(descr=...) + i95 = call_i(ConstClass(_check_utf8), p103, 0, 0, -1, descr=) guard_no_exception(descr=...) - p116 = getfield_gc_r(p296, descr=) - i107 = getfield_gc_i(p296, descr=) - i109 = int_lt(i107, 0) - guard_false(i109, descr=...) - guard_not_invalidated(descr=...) + i98 = int_ge(i95, 0) + guard_true(i98, descr=...) i99 = int_ge(i94, i46) guard_false(i99, descr=...) i115 = int_add(i94, 1) i116 = int_gt(i115, i71) guard_false(i116, descr=...) - i120 = strgetitem(p45, i94) - i122 = call_i(ConstClass(_ll_2_str_eq_checknull_char__rpy_stringPtr_Char), p116, i120, descr=) - guard_true(i122, descr=...) + + i104 = call_i(ConstClass(_ll_4_str_eq_slice_char__rpy_stringPtr_Signed_Signed_Char), p65, i94, 1, i98, descr=) + guard_true(i104, descr=...) i124 = int_add(i83, 1) --TICK-- jump(..., descr=...) @@ -207,6 +202,7 @@ ''') assert loop.match_by_id('calltwo', '') # nothing + @pytest.mark.xfail def test_move_method_call_out_of_loop(self): # XXX this does not work: _lower_unicode() is found to be elidable, # but it can raise (because of 'raise StopIteration' in @@ -273,7 +269,6 @@ --TICK-- jump(..., descr=...) """) - # XXX remove the guard_nonnull above? 
def test_unicode_indexing_makes_no_bridges(self): log = self.run(r""" diff --git a/pypy/tool/release/make_portable.py b/pypy/tool/release/make_portable.py --- a/pypy/tool/release/make_portable.py +++ b/pypy/tool/release/make_portable.py @@ -1,11 +1,11 @@ #!/usr/bin/env python -bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl', 'tk', 'gdbm', +bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl8', 'tk8', 'gdbm', 'lzma', 'tinfo', 'tinfow', 'ncursesw', 'panelw', 'ncurses', 'panel', 'panelw'] import os from os.path import dirname, relpath, join, exists, basename, realpath -from shutil import copy2 +from shutil import copy2, copytree import sys from glob import glob from subprocess import check_output, check_call @@ -62,6 +62,7 @@ rpaths = {} for binary in binaries: + check_call(['chmod', 'a+w', binary]) rpath = join('$ORIGIN', relpath('lib', dirname(binary))) check_call(['patchelf', '--set-rpath', rpath, binary]) @@ -85,6 +86,9 @@ for path, item in copied.items(): print('Copied {0} to {1}'.format(path, item)) + copytree('/usr/share/tcl8.5', 'lib/tcl') + copytree('/usr/share/tk8.5', 'lib/tk') + binaries.extend(copied.values()) rpaths = rpath_binaries(binaries) From pypy.commits at gmail.com Mon Dec 23 05:48:06 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 02:48:06 -0800 (PST) Subject: [pypy-commit] pypy py3.6: merge default into py3.6 Message-ID: <5e009b66.1c69fb81.2e645.41c7@mx.google.com> Author: Matti Picus Branch: py3.6 Changeset: r98355:bad589e6fe76 Date: 2019-12-23 12:46 +0200 http://bitbucket.org/pypy/pypy/changeset/bad589e6fe76/ Log: merge default into py3.6 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -61,3 +61,8 @@ 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 
+e7e02dccbd8c14fa2d4880f6bd4c47362a8952f5 release-pypy3.6-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 +0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -35,6 +35,16 @@ def Tcl_AppInit(app): + # For portable builds, try to load a local version of the libraries + from os.path import join, dirname, exists + lib_path = join(dirname(dirname(dirname(__file__))), 'lib') + tcl_path = join(lib_path, 'tcl') + tk_path = join(lib_path, 'tk') + if exists(tcl_path): + tklib.Tcl_Eval(app.interp, 'set tcl_library "{0}"'.format(tcl_path).encode('utf-8')) + if exists(tk_path): + tklib.Tcl_Eval(app.interp, 'set tk_library "{0}"'.format(tk_path).encode('utf-8')) + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: app.raiseTclError() skip_tk_init = tklib.Tcl_GetVar( diff --git a/pypy/tool/release/make_portable.py b/pypy/tool/release/make_portable.py --- a/pypy/tool/release/make_portable.py +++ b/pypy/tool/release/make_portable.py @@ -1,11 +1,11 @@ #!/usr/bin/env python -bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl', 'tk', 'gdbm', +bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl8', 'tk8', 'gdbm', 'lzma', 'tinfo', 'tinfow', 'ncursesw', 'panelw', 'ncurses', 'panel', 'panelw'] import os from os.path import dirname, relpath, join, exists, basename, realpath -from shutil import copy2 +from shutil import copy2, copytree import sys from glob import glob from subprocess import check_output, check_call @@ -62,6 +62,7 @@ rpaths = {} for binary in binaries: + check_call(['chmod', 'a+w', binary]) rpath = join('$ORIGIN', relpath('lib', dirname(binary))) check_call(['patchelf', '--set-rpath', rpath, binary]) @@ -85,6 +86,9 @@ for path, item in copied.items(): print('Copied {0} to {1}'.format(path, item)) + 
copytree('/usr/share/tcl8.5', 'lib/tcl') + copytree('/usr/share/tk8.5', 'lib/tk') + binaries.extend(copied.values()) rpaths = rpath_binaries(binaries) From pypy.commits at gmail.com Mon Dec 23 09:59:02 2019 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 23 Dec 2019 06:59:02 -0800 (PST) Subject: [pypy-commit] pypy py3.7: start implementing PEP-0567 Message-ID: <5e00d636.1c69fb81.a4937.cb0a@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: py3.7 Changeset: r98358:f9a14309d6d1 Date: 2019-12-23 15:46 +0100 http://bitbucket.org/pypy/pypy/changeset/f9a14309d6d1/ Log: start implementing PEP-0567 Python code is taken from there, pure Python apart from two functions in __pypy__ to read and write to a new field in the executioncontext diff --git a/lib-python/3/test/test_context.py b/lib-python/3/test/test_context.py --- a/lib-python/3/test/test_context.py +++ b/lib-python/3/test/test_context.py @@ -24,7 +24,7 @@ class ContextTest(unittest.TestCase): def test_context_var_new_1(self): - with self.assertRaisesRegex(TypeError, 'takes exactly 1'): + with self.assertRaises(TypeError): contextvars.ContextVar() with self.assertRaisesRegex(TypeError, 'must be a str'): @@ -76,11 +76,11 @@ pass def test_context_new_1(self): - with self.assertRaisesRegex(TypeError, 'any arguments'): + with self.assertRaises(TypeError): contextvars.Context(1) - with self.assertRaisesRegex(TypeError, 'any arguments'): + with self.assertRaises(TypeError): contextvars.Context(1, a=1) - with self.assertRaisesRegex(TypeError, 'any arguments'): + with self.assertRaises(TypeError): contextvars.Context(a=1) contextvars.Context(**{}) diff --git a/lib_pypy/_contextvars.py b/lib_pypy/_contextvars.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_contextvars.py @@ -0,0 +1,219 @@ +from __pypy__ import get_contextvar_context, set_contextvar_context +# implementation taken from PEP-0567 https://www.python.org/dev/peps/pep-0567/ + +_NO_DEFAULT = object() + + +class Unsubclassable(type): + def 
__new__(cls, name, bases, dct): + for base in bases: + if isinstance(base, Unsubclassable): + raise TypeError(f"type '{base.__name__}' is not an acceptable base type") + return type.__new__(cls, name, bases, dict(dct)) + + +class _ContextData: + # XXX wrong complexity! need to implement a real immutable dict instead + + def __init__(self): + self._mapping = dict() + + def __getitem__(self, key): + return self._mapping[key] + + def __contains__(self, key): + return key in self._mapping + + def __len__(self): + return len(self._mapping) + + def __iter__(self): + return iter(self._mapping) + + def set(self, key, value): + copy = _ContextData() + copy._mapping = self._mapping.copy() + copy._mapping[key] = value + return copy + + def delete(self, key): + copy = _ContextData() + copy._mapping = self._mapping.copy() + del copy._mapping[key] + return copy + + +def get_context(): + context = get_contextvar_context() + if context is None: + context = Context() + set_contextvar_context(context) + return context + + +class Context(metaclass=Unsubclassable): + + #_data: _ContextData + #_prev_context: Optional[Context] + + def __init__(self): + self._data = _ContextData() + self._prev_context = None + + def run(self, callable, *args, **kwargs): + if self._prev_context is not None: + raise RuntimeError( + f'cannot enter context: {self} is already entered') + + self._prev_context = get_context() + try: + set_contextvar_context(self) + return callable(*args, **kwargs) + finally: + set_contextvar_context(self._prev_context) + self._prev_context = None + + def copy(self): + new = Context() + new._data = self._data + return new + + # Implement abstract Mapping.__getitem__ + def __getitem__(self, var): + if not isinstance(var, ContextVar): + raise TypeError("ContextVar key was expected") + return self._data[var] + + # Implement abstract Mapping.__contains__ + def __contains__(self, var): + if not isinstance(var, ContextVar): + raise TypeError("ContextVar key was expected") + return var 
in self._data + + # Implement abstract Mapping.__len__ + def __len__(self): + return len(self._data) + + # Implement abstract Mapping.__iter__ + def __iter__(self): + return iter(self._data) + + def get(self, key, default=None): + if not isinstance(key, ContextVar): + raise TypeError("ContextVar key was expected") + try: + return self._data[key] + except KeyError: + return default + + def keys(self): + from collections.abc import KeysView + return KeysView(self) + + def values(self): + from collections.abc import ValuesView + return ValuesView(self) + + def items(self): + from collections.abc import ItemsView + return ItemsView(self) + + def __eq__(self, other): + if not isinstance(other, Context): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + +def copy_context(): + return get_context().copy() + +class ContextVar(metaclass=Unsubclassable): + + def __init__(self, name, *, default=_NO_DEFAULT): + if not isinstance(name, str): + raise TypeError("context variable name must be a str") + self._name = name + self._default = default + + @property + def name(self): + return self._name + + def get(self, default=_NO_DEFAULT): + context = get_context() + try: + return context[self] + except KeyError: + pass + + if default is not _NO_DEFAULT: + return default + + if self._default is not _NO_DEFAULT: + return self._default + + raise LookupError + + def set(self, value): + context = get_context() + + data: _ContextData = context._data + try: + old_value = data[self] + except KeyError: + old_value = Token.MISSING + + updated_data = data.set(self, value) + context._data = updated_data + return Token(context, self, old_value) + + def reset(self, token): + if token._used: + raise RuntimeError("Token has already been used once") + + if token._var is not self: + raise ValueError( + "Token was created by a different ContextVar") + + context = get_context() + if token._context is not context: + raise ValueError( + "Token was created in a different 
Context") + + if token._old_value is Token.MISSING: + context._data = context._data.delete(token._var) + else: + context._data = context._data.set(token._var, token._old_value) + + token._used = True + + @classmethod + def __class_getitem__(self, key): + return None + + def __repr__(self): + default = '' + if self._default is not _NO_DEFAULT: + default = f"default={self._default} " + return f"" + + +class Token(metaclass=Unsubclassable): + MISSING = object() + + def __init__(self, context, var, old_value): + self._context = context + self._var = var + self._old_value = old_value + self._used = False + + @property + def var(self): + return self._var + + @property + def old_value(self): + return self._old_value + + def __repr__(self): + return f"" diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -44,6 +44,7 @@ self.in_coroutine_wrapper = False self.w_asyncgen_firstiter_fn = None self.w_asyncgen_finalizer_fn = None + self.contextvar_context = None @staticmethod def _mark_thread_disappeared(space): diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -215,3 +215,17 @@ def set_exc_info(space, w_type, w_value, w_traceback=None): ec = space.getexecutioncontext() ec.set_sys_exc_info3(w_type, w_value, w_traceback) + +def get_contextvar_context(space): + ec = space.getexecutioncontext() + context = ec.contextvar_context + if context: + return context + else: + return space.w_None + +def set_contextvar_context(space, w_obj): + ec = space.getexecutioncontext() + ec.contextvar_context = w_obj + return space.w_None + diff --git a/pypy/module/__pypy__/moduledef.py b/pypy/module/__pypy__/moduledef.py --- a/pypy/module/__pypy__/moduledef.py +++ b/pypy/module/__pypy__/moduledef.py @@ -119,6 +119,9 @@ 'pyos_inputhook' : 
'interp_magic.pyos_inputhook', 'newmemoryview' : 'interp_buffer.newmemoryview', 'set_exc_info' : 'interp_magic.set_exc_info', + + 'get_contextvar_context' : 'interp_magic.get_contextvar_context', + 'set_contextvar_context' : 'interp_magic.set_contextvar_context', } submodules = { diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -92,3 +92,16 @@ terr = TypeError("hello world") set_exc_info(TypeError, terr, tb) assert sys.exc_info()[2] is tb + + def test_get_set_contextvar_context(self): + from __pypy__ import get_contextvar_context, set_contextvar_context + context = get_contextvar_context() + try: + set_contextvar_context(1) + assert get_contextvar_context() == 1 + set_contextvar_context(5) + assert get_contextvar_context() == 5 + + finally: + set_contextvar_context(context) + From pypy.commits at gmail.com Mon Dec 23 09:59:00 2019 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 23 Dec 2019 06:59:00 -0800 (PST) Subject: [pypy-commit] pypy py3.7: merge py3.6 Message-ID: <5e00d634.1c69fb81.f0826.5ffa@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: py3.7 Changeset: r98357:53d92a0bd2cd Date: 2019-12-22 15:51 +0100 http://bitbucket.org/pypy/pypy/changeset/53d92a0bd2cd/ Log: merge py3.6 diff too long, truncating to 2000 out of 22776 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -57,3 +57,7 @@ 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0rc2 4a68d8d3d2fc1faec2e83bcb4d28559099092574 release-pypy2.7-v7.2.0 5da45ced70e515f94686be0df47c59abd1348ebc release-pypy3.6-v7.2.0 +e6471221abc16f4584a07fbfeece7ebcaeb7fc38 release-pypy2.7-v7.3.0rc1 +533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 +285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 +008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 diff --git a/LICENSE b/LICENSE --- a/LICENSE 
+++ b/LICENSE @@ -99,16 +99,16 @@ Spenser Bauman Michal Bendowski Jan de Mooij + Stefano Rivera Tyler Wade + Stefan Beyer Vincent Legoll Michael Foord Stephan Diehl - Stefano Rivera Jean-Paul Calderone Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefan Beyer Patrick Maupin Devin Jeanpierre Bob Ippolito @@ -137,9 +137,10 @@ Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Stian Andreassen + Julian Berman William Leslie Paweł Piotr Przeradowski - Stian Andreassen marky1991 Ilya Osadchiy Tobias Oberstein @@ -150,7 +151,7 @@ tav Georg Brandl Joannah Nanjekye - Julian Berman + Yannick Jadoul Bert Freudenberg Wanja Saatkamp Mike Blume @@ -275,6 +276,7 @@ Lutz Paelike Ian Foote Philipp Rustemeuer + Bernd Schoeller Logan Chien Catalin Gabriel Manciu Jacob Oscarson @@ -302,7 +304,6 @@ Laurens Van Houtven Bobby Impollonia Roberto De Ioris - Yannick Jadoul Jeong YunWon Christopher Armstrong Aaron Tubbs @@ -357,6 +358,7 @@ Daniil Yarancev Min RK OlivierBlanvillain + bernd.schoeller at inf.ethz.ch dakarpov at gmail.com Jonas Pfannschmidt Zearin @@ -398,6 +400,7 @@ Jesdi Konrad Delong Dinu Gherman + Sam Edwards pizi Tomáš Pružina James Robert diff --git a/extra_tests/cffi_tests/cffi0/test_verify.py b/extra_tests/cffi_tests/cffi0/test_verify.py --- a/extra_tests/cffi_tests/cffi0/test_verify.py +++ b/extra_tests/cffi_tests/cffi0/test_verify.py @@ -4,6 +4,7 @@ import sys, os, math, weakref from cffi import FFI, VerificationError, VerificationMissing, model, FFIError from extra_tests.cffi_tests.support import * +from extra_tests.cffi_tests.support import extra_compile_args lib_m = ['m'] @@ -14,17 +15,6 @@ lib_m = ['msvcrt'] pass # no obvious -Werror equivalent on MSVC else: - if (sys.platform == 'darwin' and - [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): - # assume a standard clang or gcc - extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] - # special things for clang - extra_compile_args.append('-Qunused-arguments') - else: - # 
assume a standard gcc - extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion', - '-Wno-unused-parameter'] - class FFI(FFI): def verify(self, *args, **kwds): return super(FFI, self).verify( diff --git a/extra_tests/cffi_tests/cffi1/test_recompiler.py b/extra_tests/cffi_tests/cffi1/test_recompiler.py --- a/extra_tests/cffi_tests/cffi1/test_recompiler.py +++ b/extra_tests/cffi_tests/cffi1/test_recompiler.py @@ -35,8 +35,9 @@ source = 'extern "C" {\n%s\n}' % (source,) elif sys.platform != 'win32': # add '-Werror' to the existing 'extra_compile_args' flags + from extra_tests.cffi_tests.support import extra_compile_args kwds['extra_compile_args'] = (kwds.get('extra_compile_args', []) + - ['-Werror']) + extra_compile_args) return _verify(ffi, module_name, source, *args, **kwds) def test_set_source_no_slashes(): @@ -2039,7 +2040,7 @@ ffi.cdef("float _Complex f1(float a, float b);"); lib = verify(ffi, "test_function_returns_float_complex", """ #include - static float _Complex f1(float a, float b) { return a + I*2.0*b; } + static float _Complex f1(float a, float b) { return a + I*2.0f*b; } """, no_cpp=True) # fails on some systems with C++ result = lib.f1(1.25, 5.1) assert type(result) == complex diff --git a/extra_tests/cffi_tests/cffi1/test_verify1.py b/extra_tests/cffi_tests/cffi1/test_verify1.py --- a/extra_tests/cffi_tests/cffi1/test_verify1.py +++ b/extra_tests/cffi_tests/cffi1/test_verify1.py @@ -5,7 +5,7 @@ from cffi import CDefError from cffi import recompiler from extra_tests.cffi_tests.support import * -from extra_tests.cffi_tests.support import _verify +from extra_tests.cffi_tests.support import _verify, extra_compile_args import _cffi_backend lib_m = ['m'] @@ -14,18 +14,6 @@ import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = ['msvcrt'] - extra_compile_args = [] # no obvious -Werror equivalent on MSVC -else: - if (sys.platform == 'darwin' and - [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): - # 
assume a standard clang or gcc - extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] - # special things for clang - extra_compile_args.append('-Qunused-arguments') - else: - # assume a standard gcc - extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion', - '-Wno-unused-parameter'] class FFI(FFI): error = _cffi_backend.FFI.error diff --git a/extra_tests/cffi_tests/support.py b/extra_tests/cffi_tests/support.py --- a/extra_tests/cffi_tests/support.py +++ b/extra_tests/cffi_tests/support.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import sys +import sys, os if sys.version_info < (3,): __all__ = ['u'] @@ -87,3 +87,24 @@ if not name.startswith('_') and not hasattr(module.ffi, name): setattr(ffi, name, NotImplemented) return module.lib + + +# For testing, we call gcc with "-Werror". This is fragile because newer +# versions of gcc are always better at producing warnings, particularly for +# auto-generated code. We need here to adapt and silence them as needed. 
+ +if sys.platform == 'win32': + extra_compile_args = [] # no obvious -Werror equivalent on MSVC +else: + if (sys.platform == 'darwin' and + [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): + # assume a standard clang or gcc + extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion', + '-Wno-unreachable-code'] + # special things for clang + extra_compile_args.append('-Qunused-arguments') + else: + # assume a standard gcc + extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion', + '-Wno-unused-parameter', + '-Wno-unreachable-code'] diff --git a/extra_tests/test_datetime.py b/extra_tests/test_datetime.py --- a/extra_tests/test_datetime.py +++ b/extra_tests/test_datetime.py @@ -350,3 +350,31 @@ d2 = d.replace(hour=7) assert type(d2) is MyDatetime assert d2 == datetime.datetime(2016, 4, 5, 7, 2, 3) + +def test_normalize_pair(): + normalize = datetime._normalize_pair + + assert normalize(1, 59, 60) == (1, 59) + assert normalize(1, 60, 60) == (2, 0) + assert normalize(1, 95, 60) == (2, 35) + +def test_normalize_date(): + normalize = datetime._normalize_date + + # Huge year is caught correctly + with pytest.raises(OverflowError): + normalize(1000 * 1000, 1, 1) + # Normal dates should be unchanged + assert normalize(3000, 1, 1) == (3000, 1, 1) + # Month overflows year boundary + assert normalize(2001, 24, 1) == (2002, 12, 1) + # Day overflows month boundary + assert normalize(2001, 14, 31) == (2002, 3, 3) + # Leap years? 
:S + assert normalize(2001, 1, 61) == (2001, 3, 2) + assert normalize(2000, 1, 61) == (2000, 3, 1) + +def test_normalize_datetime(): + normalize = datetime._normalize_datetime + abnormal = (2002, 13, 35, 30, 95, 75, 1000001) + assert normalize(*abnormal) == (2003, 2, 5, 7, 36, 16, 1) diff --git a/lib-python/3/datetime.py b/lib-python/3/datetime.py --- a/lib-python/3/datetime.py +++ b/lib-python/3/datetime.py @@ -1544,9 +1544,13 @@ self.__setstate(year, month) self._hashcode = -1 return self - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond, fold = _check_time_fields( - hour, minute, second, microsecond, fold) + elif isinstance(year, tuple) and len(year) == 7: + # Internal operation - numbers guaranteed to be valid + year, month, day, hour, minute, second, microsecond = year + else: + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond, fold = _check_time_fields( + hour, minute, second, microsecond, fold) _check_tzinfo_arg(tzinfo) self = dateinterop.__new__(cls) self._year = int(year) @@ -2035,20 +2039,18 @@ "Add a datetime and a timedelta." 
if not isinstance(other, timedelta): return NotImplemented - delta = timedelta(self.toordinal(), - hours=self._hour, - minutes=self._minute, - seconds=self._second, - microseconds=self._microsecond) - delta += other - hour, rem = divmod(delta.seconds, 3600) - minute, second = divmod(rem, 60) - if 0 < delta.days <= _MAXORDINAL: - return datetime.combine(date.fromordinal(delta.days), - time(hour, minute, second, - delta.microseconds, - tzinfo=self._tzinfo)) - raise OverflowError("result out of range") + + result = _normalize_datetime( + self._year, + self._month, + self._day + other.days, + self._hour, + self._minute, + self._second + other.seconds, + self._microsecond + other.microseconds, + ) + + return datetime(result, tzinfo=self._tzinfo) __radd__ = __add__ @@ -2145,6 +2147,65 @@ datetime.resolution = timedelta(microseconds=1) +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo + + +def _normalize_datetime(y, m, d, hh, mm, ss, us): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d) + return y, m, d, hh, mm, ss, us + + +def _normalize_date(year, month, day): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 + + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). 
+ # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) + + if not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day + + def _isoweek1monday(year): # Helper to calculate the day number of the Monday starting week 1 # XXX This could be done more efficiently diff --git a/lib-python/3/importlib/_bootstrap.py b/lib-python/3/importlib/_bootstrap.py --- a/lib-python/3/importlib/_bootstrap.py +++ b/lib-python/3/importlib/_bootstrap.py @@ -67,6 +67,7 @@ # Deadlock avoidance for concurrent circular imports. me = _thread.get_ident() tid = self.owner + count = 0 while True: lock = _blocking_on.get(tid) if lock is None: @@ -74,6 +75,14 @@ tid = lock.owner if tid == me: return True + # workaround for https://bugs.python.org/issue38091: + # instead of looping here forever, eventually return False. + # Unsure if this will cause real deadlocks to go undetected, + # but at least it doesn't cause *this* logic here to + # deadlock when there is otherwise no deadlock! 
+ count += 1 + if count >= 100: + return False def acquire(self): """ diff --git a/lib-python/3/sysconfig.py b/lib-python/3/sysconfig.py --- a/lib-python/3/sysconfig.py +++ b/lib-python/3/sysconfig.py @@ -451,6 +451,10 @@ vars['EXE'] = '.exe' vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) + # pypy: give us control over the ABI tag in a wheel name + import _imp + so_ext = _imp.extension_suffixes()[0] + vars['SOABI']= '-'.join(so_ext.split('.')[1].split('-')[:2]) # # public APIs diff --git a/lib-python/3/test/test_asyncio/test_events.py b/lib-python/3/test/test_asyncio/test_events.py --- a/lib-python/3/test/test_asyncio/test_events.py +++ b/lib-python/3/test/test_asyncio/test_events.py @@ -943,9 +943,14 @@ server = self.loop.run_until_complete(f) self.assertEqual(len(server.sockets), 1) sock = server.sockets[0] - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) + try: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + except OSError: + # SO_REUSEPORT is not actually supported, bail! + server.close() + return server.close() test_utils.run_briefly(self.loop) diff --git a/lib-python/3/test/test_dis.py b/lib-python/3/test/test_dis.py --- a/lib-python/3/test/test_dis.py +++ b/lib-python/3/test/test_dis.py @@ -272,7 +272,7 @@ 20 RETURN_VALUE """ -# XXX: change for PyPy? 
+# changed for PyPy dis_traceback = """\ %3d 0 SETUP_EXCEPT 12 (to 14) @@ -830,9 +830,9 @@ # End fodder for opinfo generation tests expected_outer_line = 1 _line_offset = outer.__code__.co_firstlineno - 1 -code_object_f = outer.__code__.co_consts[3] +code_object_f = outer.__code__.co_consts[2] expected_f_line = code_object_f.co_firstlineno - _line_offset -code_object_inner = code_object_f.co_consts[3] +code_object_inner = code_object_f.co_consts[2] expected_inner_line = code_object_inner.co_firstlineno - _line_offset expected_jumpy_line = 1 @@ -857,22 +857,22 @@ Instruction = dis.Instruction expected_opinfo_outer = [ - Instruction(opname='LOAD_CONST', opcode=100, arg=8, argval=(3, 4), argrepr='(3, 4)', offset=0, starts_line=2, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=1, argval=(3, 4), argrepr='(3, 4)', offset=0, starts_line=2, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=0, argval='a', argrepr='a', offset=2, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=1, argval='b', argrepr='b', offset=4, starts_line=None, is_jump_target=False), Instruction(opname='BUILD_TUPLE', opcode=102, arg=2, argval=2, argrepr='', offset=6, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=code_object_f, argrepr=repr(code_object_f), offset=8, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=4, argval='outer..f', argrepr="'outer..f'", offset=10, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=2, argval=code_object_f, argrepr=repr(code_object_f), offset=8, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval='outer..f', argrepr="'outer..f'", offset=10, starts_line=None, is_jump_target=False), Instruction(opname='MAKE_FUNCTION', opcode=132, arg=9, argval=9, argrepr='', offset=12, starts_line=None, 
is_jump_target=False), Instruction(opname='STORE_FAST', opcode=125, arg=2, argval='f', argrepr='f', offset=14, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_GLOBAL', opcode=116, arg=0, argval='print', argrepr='print', offset=16, starts_line=7, is_jump_target=False), Instruction(opname='LOAD_DEREF', opcode=136, arg=0, argval='a', argrepr='a', offset=18, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_DEREF', opcode=136, arg=1, argval='b', argrepr='b', offset=20, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval='', argrepr="''", offset=22, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=6, argval=1, argrepr='1', offset=24, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=4, argval='', argrepr="''", offset=22, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=1, argrepr='1', offset=24, starts_line=None, is_jump_target=False), Instruction(opname='BUILD_LIST', opcode=103, arg=0, argval=0, argrepr='', offset=26, starts_line=None, is_jump_target=False), Instruction(opname='BUILD_MAP', opcode=105, arg=0, argval=0, argrepr='', offset=28, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=7, argval='Hello world!', argrepr="'Hello world!'", offset=30, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=6, argval='Hello world!', argrepr="'Hello world!'", offset=30, starts_line=None, is_jump_target=False), Instruction(opname='CALL_FUNCTION', opcode=131, arg=7, argval=7, argrepr='', offset=32, starts_line=None, is_jump_target=False), Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=34, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_FAST', opcode=124, arg=2, argval='f', argrepr='f', offset=36, starts_line=8, 
is_jump_target=False), @@ -880,14 +880,14 @@ ] expected_opinfo_f = [ - Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=(5, 6), argrepr='(5, 6)', offset=0, starts_line=3, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=1, argval=(5, 6), argrepr='(5, 6)', offset=0, starts_line=3, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=2, argval='a', argrepr='a', offset=2, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=3, argval='b', argrepr='b', offset=4, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=0, argval='c', argrepr='c', offset=6, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_CLOSURE', opcode=135, arg=1, argval='d', argrepr='d', offset=8, starts_line=None, is_jump_target=False), Instruction(opname='BUILD_TUPLE', opcode=102, arg=4, argval=4, argrepr='', offset=10, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=code_object_inner, argrepr=repr(code_object_inner), offset=12, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=4, argval='outer..f..inner', argrepr="'outer..f..inner'", offset=14, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=2, argval=code_object_inner, argrepr=repr(code_object_inner), offset=12, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval='outer..f..inner', argrepr="'outer..f..inner'", offset=14, starts_line=None, is_jump_target=False), Instruction(opname='MAKE_FUNCTION', opcode=132, arg=9, argval=9, argrepr='', offset=16, starts_line=None, is_jump_target=False), Instruction(opname='STORE_FAST', opcode=125, arg=2, argval='inner', argrepr='inner', offset=18, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_GLOBAL', opcode=116, arg=0, argval='print', argrepr='print', 
offset=20, starts_line=5, is_jump_target=False), diff --git a/lib-python/3/test/test_extcall.py b/lib-python/3/test/test_extcall.py --- a/lib-python/3/test/test_extcall.py +++ b/lib-python/3/test/test_extcall.py @@ -57,7 +57,7 @@ Traceback (most recent call last): ... TypeError: ...got multiple values for keyword argument 'a' - >>> f(1, 2, a=3, **{'a': 4}, **{'a': 5}) + >>> f(1, 2, a=3, **{'a': 4}, **{'a': 5}) #doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: ...got multiple values for keyword argument 'a' @@ -254,20 +254,21 @@ ... TypeError: h() argument after * must be an iterable, not function - >>> h(*[1], *h) + >>> h(*[1], *h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: h() argument after * must be an iterable, not function + TypeError: ... >>> dir(*h) Traceback (most recent call last): ... TypeError: dir() argument after * must be an iterable, not function - >>> None(*h) + >>> None(**h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: ...argument after * must be an iterable, not function + TypeError: ... object argument after ** must be a mapping, \ +not function >>> h(**h) Traceback (most recent call last): @@ -289,35 +290,20 @@ ... TypeError: h() argument after ** must be a mapping, not list - >>> h(**{'a': 1}, **h) + >>> h(**{'a': 1}, **h) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: h() argument after ** must be a mapping, not function + TypeError: ...argument after ** must be a mapping, not function - >>> h(**{'a': 1}, **[]) + >>> h(**{'a': 1}, **[]) #doctest: +ELLIPSIS Traceback (most recent call last): ... - TypeError: h() argument after ** must be a mapping, not list + TypeError: ...argument after ** must be a mapping, not list >>> dir(**h) Traceback (most recent call last): ... - TypeError: ...argument after * must be an iterable, not function - - >>> None(*h) #doctest: +ELLIPSIS - Traceback (most recent call last): - ... 
- TypeError: ...argument after * must be an iterable, not function - - >>> h(**h) #doctest: +ELLIPSIS - Traceback (most recent call last): - ... - TypeError: ...argument after ** must be a mapping, not function - - >>> dir(**h) #doctest: +ELLIPSIS - Traceback (most recent call last): - ... - TypeError: ...argument after ** must be a mapping, not function + TypeError: dir() argument after ** must be a mapping, not function >>> None(**h) #doctest: +ELLIPSIS Traceback (most recent call last): diff --git a/lib-python/3/test/test_flufl.py b/lib-python/3/test/test_flufl.py --- a/lib-python/3/test/test_flufl.py +++ b/lib-python/3/test/test_flufl.py @@ -15,7 +15,7 @@ self.assertEqual(cm.exception.text, '2 != 3\n') self.assertEqual(cm.exception.filename, '') self.assertEqual(cm.exception.lineno, 2) - self.assertEqual(cm.exception.offset, 4) + self.assertEqual(cm.exception.offset, 2) # changed in PyPy def test_guido_as_bdfl(self): code = '2 {0} 3' @@ -26,7 +26,7 @@ self.assertEqual(cm.exception.text, '2 <> 3\n') self.assertEqual(cm.exception.filename, '') self.assertEqual(cm.exception.lineno, 1) - self.assertEqual(cm.exception.offset, 4) + self.assertEqual(cm.exception.offset, 2) # changed in PyPy if __name__ == '__main__': diff --git a/lib-python/3/test/test_import/__init__.py b/lib-python/3/test/test_import/__init__.py --- a/lib-python/3/test/test_import/__init__.py +++ b/lib-python/3/test/test_import/__init__.py @@ -414,16 +414,22 @@ os.does_not_exist def test_concurrency(self): + def delay_has_deadlock(frame, event, arg): + if event == 'call' and frame.f_code.co_name == 'has_deadlock': + time.sleep(0.05) + sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data')) try: exc = None def run(): + sys.settrace(delay_has_deadlock) event.wait() try: import package except BaseException as e: nonlocal exc exc = e + sys.settrace(None) for i in range(10): event = threading.Event() diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py 
--- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py +++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py @@ -1,4 +1,5 @@ import sys +import os import time import _thread import weakref @@ -10,7 +11,7 @@ import os msg = "\n\nThe _ssl cffi module either doesn't exist or is incompatible with your machine's shared libraries.\n" + \ "If you have a compiler installed, you can try to rebuild it by running:\n" + \ - "cd %s\n" % os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + \ + "cd %s\n" % os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) + \ "%s _ssl_build.py\n" % sys.executable raise ImportError(str(e) + msg) @@ -83,6 +84,11 @@ OP_NO_SSLv2 = lib.SSL_OP_NO_SSLv2 OP_NO_SSLv3 = lib.SSL_OP_NO_SSLv3 OP_NO_TLSv1_3 = lib.SSL_OP_NO_TLSv1_3 +if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): + # OP_ENABLE_MIDDLEBOX_COMPAT = lib.SSL_OP_ENABLE_MIDDLEBOX_COMPAT + # XXX should be conditionally compiled into lib + OP_ENABLE_MIDDLEBOX_COMPAT = 0x00100000 + SSL_CLIENT = 0 @@ -289,6 +295,20 @@ mode |= lib.SSL_MODE_AUTO_RETRY lib.SSL_set_mode(ssl, mode) + if HAS_TLSv1_3: + if sslctx._post_handshake_auth: + if socket_type == SSL_SERVER: + # bpo-37428: OpenSSL does not ignore SSL_VERIFY_POST_HANDSHAKE. + # Set SSL_VERIFY_POST_HANDSHAKE flag only for server sockets and + # only in combination with SSL_VERIFY_PEER flag. 
+ mode = lib.SSL_CTX_get_verify_mode(lib.SSL_get_SSL_CTX(self.ssl)) + if (mode & lib.SSL_VERIFY_PEER): + verify_cb = lib.SSL_get_verify_callback(self.ssl) + mode |= lib.SSL_VERIFY_POST_HANDSHAKE + lib.SSL_set_verify(ssl, mode, verify_cb) + else: + lib.SSL_set_post_handshake_auth(ssl, 1) + if HAS_SNI and self.server_hostname: name = _str_to_ffi_buffer(self.server_hostname) lib.SSL_set_tlsext_host_name(ssl, name) @@ -711,6 +731,15 @@ else: return None + def verify_client_post_handshake(self): + + if not HAS_TLSv1_3: + raise NotImplementedError("Post-handshake auth is not supported by " + "your OpenSSL version.") + err = lib.SSL_verify_client_post_handshake(self.ssl); + if err == 0: + raise pyssl_error(self, err) + def pending(self): count = lib.SSL_pending(self.ssl) if count < 0: @@ -767,6 +796,7 @@ return bool(lib.SSL_session_reused(self.ssl)) + def _fs_decode(name): return name.decode(sys.getfilesystemencoding()) def _fs_converter(name): @@ -822,13 +852,13 @@ if OPENSSL_VERSION_INFO > (1, 1, 0, 0, 0): aead = lib.SSL_CIPHER_is_aead(cipher) nid = lib.SSL_CIPHER_get_cipher_nid(cipher) - skcipher = OBJ_nid2ln(nid) if nid != NID_undef else None + skcipher = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_digest_nid(cipher); - digest = OBJ_nid2ln(nid) if nid != NID_undef else None + digest = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None nid = lib.SSL_CIPHER_get_kx_nid(cipher); - kx = OBJ_nid2ln(nid) if nid != NID_undef else None - nid = SSL_CIPHER_get_auth_nid(cipher); - auth = OBJ_nid2ln(nid) if nid != NID_undef else None + kx = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None + nid = lib.SSL_CIPHER_get_auth_nid(cipher); + auth = lib.OBJ_nid2ln(nid) if nid != lib.NID_undef else None ret.update({'aead' : bool(aead), 'symmmetric' : skcipher, 'digest' : digest, @@ -888,9 +918,8 @@ class _SSLContext(object): __slots__ = ('ctx', '_check_hostname', 'servername_callback', 'alpn_protocols', '_alpn_protocols_handle', - 'npn_protocols', 
'set_hostname', + 'npn_protocols', 'set_hostname', '_post_handshake_auth', '_set_hostname_handle', '_npn_protocols_handle') - def __new__(cls, protocol): self = object.__new__(cls) self.ctx = ffi.NULL @@ -967,6 +996,9 @@ if lib.Cryptography_HAS_X509_V_FLAG_TRUSTED_FIRST: store = lib.SSL_CTX_get_cert_store(self.ctx) lib.X509_STORE_set_flags(store, lib.X509_V_FLAG_TRUSTED_FIRST) + if HAS_TLSv1_3: + self.post_handshake_auth = 0; + lib.SSL_CTX_set_post_handshake_auth(self.ctx, self.post_handshake_auth) return self @property @@ -1052,6 +1084,7 @@ "CERT_OPTIONAL or CERT_REQUIRED") self._check_hostname = check_hostname + def set_ciphers(self, cipherlist): cipherlistbuf = _str_to_ffi_buffer(cipherlist) ret = lib.SSL_CTX_set_cipher_list(self.ctx, cipherlistbuf) @@ -1263,6 +1296,12 @@ return stats def set_default_verify_paths(self): + if (not os.environ.get('SSL_CERT_FILE') and + not os.environ.get('SSL_CERT_DIR') and + not sys.platform == 'win32'): + locations = get_default_verify_paths() + self.load_verify_locations(locations[1], locations[3]) + return if not lib.SSL_CTX_set_default_verify_paths(self.ctx): raise ssl_error("") @@ -1385,6 +1424,44 @@ outgoing) return sock + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + + @property + def post_handshake_auth(self): + if HAS_TLSv1_3: + return bool(self._post_handshake_auth) + return None + + @post_handshake_auth.setter + def post_handshake_auth(self, arg): + if arg is None: + raise AttributeError("cannot delete attribute") + + pha = bool(arg) + self._post_handshake_auth = pha; + + # bpo-37428: newPySSLSocket() sets 
SSL_VERIFY_POST_HANDSHAKE flag for + # server sockets and SSL_set_post_handshake_auth() for client + + return 0; + # cryptography constraint: OPENSSL_NO_TLSEXT will never be set! @@ -1609,20 +1686,69 @@ lib.RAND_add(buf, len(buf), entropy) def get_default_verify_paths(): + ''' + Find a certificate store and associated values + Returns something like + `('SSL_CERT_FILE', '/usr/lib/ssl/cert.pem', 'SSL_CERT_DIR', '/usr/lib/ssl/certs')` + on Ubuntu and windows10 + + `('SSL_CERT_FILE', '/usr/local/cert.pem', 'SSL_CERT_DIR', '/usr/local/certs')` + on CentOS + + `('SSL_CERT_FILE', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem', + 'SSL_CERT_DIR', '/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/certs')` + on Darwin + + For portable builds (based on CentOS, but could be running on any glibc + linux) we need to check other locations. The list of places to try was taken + from golang in Dec 2018: + https://golang.org/src/crypto/x509/root_unix.go (for the directories), + https://golang.org/src/crypto/x509/root_linux.go (for the files) + ''' + certFiles = [ + "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/Gentoo etc. 
+ "/etc/pki/tls/certs/ca-bundle.crt", # Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", # OpenSUSE + "/etc/pki/tls/cacert.pem", # OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", # CentOS/RHEL 7 + "/etc/ssl/cert.pem", # Alpine Linux + ] + certDirectories = [ + "/etc/ssl/certs", # SLES10/SLES11 + "/system/etc/security/cacerts", # Android + "/usr/local/share/certs", # FreeBSD + "/etc/pki/tls/certs", # Fedora/RHEL + "/etc/openssl/certs", # NetBSD + "/var/ssl/certs", # AIX + ] + + # optimization: reuse the values from a local varaible + if getattr(get_default_verify_paths, 'retval', None): + return get_default_verify_paths.retval + + # This should never fail, it should always return SSL_CERT_FILE and SSL_CERT_DIR ofile_env = _cstr_decode_fs(lib.X509_get_default_cert_file_env()) - if ofile_env is None: - return None + odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) + + # Platform depenedent ofile = _cstr_decode_fs(lib.X509_get_default_cert_file()) - if ofile is None: - return None - odir_env = _cstr_decode_fs(lib.X509_get_default_cert_dir_env()) - if odir_env is None: - return None odir = _cstr_decode_fs(lib.X509_get_default_cert_dir()) - if odir is None: - return odir - return (ofile_env, ofile, odir_env, odir); + + if os.path.exists(ofile) and os.path.exists(odir): + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + + # OpenSSL didn't supply the goods. 
Try some other options + for f in certFiles: + if os.path.exists(f): + ofile = f + for f in certDirectories: + if os.path.exists(f): + odir = f + get_default_verify_paths.retval = (ofile_env, ofile, odir_env, odir) + return get_default_verify_paths.retval + @ffi.callback("int(SSL*,unsigned char **,unsigned char *,const unsigned char *,unsigned int,void *)") def select_alpn_callback(ssl, out, outlen, client_protocols, client_protocols_len, args): diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py --- a/lib_pypy/_curses_build.py +++ b/lib_pypy/_curses_build.py @@ -1,12 +1,23 @@ -from cffi import FFI +from cffi import FFI, VerificationError import os +version_str = ''' + static const int NCURSES_VERSION_MAJOR; + static const int NCURSES_VERSION_MINOR; +''' + +version = (0, 0) def find_library(options): + global version for library in options: ffi = FFI() - ffi.set_source("_curses_cffi_check", "", libraries=[library]) + ffi.cdef(version_str) + ffi.set_source("_curses_cffi_check", version_str, libraries=[library]) try: ffi.compile() + import _curses_cffi_check + lib = _curses_cffi_check.lib + version = (lib.NCURSES_VERSION_MAJOR, lib.NCURSES_VERSION_MINOR) except VerificationError as e: e_last = e continue @@ -17,16 +28,27 @@ # error message raise e_last -def find_curses_include_dirs(): - if os.path.exists('/usr/include/ncurses'): - return ['/usr/include/ncurses'] - if os.path.exists('/usr/include/ncursesw'): - return ['/usr/include/ncursesw'] - return [] +def find_curses_dir_and_name(): + for base in ('/usr', '/usr/local'): + if os.path.exists(os.path.join(base, 'include', 'ncursesw')): + return base, 'ncursesw' + if os.path.exists(os.path.join(base, 'include', 'ncurses')): + return base, 'ncurses' + return '', None +base, name = find_curses_dir_and_name() +if base: + include_dirs = [os.path.join(base, 'include', name)] + library_dirs = [os.path.join(base, 'lib')] + libs = [name, name.replace('ncurses', 'panel')] +else: + include_dirs = [] + 
library_dirs = [] + libs = [find_library(['ncursesw', 'ncurses']), + find_library(['panelw', 'panel']), + ] ffi = FFI() - ffi.set_source("_curses_cffi", """ #ifdef __APPLE__ /* the following define is necessary for OS X 10.6+; without it, the @@ -73,9 +95,10 @@ void _m_getsyx(int *yx) { getsyx(yx[0], yx[1]); } -""", libraries=[find_library(['ncurses', 'ncursesw']), - find_library(['panel', 'panelw'])], - include_dirs=find_curses_include_dirs()) +""", libraries=libs, + library_dirs = library_dirs, + include_dirs=include_dirs, +) ffi.cdef(""" @@ -86,8 +109,6 @@ typedef unsigned long... chtype; typedef chtype attr_t; -typedef int... wint_t; - typedef struct { short id; /* ID to distinguish multiple devices */ @@ -159,11 +180,11 @@ int setupterm(char *, int, int *); -WINDOW *stdscr; -int COLORS; -int COLOR_PAIRS; -int COLS; -int LINES; +extern WINDOW *stdscr; +extern int COLORS; +extern int COLOR_PAIRS; +extern int COLS; +extern int LINES; int baudrate(void); int beep(void); @@ -186,8 +207,6 @@ void filter(void); int flash(void); int flushinp(void); -int wget_wch(WINDOW *, wint_t *); -int mvwget_wch(WINDOW *, int, int, wint_t *); chtype getbkgd(WINDOW *); WINDOW * getwin(FILE *); int halfdelay(int); @@ -263,7 +282,6 @@ int touchwin(WINDOW *); int typeahead(int); int ungetch(int); -int unget_wch(const wchar_t); int untouchwin(WINDOW *); void use_env(bool); int waddch(WINDOW *, const chtype); @@ -342,7 +360,7 @@ #define _m_NetBSD ... int _m_ispad(WINDOW *); -chtype acs_map[]; +extern chtype acs_map[]; // For _curses_panel: @@ -367,6 +385,14 @@ void _m_getsyx(int *yx); """) +if version > (5, 7): + ffi.cdef(""" +typedef int... 
wint_t; +int wget_wch(WINDOW *, wint_t *); +int mvwget_wch(WINDOW *, int, int, wint_t *); +int unget_wch(const wchar_t); +""") + if __name__ == "__main__": ffi.compile() diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.13.1 +Version: 1.13.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -5,8 +5,8 @@ from .error import CDefError, FFIError, VerificationError, VerificationMissing from .error import PkgConfigError -__version__ = "1.13.1" -__version_info__ = (1, 13, 1) +__version__ = "1.13.2" +__version_info__ = (1, 13, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -261,12 +261,12 @@ return (int)_cffi_to_c_wchar3216_t(o); } -_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(int x) +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x) { if (sizeof(_cffi_wchar_t) == 4) return _cffi_from_c_wchar_t((_cffi_wchar_t)x); else - return _cffi_from_c_wchar3216_t(x); + return _cffi_from_c_wchar3216_t((int)x); } diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -224,7 +224,7 @@ if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.13.1" + "\ncompiled with cffi version: 1.13.2" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -159,9 +159,9 @@ def _warn_for_non_extern_non_static_global_variable(decl): if not decl.storage: import warnings - warnings.warn("Declaration of global variable '%s' in cdef() should " - "be marked 'extern' for consistency (or possibly " - "'static' in API mode)" % (decl.name,)) + warnings.warn("Global variable '%s' in cdef(): for consistency " + "with C it should have a storage class specifier " + "(usually 'extern')" % (decl.name,)) def _preprocess(csource): # Remove comments. 
NOTE: this only work because the cdef() section diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -219,11 +219,6 @@ BoolOption("newshortcut", "cache and shortcut calling __new__ from builtin types", default=False), - BoolOption("reinterpretasserts", - "Perform reinterpretation when an assert fails " - "(only relevant for tests)", - default=False), - ]), ]) diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -14,7 +14,7 @@ conf = get_pypy_config() conf.translation.gc = "boehm" with py.test.raises(ConfigError): - conf.translation.gcrootfinder = 'asmgcc' + conf.translation.gcrootfinder = 'shadowstack' def test_frameworkgc(): for name in ["minimark", "semispace"]: diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -135,8 +135,8 @@ return PyPyModule(path, parent) def is_applevel(item): - from pypy.tool.pytest.apptest import AppTestFunction - return isinstance(item, AppTestFunction) + from pypy.tool.pytest.apptest import AppTestMethod + return isinstance(item, AppTestMethod) def pytest_collection_modifyitems(config, items): if config.getoption('runappdirect') or config.getoption('direct_apptest'): @@ -166,8 +166,6 @@ def funcnamefilter(self, name): if name.startswith('test_'): return self.accept_regular_test() - if name.startswith('app_test_'): - return True return False def classnamefilter(self, name): @@ -182,13 +180,6 @@ if name.startswith('AppTest'): from pypy.tool.pytest.apptest import AppClassCollector return AppClassCollector(name, parent=self) - - elif hasattr(obj, 'func_code') and self.funcnamefilter(name): - if name.startswith('app_test_'): - assert not obj.func_code.co_flags & 32, \ - "generator app level functions? 
you must be joking" - from pypy.tool.pytest.apptest import AppTestFunction - return AppTestFunction(name, parent=self) return super(PyPyModule, self).makeitem(name, obj) def skip_on_missing_buildoption(**ropts): @@ -207,27 +198,15 @@ py.test.skip("need translated pypy3 with: %s, got %s" %(ropts,options)) -class LazyObjSpaceGetter(object): - def __get__(self, obj, cls=None): - from pypy.tool.pytest.objspace import gettestobjspace - space = gettestobjspace() - if cls: - cls.space = space - return space - - @pytest.hookimpl(tryfirst=True) def pytest_runtest_setup(item): if isinstance(item, py.test.collect.Function): appclass = item.getparent(py.test.Class) if appclass is not None: + from pypy.tool.pytest.objspace import gettestobjspace # Make cls.space and cls.runappdirect available in tests. - spaceconfig = getattr(appclass.obj, 'spaceconfig', None) - if spaceconfig is not None: - from pypy.tool.pytest.objspace import gettestobjspace - appclass.obj.space = gettestobjspace(**spaceconfig) - else: - appclass.obj.space = LazyObjSpaceGetter() + spaceconfig = getattr(appclass.obj, 'spaceconfig', {}) + appclass.obj.space = gettestobjspace(**spaceconfig) appclass.obj.runappdirect = option.runappdirect def pytest_ignore_collect(path, config): diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -60,12 +60,9 @@ Install build-time dependencies ------------------------------- (**Note**: for some hints on how to translate the Python interpreter under -Windows, see the `windows document`_ . For hints on how to cross-compile in -a chroot using scratchbox2, see the `arm document`_ in the -`RPython documentation`_) +Windows, see the `windows document`_ . .. _`windows document`: windows.html -.. _`arm document`: http://rpython.readthedocs.org/en/latest/arm.html .. _`RPython documentation`: http://rpython.readthedocs.org The host Python needs to have CFFI installed. 
If translating on PyPy, CFFI is @@ -88,9 +85,6 @@ pyexpat libexpat1 -_ssl - libssl - _vmprof libunwind (optional, loaded dynamically at runtime) @@ -104,6 +98,9 @@ sqlite3 libsqlite3 +_ssl, _hashlib + libssl + curses libncurses-dev (for PyPy2) libncursesw-dev (for PyPy3) @@ -115,11 +112,12 @@ tk-dev lzma (PyPy3 only) - liblzma + liblzma or libxz, version 5 and up -To run untranslated tests, you need the Boehm garbage collector libgc. +To run untranslated tests, you need the Boehm garbage collector libgc, version +7.4 and up -On recent Debian and Ubuntu (16.04 onwards), this is the command to install +On Debian and Ubuntu (16.04 onwards), this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config zlib1g-dev libbz2-dev \ @@ -127,18 +125,11 @@ tk-dev libgc-dev python-cffi \ liblzma-dev libncursesw5-dev # these two only needed on PyPy3 -On older Debian and Ubuntu (12.04-14.04):: - - apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ - tk-dev libgc-dev python-cffi \ - liblzma-dev libncursesw-dev # these two only needed on PyPy3 - On Fedora:: dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ sqlite-devel ncurses-devel expat-devel openssl-devel tk-devel \ - gdbm-devel python-cffi\ + gdbm-devel python-cffi gc-devel\ xz-devel # For lzma on PyPy3. On SLES11:: diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -73,7 +73,7 @@ # The short X.Y version. version = '7.3' # The full version, including alpha/beta/rc tags. -release = '7.3.0' +release = '7.3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/config/translation.gcrootfinder.txt b/pypy/doc/config/translation.gcrootfinder.txt --- a/pypy/doc/config/translation.gcrootfinder.txt +++ b/pypy/doc/config/translation.gcrootfinder.txt @@ -1,16 +1,7 @@ Choose the method used to find the roots in the GC. This only -applies to our framework GCs. You have a choice of two -alternatives: +applies to our framework GCs. - ``--gcrootfinder=shadowstack``: use a so-called "shadow stack", which is an explicitly maintained custom stack of - root pointers. This is the most portable solution. - -- ``--gcrootfinder=asmgcc``: use assembler hackery to find the - roots directly from the normal stack. This is a bit faster, - but platform specific. It works so far with GCC or MSVC, - on i386 and x86-64. It is tested only on Linux - so other platforms (as well as MSVC) may need - various fixes before they can be used. Note asmgcc will be deprecated - at some future date, and does not work with clang. - + root pointers. This is the most portable solution, and also + the only one available now. diff --git a/pypy/doc/contributing.rst b/pypy/doc/contributing.rst --- a/pypy/doc/contributing.rst +++ b/pypy/doc/contributing.rst @@ -311,16 +311,13 @@ directory or even the top level subdirectory ``pypy``. It takes hours and uses huge amounts of RAM and is not recommended. -To run CPython regression tests you can point to the ``lib-python`` -directory:: - - py.test lib-python/2.7/test/test_datetime.py - -This will usually take a long time because this will run -the PyPy Python interpreter on top of CPython. On the plus -side, it's usually still faster than doing a full translation -and running the regression test with the translated PyPy Python -interpreter. +To run CPython regression tests, you should start with a translated PyPy and +run the tests as you would with CPython (see below). 
You can, however, also +attempt to run the tests before translation, but be aware that it is done with +a hack that doesn't work in all cases and it is usually extremely slow: +``py.test lib-python/2.7/test/test_datetime.py``. Usually, a better idea is to +extract a minimal failing test of at most a few lines, and put it into one of +our own tests in ``pypy/*/test/``. .. _py.test testing tool: http://pytest.org .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage @@ -350,6 +347,11 @@ cpython2 pytest.py -A pypy/module/cpyext/test --python=path/to/pypy3 +To run a test from the standard CPython regression test suite, use the regular +Python way, i.e. (replace "pypy" with the exact binary name, if needed):: + + pypy -m test.test_datetime + Tooling & Utilities ^^^^^^^^^^^^^^^^^^^ diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -65,16 +65,16 @@ Spenser Bauman Michal Bendowski Jan de Mooij + Stefano Rivera Tyler Wade + Stefan Beyer Vincent Legoll Michael Foord Stephan Diehl - Stefano Rivera Jean-Paul Calderone Stefan Schwarzer Tomek Meka Valentino Volonghi - Stefan Beyer Patrick Maupin Devin Jeanpierre Bob Ippolito @@ -103,9 +103,10 @@ Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Stian Andreassen + Julian Berman William Leslie Paweł Piotr Przeradowski - Stian Andreassen marky1991 Ilya Osadchiy Tobias Oberstein @@ -116,7 +117,7 @@ tav Georg Brandl Joannah Nanjekye - Julian Berman + Yannick Jadoul Bert Freudenberg Wanja Saatkamp Mike Blume @@ -241,6 +242,7 @@ Lutz Paelike Ian Foote Philipp Rustemeuer + Bernd Schoeller Logan Chien Catalin Gabriel Manciu Jacob Oscarson @@ -253,7 +255,6 @@ Lene Wagner Tomo Cocoa Miro Hrončok - Anthony Sottile David Lievens Neil Blakey-Milner Henrik Vendelbo @@ -269,7 +270,6 @@ Laurens Van Houtven Bobby Impollonia Roberto De Ioris - Yannick Jadoul Jeong YunWon Christopher Armstrong Aaron Tubbs @@ -324,6 +324,7 @@ Daniil Yarancev Min RK OlivierBlanvillain + bernd.schoeller at inf.ethz.ch dakarpov at gmail.com Jonas Pfannschmidt Zearin @@ -365,6 +366,7 @@ Jesdi Konrad Delong Dinu Gherman + Sam Edwards pizi Tomáš Pružina James Robert diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-v7.3.0.rst release-v7.2.0.rst release-v7.1.1.rst release-v7.1.0.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-pypy2-7.3.0.rst whatsnew-pypy2-7.2.0.rst whatsnew-pypy2-7.1.0.rst whatsnew-pypy2-7.0.0.rst @@ -43,6 +44,7 @@ .. 
toctree:: whatsnew-pypy3-head.rst + whatsnew-pypy3-7.3.0.rst whatsnew-pypy3-7.2.0.rst whatsnew-pypy3-7.1.0.rst diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-v7.3.0.rst @@ -0,0 +1,238 @@ +==================================== +PyPy v7.3.0: release of 2.7, and 3.6 +==================================== + +The PyPy team is proud to release the version 7.3.0 of PyPy, which includes +two different interpreters: + + - PyPy2.7, which is an interpreter supporting the syntax and the features of + Python 2.7 including the stdlib for CPython 2.7.13 + + - PyPy3.6: which is an interpreter supporting the syntax and the features of + Python 3.6, including the stdlib for CPython 3.6.9. + +The interpreters are based on much the same codebase, thus the double +release. + +We have worked with the python packaging group to support tooling around +building third party packages for python, so this release changes the ABI tag +for PyPy. + +Based on the great work done in `portable-pypy`_, the linux downloads we +provide are now built on top of the `manylinux2010`_ CentOS6 docker image. +The tarballs include the needed shared objects to run on any platform that +supports manylinux2010 wheels, which should include all supported versions of +debian- and RedHat-based distributions (including Ubuntu, CentOS, and Fedora). + +The `CFFI`_ backend has been updated to version 1.13.1. We recommend using CFFI +rather than c-extensions to interact with C. + +The built-in ``_cppyy`` module was upgraded to 1.10.6, which +provides, among others, better template resolution, stricter ``enum`` handling, +anonymous struct/unions, cmake fragments for distribution, optimizations for +PODs, and faster wrapper calls. We reccomend using cppyy_ for performant +wrapping of C++ code for Python. + +The vendored pyrepl package for interaction inside the REPL was updated. 
+ +Support for codepage encoding and decoding was added for Windows. + +As always, this release fixed several issues and bugs raised by the growing +community of PyPy users. We strongly recommend updating. Many of the fixes are +the direct result of end-user bug reports, so please continue reporting issues +as they crop up. + +You can download the v7.3 releases here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. If PyPy is not quite good enough for your needs, we are available for +direct consulting work. + +We would also like to thank our contributors and encourage new people to join +the project. PyPy has many layers and we need help with all of them: `PyPy`_ +and `RPython`_ documentation improvements, tweaking popular modules to run +on pypy, or general `help`_ with making RPython's JIT even better. Since the +previous release, we have accepted contributions from 3 new contributors, +thanks for pitching in. + +.. _`PyPy`: index.html +.. _`RPython`: https://rpython.readthedocs.org +.. _`help`: project-ideas.html +.. _`CFFI`: http://cffi.readthedocs.io +.. _`cppyy`: https://cppyy.readthedocs.io +.. _`available as wheels`: https://github.com/antocuni/pypy-wheels +.. _`portable-pypy`: https://github.com/squeaky-pl/portable-pypy +.. _`manylinux2010`: https://github.com/pypa/manylinux + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7, 3.6. It's fast (`PyPy and CPython 2.7.x`_ performance +comparison) due to its integrated tracing JIT compiler. + +We also welcome developers of other `dynamic languages`_ to see what RPython +can do for them. 
+ +This PyPy release supports: + + * **x86** machines on most common operating systems + (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD) + + * big- and little-endian variants of **PPC64** running Linux, + + * **s390x** running Linux + + * 64-bit **ARM** machines running Linux. + +Unfortunately at the moment of writing our ARM buildbots are out of service, +so for now we are **not** releasing any binary for the ARM architecture (32 +bit), although PyPy does support ARM 32 bit processors. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html + + +Changelog +========= + +Changes shared across versions +------------------------------ + +* Fix segfault when calling descr-methods with no arguments +* Change the SOABI and subsequently change the reported ABI tag. +* Update the builtin ``_cppyy`` backend to 1.10.6 +* Performance improvements in string/unicode handling +* Fix compilation error when building `revdb`_ (`issue 3084`_, actually + released in PyPy 7.2 but not mentioned in the release notes) +* Add JIT options to documentation and an option for JIT help via ``pypy --jit + help`` +* Fix regex escapes in pyrepl (`issue 3097`_) +* Allow reloading of subclasses of module (`issue 3099`_) +* Work around a gcc bug, which was reported to them and fixed (`issue 3086`_) +* Fix (again) faulty logic when decoding invalid UTF-8 (`issue 2389`_) +* Fix up LICENSE file +* Turn all ``http`` links in the documentation to ``https`` +* Update the bndled pip and setuptools (used in ``pypy -mensurepip`` to version + that support `manylinux2010`_ wheels +* Link the ``DEFAULT_SOABI`` to the ``PYPY_VERSION`` +* Workaround for programs calling ``sys.setrecursionlimit(huge_value)`` (`issue + 3094`_) +* Set minimal ``MACOSX_DEPLOYMENT_TARGET`` to 10.7 on macOS; cpython uses 10.5 +* Fix a JIT bug causing rare register corruption on aarch64 +* Add discovery of ``ncursesw`` when building 
``_minimal_curses`` and improve + handling of old curses versions (`issue 2970`_) +* Improve the error message for ``class X(random_object)`` (`issue 3109`_) +* Deal with json dicts containing repeated keys in the new map based parser + (`issue 3108`_) +* Port parts of the `portable pypy`_ repackaging scripts to add an option for + ``RPATH`` manipulation on linux +* Check for overflow in ctypes array creation +* Better support and report MSVC versions used to compile on windows +* Allow any kind of buffer in socket.setsockopt(), like CPython (`issue 3114`_) +* Fix importing a module with unicode in ``sys.path`` (`issue 3112`_) +* Support OpenSSL 1.1 and TLSv1_3 +* Remove the (deprecated since 5.7) asmgcc rootfinder from the GC +* Overflow in RPython when converting ``2<<32`` into a ``Signed`` on 32-bit + platforms rather than automatically using a ``SignedLongLong``, require an + explicit ``r_int64()`` call instead + +C-API (cpyext) and c-extensions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Add ``_PySet_Next``, ``_PySet_NextEntry`` +* Correctly swallow exceptions happening inside ``PyDict_GetItem()`` (`issue + 3098`_) +* Respect tp_dict on PyType_Ready (`issue XXX`_) +* Allow calling ``PyType_Ready`` on a subclass with a partially built + ``tp_base`` (issue 3117`_) +* Rename ``tuple_new`` to ``_PyPy_tuple_new`` to follow the naming convention of + exported symbols in ``libpypy-c.so`` +* Actually restore the traceback in ``PyErr_Restore`` (`issue 3120`_) + +Python 3.6 only +--------------- + +* Don't grow the ``lzma.decompress()`` buffer past ``max_length`` (`issue 3088`_) +* Backport fix from CPython for failure of ``lzma`` to decompress a file + (`issue 3090`_) +* Fix ``asyncgen_hooks`` and refactor ``coroutine execution`` +* Fix range checking in GB18030 decoder (CPython issue `29990`_) +* Fix handling escape characters in HZ codec (CPython issue `30003`_) +* Reject null characters in a few more functions (CPython issue `13617`_) +* Fix build on macOS without 
``clock_gettime`` (before 10.12 and xcode 8, + released 2016) +* Backport 3.7.5 changes to ``timedelta.__eq__`` and ``time.__eq__`` (CPython + issue `37579`_) +* Backport more fixes to comparisons in ``datetime.py`` (CPython issue `37985`_) +* Use the python tag in ``pyc`` file names, not the abi tag +* Handle string formatting with a single ``[`` in the format string (`issue + 3100`_) +* Backport some of the patches in `macports pypy`_ +* Add missing ``HAVE_FACCESSAT`` to ``posix._have_functions`` +* Update pyrepl from upstream package (`issue 2971`_) +* Fix ``PyFrame._guess_function_name_parens()`` +* Fix range of allowed years in ``time.mktime`` to match CPython `13312`_ +* Generators need to store the old current ``exc_info`` in a place that is + visible, because in one corner case a call to ``sys.exc_info()`` might need + it. (`issue 3096`_) +* Remove incorrect clobbering of the ``locals`` after running ``exec()`` +* Adds encoding, decoding codepages on win32 +* Remove socket error attributes from ``_ssl`` (`issue 3119`_) +* Add missing ``os.getgrouplist`` (part of `issue 2375`_) +* Back-port the tentative fix from cpython: "Import deadlock detection causes + deadlock" (part of `issue 3111`_) +* Fix handling of ``sys.exc_info()`` in generators +* Return ``W_IntObject`` when converting from ``float`` to ``int`` when + possible, which speeds up many code paths. + +Python 3.6 C-API +~~~~~~~~~~~~~~~~ + +* Add ``PyObject_GenericGetDict``, ``PyObject_GenericSetDict``, ``_Py_strhex``, + ``_Py_strhex_bytes``, ``PyUnicodeNew``, ``_PyFinalizing``, + ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath`` +* Implement ``pystrhex.h`` (`issue 2687`_) +* Make ``PyUnicodeObject`` slightly more compact +* Fix memory leak when releasing a ``PyUnicodeObject`` + +.. _`revdb`: fix broken link +.. _`portable pypy`: fix broken link +.. _`manylinux2010`: fix broken link +.. 
_`macports pypy`: https://github.com/macports/macports-ports/blob/master/lang/pypy/files/darwin.py.diff + +.. _`issue 2375`: https://bitbucket.com/pypy/pypy/issues/2375 +.. _`issue 2389`: https://bitbucket.com/pypy/pypy/issues/2389 +.. _`issue 2687`: https://bitbucket.com/pypy/pypy/issues/2687 +.. _`issue 2970`: https://bitbucket.com/pypy/pypy/issues/2970 +.. _`issue 2971`: https://bitbucket.com/pypy/pypy/issues/2971 +.. _`issue 3084`: https://bitbucket.com/pypy/pypy/issues/3084 +.. _`issue 3086`: https://bitbucket.com/pypy/pypy/issues/3086 +.. _`issue 3088`: https://bitbucket.com/pypy/pypy/issues/3088 +.. _`issue 3090`: https://bitbucket.com/pypy/pypy/issues/3090 +.. _`issue 3094`: https://bitbucket.com/pypy/pypy/issues/3094 +.. _`issue 3096`: https://bitbucket.com/pypy/pypy/issues/3096 +.. _`issue 3097`: https://bitbucket.com/pypy/pypy/issues/3097 +.. _`issue 3098`: https://bitbucket.com/pypy/pypy/issues/3098 +.. _`issue 3099`: https://bitbucket.com/pypy/pypy/issues/3099 +.. _`issue 3100`: https://bitbucket.com/pypy/pypy/issues/3100 +.. _`issue 3108`: https://bitbucket.com/pypy/pypy/issues/3108 +.. _`issue 3109`: https://bitbucket.com/pypy/pypy/issues/3109 +.. _`issue 3111`: https://bitbucket.com/pypy/pypy/issues/3111 +.. _`issue 3112`: https://bitbucket.com/pypy/pypy/issues/3112 +.. _`issue 3114`: https://bitbucket.com/pypy/pypy/issues/3114 +.. _`issue 3117`: https://bitbucket.com/pypy/pypy/issues/3117 +.. _`issue 3119`: https://bitbucket.com/pypy/pypy/issues/3119 +.. _`issue 3120`: https://bitbucket.com/pypy/pypy/issues/3120 + +.. _13312: https://bugs.python.org/issue13312 +.. _13617: https://bugs.python.org/issue13617 +.. _29990: https://bugs.python.org/issue29990 +.. _30003: https://bugs.python.org/issue30003 +.. _37579: https://bugs.python.org/issue37579 +.. _37985: https://bugs.python.org/issue37985 +.. 
_37985: https://bugs.python.org/issue37985 + + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,22 +1,7 @@ -========================== -What's new in PyPy2.7 7.3+ -========================== +============================ +What's new in PyPy2.7 7.3.0+ +============================ -.. this is a revision shortly after release-pypy-7.2.0 -.. startrev: a511d86377d6 +.. this is a revision shortly after release-pypy-7.3.0 +.. startrev: 994c42529580 -.. branch: fix-descrmismatch-crash - -Fix segfault when calling descr-methods with no arguments - -.. branch: https-readme - -Convert http -> https in README.rst - -.. branch: license-update - -Update list directories in LICENSE - -.. branch: allow-forcing-no-embed - -When packaging, allow suppressing embedded dependencies via PYPY_NO_EMBED_DEPENDENCIES diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-pypy2-7.3.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-pypy2-7.3.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-pypy2-7.3.0.rst @@ -1,6 +1,6 @@ -========================== -What's new in PyPy2.7 7.3+ -========================== +=========================== +What's new in PyPy2.7 7.3.0 +=========================== .. this is a revision shortly after release-pypy-7.2.0 .. startrev: a511d86377d6 @@ -19,4 +19,23 @@ .. branch: allow-forcing-no-embed -When packaging, allow suppressing embedded dependencies via PYPY_NO_EMBED_DEPENDENCIES +When packaging, allow suppressing embedded dependencies via +PYPY_NO_EMBED_DEPENDENCIES + +.. branch: int-test-is-zero + +.. branch: cppyy-dev + +Upgraded the built-in ``_cppyy`` module to ``cppyy-backend 1.10.6``, which +provides, among others, better template resolution, stricter ``enum`` handling, +anonymous struct/unions, cmake fragments for distribution, optimizations for +PODs, and faster wrapper calls. + +.. 
branch: backport-decode_timeval_ns-py3.7 + +Backport ``rtime.decode_timeval_ns`` from py3.7 to rpython + +.. branch: kill-asmgcc + +Completely remove the deprecated translation option ``--gcrootfinder=asmgcc`` +because it no longer works with a recent enough ``gcc``. diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-7.3.0.rst copy from pypy/doc/whatsnew-pypy3-head.rst copy to pypy/doc/whatsnew-pypy3-7.3.0.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-7.3.0.rst @@ -1,6 +1,6 @@ -======================== -What's new in PyPy3 7.2+ -======================== +========================= +What's new in PyPy3 7.3.0 +========================= .. this is the revision after release-pypy3.6-v7.2 .. startrev: 6d2f8470165b @@ -9,3 +9,16 @@ .. branch: py3.6-asyncgen Fix asyncgen_hooks and refactor coroutine execution + +.. branch: py3.6-exc-info + +Follow CPython's use of exc_info more closely (issue 3096) + +.. branch: code_page-utf8 + +Add encoding, decoding of codepages on windows + +.. branch: py3.6-exc-info-2 + +Fix handling of sys.exc_info() in generators + diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst --- a/pypy/doc/whatsnew-pypy3-head.rst +++ b/pypy/doc/whatsnew-pypy3-head.rst @@ -1,11 +1,7 @@ -======================== -What's new in PyPy3 7.2+ -======================== +========================== +What's new in PyPy3 7.3.0+ +========================== -.. this is the revision after release-pypy3.6-v7.2 -.. startrev: 6d2f8470165b +.. this is the revision after release-pypy3.6-v7.3.0 +.. startrev: a56889d5df88 - -.. branch: py3.6-asyncgen - -Fix asyncgen_hooks and refactor coroutine execution diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -155,7 +155,7 @@ the `get_externals.py` utility to checkout the proper branch for your platform and PyPy version. -.. _subrepository: https://bitbucket.org/pypy/external +.. 
_subrepository: https://bitbucket.org/pypy/externals Using the mingw compiler ------------------------ diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -736,7 +736,15 @@ filename = sys.argv[0] mainmodule.__file__ = filename mainmodule.__cached__ = None - if not isolated: + for hook in sys.path_hooks: + try: + importer = hook(filename) + break + except ImportError: + continue + else: + importer = None + if importer is None and not isolated: sys.path.insert(0, sys.pypy_resolvedirof(filename)) # assume it's a pyc file only if its name says so. # CPython goes to great lengths to detect other cases @@ -772,18 +780,13 @@ args = (execfile, filename, mainmodule.__dict__) else: filename = sys.argv[0] - for hook in sys.path_hooks: - try: - importer = hook(filename) - except ImportError: - continue + if importer is not None: # It's the name of a directory or a zip file. # put the filename in sys.path[0] and import # the module __main__ import runpy sys.path.insert(0, filename) args = (runpy._run_module_as_main, '__main__', False) - break else: # That's the normal path, "pypy3 stuff.py". 
# We don't actually load via SourceFileLoader diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py --- a/pypy/interpreter/astcompiler/astbuilder.py +++ b/pypy/interpreter/astcompiler/astbuilder.py @@ -497,10 +497,10 @@ def handle_async_funcdef(self, node, decorators=None): return self.handle_funcdef_impl(node.get_child(1), 1, decorators) - + def handle_funcdef(self, node, decorators=None): return self.handle_funcdef_impl(node, 0, decorators) - + def handle_async_stmt(self, node): ch = node.get_child(1) if ch.type == syms.funcdef: @@ -942,7 +942,7 @@ if flufl and comp_node.get_value() == '!=': self.error("with Barry as BDFL, use '<>' instead of '!='", comp_node) elif not flufl and comp_node.get_value() == '<>': - self.error('invalid comparison', comp_node) + self.error('invalid syntax', comp_node) return ast.NotEq elif comp_type == tokens.NAME: if comp_node.get_value() == "is": @@ -1014,7 +1014,7 @@ atom_node.get_column()) else: return atom_expr - + def handle_power(self, power_node): atom_expr = self.handle_atom_expr(power_node.get_child(0)) if power_node.num_children() == 1: @@ -1092,7 +1092,7 @@ def handle_call(self, args_node, callable_expr): arg_count = 0 # position args + iterable args unpackings keyword_count = 0 # keyword args + keyword args unpackings - generator_count = 0 + generator_count = 0 for i in range(args_node.num_children()): argument = args_node.get_child(i) if argument.type == syms.argument: @@ -1300,7 +1300,6 @@ if is_dict: raise self.error("dict unpacking cannot be used in " "dict comprehension", atom_node) - return self.handle_dictcomp(maker, atom_node) else: # a dictionary display @@ -1423,7 +1422,7 @@ comps = self.comprehension_helper(dict_maker.get_child(i)) return ast.DictComp(key, value, comps, atom_node.get_lineno(), atom_node.get_column()) - + def handle_dictdisplay(self, node, atom_node): keys = [] values = [] @@ -1435,7 +1434,7 @@ i += 1 return ast.Dict(keys, values, 
atom_node.get_lineno(), atom_node.get_column()) - + def handle_setdisplay(self, node, atom_node): elts = [] i = 0 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -393,6 +393,14 @@ self.emit_op_arg(ops.BUILD_CONST_KEY_MAP, l) return l + def _visit_defaults(self, defaults): + w_tup = self._tuple_of_consts(defaults) + if w_tup: + self.load_const(w_tup) + else: + self.visit_sequence(defaults) + self.emit_op_arg(ops.BUILD_TUPLE, len(defaults)) + @specialize.arg(2) def _visit_function(self, func, function_code_generator): self.update_position(func.lineno, True) @@ -403,11 +411,10 @@ assert isinstance(args, ast.arguments) oparg = 0 - self.visit_sequence(args.defaults) if args.defaults is not None and len(args.defaults): oparg = oparg | 0x01 - self.emit_op_arg(ops.BUILD_TUPLE, len(args.defaults)) + self._visit_defaults(args.defaults) if args.kwonlyargs: kw_default_count = self._visit_kwonlydefaults(args) @@ -438,12 +445,10 @@ args = lam.args assert isinstance(args, ast.arguments) - self.visit_sequence(args.defaults) - oparg = 0 if args.defaults is not None and len(args.defaults): oparg = oparg | 0x01 - self.emit_op_arg(ops.BUILD_TUPLE, len(args.defaults)) + self._visit_defaults(args.defaults) if args.kwonlyargs: kw_default_count = self._visit_kwonlydefaults(args) @@ -1003,11 +1008,12 @@ attr = target.value self._annotation_evaluate(attr) elif isinstance(target, ast.Subscript): - # similar to the above, `a[0:5]: int` evaluates the name and the slice argument - # and if not in a function, also evaluates the annotation - sl = target.slice - self._annotation_evaluate(target.value) - self._annotation_eval_slice(sl) + if not assign.value: + # similar to the above, `a[0:5]: int` evaluates the name and the slice argument + # and if not in a function, also evaluates the annotation + sl = target.slice + self._annotation_evaluate(target.value) 
+ self._annotation_eval_slice(sl) else: self.error("can't handle annotation with %s" % (target,), target) # if this is not in a function, evaluate the annotation diff --git a/pypy/interpreter/astcompiler/test/apptest_misc.py b/pypy/interpreter/astcompiler/test/apptest_misc.py new file mode 100644 From pypy.commits at gmail.com Mon Dec 23 10:29:26 2019 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 23 Dec 2019 07:29:26 -0800 (PST) Subject: [pypy-commit] pypy py3.7: pure python HAMT immutable dict implementation from the immutables package Message-ID: <5e00dd56.1c69fb81.a177b.1616@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: py3.7 Changeset: r98359:4f0d7c92cfdd Date: 2019-12-23 16:14 +0100 http://bitbucket.org/pypy/pypy/changeset/4f0d7c92cfdd/ Log: pure python HAMT immutable dict implementation from the immutables package taken from rev 11863b29e3fbcd7d25335befce706e21a785f5e0 from https://github.com/MagicStack/immutables/ discussed on Twitter here: https://twitter.com/1st1/status/1208819455507218437 tests converted to pytest using unittest2pytest diff too long, truncating to 2000 out of 2125 lines diff --git a/extra_tests/test_immutables_map.py b/extra_tests/test_immutables_map.py new file mode 100644 --- /dev/null +++ b/extra_tests/test_immutables_map.py @@ -0,0 +1,1298 @@ +import collections.abc +import gc +import pickle +import random +import sys +import weakref +import pytest + +from _immutables_map import Map + + +class HashKey: + _crasher = None + + def __init__(self, hash, name, *, error_on_eq_to=None): + assert hash != -1 + self.name = name + self.hash = hash + self.error_on_eq_to = error_on_eq_to + + def __repr__(self): + if self._crasher is not None and self._crasher.error_on_repr: + raise ReprError + return ''.format(self.name, self.hash) + + def __hash__(self): + if self._crasher is not None and self._crasher.error_on_hash: + raise HashingError + + return self.hash + + def __eq__(self, other): + if not isinstance(other, HashKey): + 
return NotImplemented + + if self._crasher is not None and self._crasher.error_on_eq: + raise EqError + + if self.error_on_eq_to is not None and self.error_on_eq_to is other: + raise ValueError('cannot compare {!r} to {!r}'.format(self, other)) + if other.error_on_eq_to is not None and other.error_on_eq_to is self: + raise ValueError('cannot compare {!r} to {!r}'.format(other, self)) + + return (self.name, self.hash) == (other.name, other.hash) + + +class KeyStr(str): + + def __hash__(self): + if HashKey._crasher is not None and HashKey._crasher.error_on_hash: + raise HashingError + return super().__hash__() + + def __eq__(self, other): + if HashKey._crasher is not None and HashKey._crasher.error_on_eq: + raise EqError + return super().__eq__(other) + + def __repr__(self, other): + if HashKey._crasher is not None and HashKey._crasher.error_on_repr: + raise ReprError + return super().__eq__(other) + + +class HashKeyCrasher: + + def __init__(self, *, error_on_hash=False, error_on_eq=False, + error_on_repr=False): + self.error_on_hash = error_on_hash + self.error_on_eq = error_on_eq + self.error_on_repr = error_on_repr + + def __enter__(self): + if HashKey._crasher is not None: + raise RuntimeError('cannot nest crashers') + HashKey._crasher = self + + def __exit__(self, *exc): + HashKey._crasher = None + + +class HashingError(Exception): + pass + + +class EqError(Exception): + pass + + +class ReprError(Exception): + pass + + +class BaseMapTest: + + def test_hashkey_helper_1(self): + k1 = HashKey(10, 'aaa') + k2 = HashKey(10, 'bbb') + + assert k1 != k2 + assert hash(k1) == hash(k2) + + d = dict() + d[k1] = 'a' + d[k2] = 'b' + + assert d[k1] == 'a' + assert d[k2] == 'b' + + def test_map_basics_1(self): + h = self.Map() + h = None # NoQA + + def test_map_basics_2(self): + h = self.Map() + assert len(h) == 0 + + h2 = h.set('a', 'b') + assert h is not h2 + assert len(h) == 0 + assert len(h2) == 1 + + assert h.get('a') is None + assert h.get('a', 42) == 42 + + assert 
h2.get('a') == 'b' + + h3 = h2.set('b', 10) + assert h2 is not h3 + assert len(h) == 0 + assert len(h2) == 1 + assert len(h3) == 2 + assert h3.get('a') == 'b' + assert h3.get('b') == 10 + + assert h.get('b') is None + assert h2.get('b') is None + + assert h.get('a') is None + assert h2.get('a') == 'b' + + h = h2 = h3 = None + + def test_map_basics_3(self): + h = self.Map() + o = object() + h1 = h.set('1', o) + h2 = h1.set('1', o) + assert h1 is h2 + + def test_map_basics_4(self): + h = self.Map() + h1 = h.set('key', []) + h2 = h1.set('key', []) + assert h1 is not h2 + assert len(h1) == 1 + assert len(h2) == 1 + assert h1.get('key') is not h2.get('key') + + def test_map_collision_1(self): + k1 = HashKey(10, 'aaa') + k2 = HashKey(10, 'bbb') + k3 = HashKey(10, 'ccc') + + h = self.Map() + h2 = h.set(k1, 'a') + h3 = h2.set(k2, 'b') + + assert h.get(k1) == None + assert h.get(k2) == None + + assert h2.get(k1) == 'a' + assert h2.get(k2) == None + + assert h3.get(k1) == 'a' + assert h3.get(k2) == 'b' + + h4 = h3.set(k2, 'cc') + h5 = h4.set(k3, 'aa') + + assert h3.get(k1) == 'a' + assert h3.get(k2) == 'b' + assert h4.get(k1) == 'a' + assert h4.get(k2) == 'cc' + assert h4.get(k3) == None + assert h5.get(k1) == 'a' + assert h5.get(k2) == 'cc' + assert h5.get(k2) == 'cc' + assert h5.get(k3) == 'aa' + + assert len(h) == 0 + assert len(h2) == 1 + assert len(h3) == 2 + assert len(h4) == 2 + assert len(h5) == 3 + + def test_map_collision_2(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(0b011000011100000100, 'C') + D = HashKey(0b011000011100000100, 'D') + E = HashKey(0b1011000011100000100, 'E') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + + # BitmapNode(size=6 bitmap=0b100110000): + # NULL: + # BitmapNode(size=4 bitmap=0b1000000000000000000001000): + # : 'a' + # NULL: + # CollisionNode(size=4 id=0x108572410): + # : 'c' + # : 'd' + # : 'b' + + h = h.set(E, 'e') + + # BitmapNode(size=4 count=2.0 
bitmap=0b110000 id=10b8ea5c0): + # None: + # BitmapNode(size=4 count=2.0 + # bitmap=0b1000000000000000000001000 id=10b8ea518): + # : 'a' + # None: + # BitmapNode(size=2 count=1.0 bitmap=0b10 + # id=10b8ea4a8): + # None: + # BitmapNode(size=4 count=2.0 + # bitmap=0b100000001000 + # id=10b8ea4e0): + # None: + # CollisionNode(size=4 id=10b8ea470): + # : 'c' + # : 'd' + # : 'e' + # : 'b' + + def test_map_stress(self): + COLLECTION_SIZE = 7000 + TEST_ITERS_EVERY = 647 + CRASH_HASH_EVERY = 97 + CRASH_EQ_EVERY = 11 + RUN_XTIMES = 3 + + for _ in range(RUN_XTIMES): + h = self.Map() + d = dict() + + for i in range(COLLECTION_SIZE): + key = KeyStr(i) + + if not (i % CRASH_HASH_EVERY): + with HashKeyCrasher(error_on_hash=True): + with pytest.raises(HashingError): + h.set(key, i) + + h = h.set(key, i) + + if not (i % CRASH_EQ_EVERY): + with HashKeyCrasher(error_on_eq=True): + with pytest.raises(EqError): + h.get(KeyStr(i)) # really trigger __eq__ + + d[key] = i + assert len(d) == len(h) + + if not (i % TEST_ITERS_EVERY): + assert set(h.items()) == set(d.items()) + assert len(h.items()) == len(d.items()) + + assert len(h) == COLLECTION_SIZE + + for key in range(COLLECTION_SIZE): + assert h.get(KeyStr(key), 'not found') == key + + keys_to_delete = list(range(COLLECTION_SIZE)) + random.shuffle(keys_to_delete) + for iter_i, i in enumerate(keys_to_delete): + key = KeyStr(i) + + if not (iter_i % CRASH_HASH_EVERY): + with HashKeyCrasher(error_on_hash=True): + with pytest.raises(HashingError): + h.delete(key) + + if not (iter_i % CRASH_EQ_EVERY): + with HashKeyCrasher(error_on_eq=True): + with pytest.raises(EqError): + h.delete(KeyStr(i)) + + h = h.delete(key) + assert h.get(key, 'not found') == 'not found' + del d[key] + assert len(d) == len(h) + + if iter_i == COLLECTION_SIZE // 2: + hm = h + dm = d.copy() + + if not (iter_i % TEST_ITERS_EVERY): + assert set(h.keys()) == set(d.keys()) + assert len(h.keys()) == len(d.keys()) + + assert len(d) == 0 + assert len(h) == 0 + + # 
============ + + for key in dm: + assert hm.get(str(key)) == dm[key] + assert len(dm) == len(hm) + + for i, key in enumerate(keys_to_delete): + if str(key) in dm: + hm = hm.delete(str(key)) + dm.pop(str(key)) + assert hm.get(str(key), 'not found') == 'not found' + assert len(d) == len(h) + + if not (i % TEST_ITERS_EVERY): + assert set(h.values()) == set(d.values()) + assert len(h.values()) == len(d.values()) + + assert len(d) == 0 + assert len(h) == 0 + assert list(h.items()) == [] + + def test_map_delete_1(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(102, 'C') + D = HashKey(103, 'D') + E = HashKey(104, 'E') + Z = HashKey(-100, 'Z') + + Er = HashKey(103, 'Er', error_on_eq_to=D) + + h = self.Map() + h = h.set(A, 'a') + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + + orig_len = len(h) + + # BitmapNode(size=10 bitmap=0b111110000 id=0x10eadc618): + # : 'a' + # : 'b' + # : 'c' + # : 'd' + # : 'e' + + h = h.delete(C) + assert len(h) == orig_len - 1 + + with pytest.raises(ValueError, match='cannot compare'): + h.delete(Er) + + h = h.delete(D) + assert len(h) == orig_len - 2 + + with pytest.raises(KeyError) as ex: + h.delete(Z) + assert ex.value.args[0] is Z + + h = h.delete(A) + assert len(h) == orig_len - 3 + + assert h.get(A, 42) == 42 + assert h.get(B) == 'b' + assert h.get(E) == 'e' + + def test_map_delete_2(self): + A = HashKey(100, 'A') + B = HashKey(201001, 'B') + C = HashKey(101001, 'C') + BLike = HashKey(201001, 'B-like') + D = HashKey(103, 'D') + E = HashKey(104, 'E') + Z = HashKey(-100, 'Z') + + Er = HashKey(201001, 'Er', error_on_eq_to=B) + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + + h = h.set(B, 'b') # trigger branch in BitmapNode.assoc + + with pytest.raises(KeyError): + h.delete(BLike) # trigger branch in BitmapNode.without + + orig_len = len(h) + + # BitmapNode(size=8 bitmap=0b1110010000): + # : 'a' + # 
: 'd' + # : 'e' + # NULL: + # BitmapNode(size=4 bitmap=0b100000000001000000000): + # : 'b' + # : 'c' + + with pytest.raises(ValueError, match='cannot compare'): + h.delete(Er) + + with pytest.raises(KeyError) as ex: + h.delete(Z) + assert ex.value.args[0] is Z + assert len(h) == orig_len + + h = h.delete(C) + assert len(h) == orig_len - 1 + + h = h.delete(B) + assert len(h) == orig_len - 2 + + h = h.delete(A) + assert len(h) == orig_len - 3 + + assert h.get(D) == 'd' + assert h.get(E) == 'e' + + with pytest.raises(KeyError): + h = h.delete(A) + with pytest.raises(KeyError): + h = h.delete(B) + h = h.delete(D) + h = h.delete(E) + assert len(h) == 0 + + def test_map_delete_3(self): + A = HashKey(0b00000000001100100, 'A') + B = HashKey(0b00000000001100101, 'B') + + C = HashKey(0b11000011100000100, 'C') + D = HashKey(0b11000011100000100, 'D') + X = HashKey(0b01000011100000100, 'Z') + Y = HashKey(0b11000011100000100, 'Y') + + E = HashKey(0b00000000001101000, 'E') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + + assert len(h) == 5 + h = h.set(C, 'c') # trigger branch in CollisionNode.assoc + assert len(h) == 5 + + orig_len = len(h) + + with pytest.raises(KeyError): + h.delete(X) + with pytest.raises(KeyError): + h.delete(Y) + + # BitmapNode(size=6 bitmap=0b100110000): + # NULL: + # BitmapNode(size=4 bitmap=0b1000000000000000000001000): + # : 'a' + # NULL: + # CollisionNode(size=4 id=0x108572410): + # : 'c' + # : 'd' + # : 'b' + # : 'e' + + h = h.delete(A) + assert len(h) == orig_len - 1 + + h = h.delete(E) + assert len(h) == orig_len - 2 + + assert h.get(C) == 'c' + assert h.get(B) == 'b' + + h2 = h.delete(C) + assert len(h2) == orig_len - 3 + + h2 = h.delete(D) + assert len(h2) == orig_len - 3 + + assert len(h) == orig_len - 2 + + def test_map_delete_4(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(100100, 'C') + D = HashKey(100100, 'D') + E = HashKey(100100, 'E') + + h = 
self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + + orig_len = len(h) + + # BitmapNode(size=4 bitmap=0b110000): + # NULL: + # BitmapNode(size=4 bitmap=0b1000000000000000000001000): + # : 'a' + # NULL: + # CollisionNode(size=6 id=0x10515ef30): + # : 'c' + # : 'd' + # : 'e' + # : 'b' + + h = h.delete(D) + assert len(h) == orig_len - 1 + + h = h.delete(E) + assert len(h) == orig_len - 2 + + h = h.delete(C) + assert len(h) == orig_len - 3 + + h = h.delete(A) + assert len(h) == orig_len - 4 + + h = h.delete(B) + assert len(h) == 0 + + def test_map_delete_5(self): + h = self.Map() + + keys = [] + for i in range(17): + key = HashKey(i, str(i)) + keys.append(key) + h = h.set(key, 'val-{}'.format(i)) + + collision_key16 = HashKey(16, '18') + h = h.set(collision_key16, 'collision') + + # ArrayNode(id=0x10f8b9318): + # 0:: + # BitmapNode(size=2 count=1 bitmap=0b1): + # : 'val-0' + # + # ... 14 more BitmapNodes ... + # + # 15:: + # BitmapNode(size=2 count=1 bitmap=0b1): + # : 'val-15' + # + # 16:: + # BitmapNode(size=2 count=1 bitmap=0b1): + # NULL: + # CollisionNode(size=4 id=0x10f2f5af8): + # : 'val-16' + # : 'collision' + + assert len(h) == 18 + + h = h.delete(keys[2]) + assert len(h) == 17 + + h = h.delete(collision_key16) + assert len(h) == 16 + h = h.delete(keys[16]) + assert len(h) == 15 + + h = h.delete(keys[1]) + assert len(h) == 14 + with pytest.raises(KeyError) as ex: + h.delete(keys[1]) + assert ex.value.args[0] is keys[1] + assert len(h) == 14 + + for key in keys: + if key in h: + h = h.delete(key) + assert len(h) == 0 + + def test_map_delete_6(self): + h = self.Map() + h = h.set(1, 1) + h = h.delete(1) + assert len(h) == 0 + assert h == self.Map() + + def test_map_items_1(self): + A = HashKey(100, 'A') + B = HashKey(201001, 'B') + C = HashKey(101001, 'C') + D = HashKey(103, 'D') + E = HashKey(104, 'E') + F = HashKey(110, 'F') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 
'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + h = h.set(F, 'f') + + it = h.items() + assert set(list(it)) == \ + {(A, 'a'), (B, 'b'), (C, 'c'), (D, 'd'), (E, 'e'), (F, 'f')} + + def test_map_items_2(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(100100, 'C') + D = HashKey(100100, 'D') + E = HashKey(100100, 'E') + F = HashKey(110, 'F') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + h = h.set(F, 'f') + + it = h.items() + assert set(list(it)) == \ + {(A, 'a'), (B, 'b'), (C, 'c'), (D, 'd'), (E, 'e'), (F, 'f')} + + def test_map_items_3(self): + h = self.Map() + assert len(h.items()) == 0 + assert list(h.items()) == [] + + def test_map_items_4(self): + h = self.Map(a=1, b=2, c=3) + k = h.items() + assert set(k) == {('a', 1), ('b', 2), ('c', 3)} + assert set(k) == {('a', 1), ('b', 2), ('c', 3)} + + def test_map_keys_1(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(100100, 'C') + D = HashKey(100100, 'D') + E = HashKey(100100, 'E') + F = HashKey(110, 'F') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + h = h.set(F, 'f') + + assert set(list(h.keys())) == {A, B, C, D, E, F} + assert set(list(h)) == {A, B, C, D, E, F} + + def test_map_keys_2(self): + h = self.Map(a=1, b=2, c=3) + k = h.keys() + assert set(k) == {'a', 'b', 'c'} + assert set(k) == {'a', 'b', 'c'} + + def test_map_values_1(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(100100, 'C') + D = HashKey(100100, 'D') + E = HashKey(100100, 'E') + F = HashKey(110, 'F') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(B, 'b') + h = h.set(C, 'c') + h = h.set(D, 'd') + h = h.set(E, 'e') + h = h.set(F, 'f') + + assert set(list(h.values())) == {'a', 'b', 'c', 'd', 'e', 'f'} + + def test_map_values_2(self): + h = self.Map(a=1, b=2, c=3) + k = h.values() + assert set(k) == {1, 2, 3} + assert set(k) == {1, 2, 3} + + def 
test_map_eq_1(self): + A = HashKey(100, 'A') + B = HashKey(101, 'B') + C = HashKey(100100, 'C') + D = HashKey(100100, 'D') + E = HashKey(120, 'E') + + h1 = self.Map() + h1 = h1.set(A, 'a') + h1 = h1.set(B, 'b') + h1 = h1.set(C, 'c') + h1 = h1.set(D, 'd') + + h2 = self.Map() + h2 = h2.set(A, 'a') + + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.set(B, 'b') + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.set(C, 'c') + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.set(D, 'd2') + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.set(D, 'd') + assert h1 == h2 + assert not (h1 != h2) + + h2 = h2.set(E, 'e') + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.delete(D) + assert not (h1 == h2) + assert h1 != h2 + + h2 = h2.set(E, 'd') + assert not (h1 == h2) + assert h1 != h2 + + def test_map_eq_2(self): + A = HashKey(100, 'A') + Er = HashKey(100, 'Er', error_on_eq_to=A) + + h1 = self.Map() + h1 = h1.set(A, 'a') + + h2 = self.Map() + h2 = h2.set(Er, 'a') + + with pytest.raises(ValueError, match='cannot compare'): + h1 == h2 + + with pytest.raises(ValueError, match='cannot compare'): + h1 != h2 + + def test_map_eq_3(self): + assert self.Map() != 1 + + def test_map_gc_1(self): + A = HashKey(100, 'A') + + h = self.Map() + h = h.set(0, 0) # empty Map node is memoized in _map.c + ref = weakref.ref(h) + + a = [] + a.append(a) + a.append(h) + b = [] + a.append(b) + b.append(a) + h = h.set(A, b) + + del h, a, b + + gc.collect() + gc.collect() + gc.collect() + + assert ref() is None + + def test_map_gc_2(self): + A = HashKey(100, 'A') + + h = self.Map() + h = h.set(A, 'a') + h = h.set(A, h) + + ref = weakref.ref(h) + hi = iter(h.items()) + next(hi) + + del h, hi + + gc.collect() + gc.collect() + gc.collect() + + assert ref() is None + + def test_map_in_1(self): + A = HashKey(100, 'A') + AA = HashKey(100, 'A') + + B = HashKey(101, 'B') + + h = self.Map() + h = h.set(A, 1) + + assert A in h + assert not (B in h) + + with pytest.raises(EqError): + with 
HashKeyCrasher(error_on_eq=True): + AA in h + + with pytest.raises(HashingError): + with HashKeyCrasher(error_on_hash=True): + AA in h + + def test_map_getitem_1(self): + A = HashKey(100, 'A') + AA = HashKey(100, 'A') + + B = HashKey(101, 'B') + + h = self.Map() + h = h.set(A, 1) + + assert h[A] == 1 + assert h[AA] == 1 + + with pytest.raises(KeyError): + h[B] + + with pytest.raises(EqError): + with HashKeyCrasher(error_on_eq=True): + h[AA] + + with pytest.raises(HashingError): + with HashKeyCrasher(error_on_hash=True): + h[AA] + + def test_repr_1(self): + h = self.Map() + assert repr(h).startswith('> 32) & 0xffffffff) + + +def map_mask(hash, shift): + return (hash >> shift) & 0x01f + + +def map_bitpos(hash, shift): + return 1 << map_mask(hash, shift) + + +def map_bitcount(v): + v = v - ((v >> 1) & 0x55555555) + v = (v & 0x33333333) + ((v >> 2) & 0x33333333) + v = (v & 0x0F0F0F0F) + ((v >> 4) & 0x0F0F0F0F) + v = v + (v >> 8) + v = (v + (v >> 16)) & 0x3F + return v + + +def map_bitindex(bitmap, bit): + return map_bitcount(bitmap & (bit - 1)) + + +W_EMPTY, W_NEWNODE, W_NOT_FOUND = range(3) +void = object() + + +class BitmapNode: + + def __init__(self, size, bitmap, array, mutid): + self.size = size + self.bitmap = bitmap + assert isinstance(array, list) and len(array) == size + self.array = array + self.mutid = mutid + + def clone(self, mutid): + return BitmapNode(self.size, self.bitmap, self.array.copy(), mutid) + + def assoc(self, shift, hash, key, val, mutid): + bit = map_bitpos(hash, shift) + idx = map_bitindex(self.bitmap, bit) + + if self.bitmap & bit: + key_idx = 2 * idx + val_idx = key_idx + 1 + + key_or_null = self.array[key_idx] + val_or_node = self.array[val_idx] + + if key_or_null is None: + sub_node, added = val_or_node.assoc( + shift + 5, hash, key, val, mutid) + if val_or_node is sub_node: + return self, added + + if mutid and mutid == self.mutid: + self.array[val_idx] = sub_node + return self, added + else: + ret = self.clone(mutid) + 
ret.array[val_idx] = sub_node + return ret, added + + if key == key_or_null: + if val is val_or_node: + return self, False + + if mutid and mutid == self.mutid: + self.array[val_idx] = val + return self, False + else: + ret = self.clone(mutid) + ret.array[val_idx] = val + return ret, False + + existing_key_hash = map_hash(key_or_null) + if existing_key_hash == hash: + sub_node = CollisionNode( + 4, hash, [key_or_null, val_or_node, key, val], mutid) + else: + sub_node = BitmapNode(0, 0, [], mutid) + sub_node, _ = sub_node.assoc( + shift + 5, existing_key_hash, + key_or_null, val_or_node, + mutid) + sub_node, _ = sub_node.assoc( + shift + 5, hash, key, val, + mutid) + + if mutid and mutid == self.mutid: + self.array[key_idx] = None + self.array[val_idx] = sub_node + return self, True + else: + ret = self.clone(mutid) + ret.array[key_idx] = None + ret.array[val_idx] = sub_node + return ret, True + + else: + key_idx = 2 * idx + val_idx = key_idx + 1 + + n = map_bitcount(self.bitmap) + + new_array = self.array[:key_idx] + new_array.append(key) + new_array.append(val) + new_array.extend(self.array[key_idx:]) + + if mutid and mutid == self.mutid: + self.size = 2 * (n + 1) + self.bitmap |= bit + self.array = new_array + return self, True + else: + return BitmapNode( + 2 * (n + 1), self.bitmap | bit, new_array, mutid), True + + def find(self, shift, hash, key): + bit = map_bitpos(hash, shift) + + if not (self.bitmap & bit): + raise KeyError + + idx = map_bitindex(self.bitmap, bit) + key_idx = idx * 2 + val_idx = key_idx + 1 + + key_or_null = self.array[key_idx] + val_or_node = self.array[val_idx] + + if key_or_null is None: + return val_or_node.find(shift + 5, hash, key) + + if key == key_or_null: + return val_or_node + + raise KeyError(key) + + def without(self, shift, hash, key, mutid): + bit = map_bitpos(hash, shift) + if not (self.bitmap & bit): + return W_NOT_FOUND, None + + idx = map_bitindex(self.bitmap, bit) + key_idx = 2 * idx + val_idx = key_idx + 1 + + 
key_or_null = self.array[key_idx] + val_or_node = self.array[val_idx] + + if key_or_null is None: + res, sub_node = val_or_node.without(shift + 5, hash, key, mutid) + + if res is W_EMPTY: + raise RuntimeError('unreachable code') # pragma: no cover + + elif res is W_NEWNODE: + if (type(sub_node) is BitmapNode and + sub_node.size == 2 and + sub_node.array[0] is not None): + + if mutid and mutid == self.mutid: + self.array[key_idx] = sub_node.array[0] + self.array[val_idx] = sub_node.array[1] + return W_NEWNODE, self + else: + clone = self.clone(mutid) + clone.array[key_idx] = sub_node.array[0] + clone.array[val_idx] = sub_node.array[1] + return W_NEWNODE, clone + + if mutid and mutid == self.mutid: + self.array[val_idx] = sub_node + return W_NEWNODE, self + else: + clone = self.clone(mutid) + clone.array[val_idx] = sub_node + return W_NEWNODE, clone + + else: + assert sub_node is None + return res, None + + else: + if key == key_or_null: + if self.size == 2: + return W_EMPTY, None + + new_array = self.array[:key_idx] + new_array.extend(self.array[val_idx + 1:]) + + if mutid and mutid == self.mutid: + self.size -= 2 + self.bitmap &= ~bit + self.array = new_array + return W_NEWNODE, self + else: + new_node = BitmapNode( + self.size - 2, self.bitmap & ~bit, new_array, mutid) + return W_NEWNODE, new_node + + else: + return W_NOT_FOUND, None + + def keys(self): + for i in range(0, self.size, 2): + key_or_null = self.array[i] + + if key_or_null is None: + val_or_node = self.array[i + 1] + yield from val_or_node.keys() + else: + yield key_or_null + + def values(self): + for i in range(0, self.size, 2): + key_or_null = self.array[i] + val_or_node = self.array[i + 1] + + if key_or_null is None: + yield from val_or_node.values() + else: + yield val_or_node + + def items(self): + for i in range(0, self.size, 2): + key_or_null = self.array[i] + val_or_node = self.array[i + 1] + + if key_or_null is None: + yield from val_or_node.items() + else: + yield key_or_null, val_or_node + 
+ def dump(self, buf, level): # pragma: no cover + buf.append( + ' ' * (level + 1) + + 'BitmapNode(size={} count={} bitmap={} id={:0x}):'.format( + self.size, self.size / 2, bin(self.bitmap), id(self))) + + for i in range(0, self.size, 2): + key_or_null = self.array[i] + val_or_node = self.array[i + 1] + + pad = ' ' * (level + 2) + + if key_or_null is None: + buf.append(pad + 'None:') + val_or_node.dump(buf, level + 2) + else: + buf.append(pad + '{!r}: {!r}'.format(key_or_null, val_or_node)) + + +class CollisionNode: + + def __init__(self, size, hash, array, mutid): + self.size = size + self.hash = hash + self.array = array + self.mutid = mutid + + def find_index(self, key): + for i in range(0, self.size, 2): + if self.array[i] == key: + return i + return -1 + + def find(self, shift, hash, key): + for i in range(0, self.size, 2): + if self.array[i] == key: + return self.array[i + 1] + raise KeyError(key) + + def assoc(self, shift, hash, key, val, mutid): + if hash == self.hash: + key_idx = self.find_index(key) + + if key_idx == -1: + new_array = self.array.copy() + new_array.append(key) + new_array.append(val) + + if mutid and mutid == self.mutid: + self.size += 2 + self.array = new_array + return self, True + else: + new_node = CollisionNode( + self.size + 2, hash, new_array, mutid) + return new_node, True + + val_idx = key_idx + 1 + if self.array[val_idx] is val: + return self, False + + if mutid and mutid == self.mutid: + self.array[val_idx] = val + return self, False + else: + new_array = self.array.copy() + new_array[val_idx] = val + return CollisionNode(self.size, hash, new_array, mutid), False + + else: + new_node = BitmapNode( + 2, map_bitpos(self.hash, shift), [None, self], mutid) + return new_node.assoc(shift, hash, key, val, mutid) + + def without(self, shift, hash, key, mutid): + if hash != self.hash: + return W_NOT_FOUND, None + + key_idx = self.find_index(key) + if key_idx == -1: + return W_NOT_FOUND, None + + new_size = self.size - 2 + if new_size == 
0: + # Shouldn't be ever reachable + return W_EMPTY, None # pragma: no cover + + if new_size == 2: + if key_idx == 0: + new_array = [self.array[2], self.array[3]] + else: + assert key_idx == 2 + new_array = [self.array[0], self.array[1]] + + new_node = BitmapNode( + 2, map_bitpos(hash, shift), new_array, mutid) + return W_NEWNODE, new_node + + new_array = self.array[:key_idx] + new_array.extend(self.array[key_idx + 2:]) + if mutid and mutid == self.mutid: + self.array = new_array + self.size -= 2 + return W_NEWNODE, self + else: + new_node = CollisionNode( + self.size - 2, self.hash, new_array, mutid) + return W_NEWNODE, new_node + + def keys(self): + for i in range(0, self.size, 2): + yield self.array[i] + + def values(self): + for i in range(1, self.size, 2): + yield self.array[i] + + def items(self): + for i in range(0, self.size, 2): + yield self.array[i], self.array[i + 1] + + def dump(self, buf, level): # pragma: no cover + pad = ' ' * (level + 1) + buf.append( + pad + 'CollisionNode(size={} id={:0x}):'.format( + self.size, id(self))) + + pad = ' ' * (level + 2) + for i in range(0, self.size, 2): + key = self.array[i] + val = self.array[i + 1] + + buf.append('{}{!r}: {!r}'.format(pad, key, val)) + + +class MapKeys: + + def __init__(self, c, m): + self.__count = c + self.__root = m + + def __len__(self): + return self.__count + + def __iter__(self): + return iter(self.__root.keys()) + + +class MapValues: + + def __init__(self, c, m): + self.__count = c + self.__root = m + + def __len__(self): + return self.__count + + def __iter__(self): + return iter(self.__root.values()) + + +class MapItems: + + def __init__(self, c, m): + self.__count = c + self.__root = m + + def __len__(self): + return self.__count + + def __iter__(self): + return iter(self.__root.items()) + + +class Map: + + def __init__(self, col=None, **kw): + self.__count = 0 + self.__root = BitmapNode(0, 0, [], 0) + self.__hash = -1 + + if isinstance(col, Map): + self.__count = col.__count + 
self.__root = col.__root + self.__hash = col.__hash + col = None + elif isinstance(col, MapMutation): + raise TypeError('cannot create Maps from MapMutations') + + if col or kw: + init = self.update(col, **kw) + self.__count = init.__count + self.__root = init.__root + + @classmethod + def _new(cls, count, root): + m = Map.__new__(Map) + m.__count = count + m.__root = root + m.__hash = -1 + return m + + def __reduce__(self): + return (type(self), (dict(self.items()),)) + + def __len__(self): + return self.__count + + def __eq__(self, other): + if not isinstance(other, Map): + return NotImplemented + + if len(self) != len(other): + return False + + for key, val in self.__root.items(): + try: + oval = other.__root.find(0, map_hash(key), key) + except KeyError: + return False + else: + if oval != val: + return False + + return True + + def update(self, col=None, **kw): + it = None + if col is not None: + if hasattr(col, 'items'): + it = iter(col.items()) + else: + it = iter(col) + + if it is not None: + if kw: + it = iter(itertools.chain(it, kw.items())) + else: + if kw: + it = iter(kw.items()) + + if it is None: + + return self + + mutid = _mut_id() + root = self.__root + count = self.__count + + i = 0 + while True: + try: + tup = next(it) + except StopIteration: + break + + try: + tup = tuple(tup) + except TypeError: + raise TypeError( + 'cannot convert map update ' + 'sequence element #{} to a sequence'.format(i)) from None + key, val, *r = tup + if r: + raise ValueError( + 'map update sequence element #{} has length ' + '{}; 2 is required'.format(i, len(r) + 2)) + + root, added = root.assoc(0, map_hash(key), key, val, mutid) + if added: + count += 1 + + i += 1 + + return Map._new(count, root) + + def mutate(self): + return MapMutation(self.__count, self.__root) + + def set(self, key, val): + new_count = self.__count + new_root, added = self.__root.assoc(0, map_hash(key), key, val, 0) + + if new_root is self.__root: + assert not added + return self + + if added: + 
new_count += 1 + + return Map._new(new_count, new_root) + + def delete(self, key): + res, node = self.__root.without(0, map_hash(key), key, 0) + if res is W_EMPTY: + return Map() + elif res is W_NOT_FOUND: + raise KeyError(key) + else: + return Map._new(self.__count - 1, node) + + def get(self, key, default=None): + try: + return self.__root.find(0, map_hash(key), key) + except KeyError: + return default + + def __getitem__(self, key): + return self.__root.find(0, map_hash(key), key) + + def __contains__(self, key): + try: + self.__root.find(0, map_hash(key), key) + except KeyError: + return False + else: + return True + + def __iter__(self): + yield from self.__root.keys() + + def keys(self): + return MapKeys(self.__count, self.__root) + + def values(self): + return MapValues(self.__count, self.__root) + + def items(self): + return MapItems(self.__count, self.__root) + + def __hash__(self): + if self.__hash != -1: + return self.__hash + + MAX = sys.maxsize + MASK = 2 * MAX + 1 + + h = 1927868237 * (self.__count * 2 + 1) + h &= MASK + + for key, value in self.__root.items(): + hx = hash(key) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + + hx = hash(value) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + + h = h * 69069 + 907133923 + h &= MASK + + if h > MAX: + h -= MASK + 1 # pragma: no cover + if h == -1: + h = 590923713 # pragma: no cover + + self.__hash = h + return h + + @reprlib.recursive_repr("{...}") + def __repr__(self): + items = [] + for key, val in self.items(): + items.append("{!r}: {!r}".format(key, val)) + return ''.format( + ', '.join(items), id(self)) + + def __dump__(self): # pragma: no cover + buf = [] + self.__root.dump(buf, 0) + return '\n'.join(buf) + + def __class_getitem__(cls, item): + return cls + + +class MapMutation: + + def __init__(self, count, root): + self.__count = count + self.__root = root + self.__mutid = _mut_id() + + def set(self, key, val): + self[key] = val + + def __enter__(self): + return self 
+ + def __exit__(self, *exc): + self.finish() + return False + + def __iter__(self): + raise TypeError('{} is not iterable'.format(type(self))) + + def __delitem__(self, key): + if self.__mutid == 0: + raise ValueError('mutation {!r} has been finished'.format(self)) + + res, new_root = self.__root.without( + 0, map_hash(key), key, self.__mutid) + if res is W_EMPTY: + self.__count = 0 + self.__root = BitmapNode(0, 0, [], self.__mutid) + elif res is W_NOT_FOUND: + raise KeyError(key) + else: + self.__root = new_root + self.__count -= 1 + + def __setitem__(self, key, val): + if self.__mutid == 0: + raise ValueError('mutation {!r} has been finished'.format(self)) + + self.__root, added = self.__root.assoc( + 0, map_hash(key), key, val, self.__mutid) + + if added: + self.__count += 1 + + def pop(self, key, *args): + if self.__mutid == 0: + raise ValueError('mutation {!r} has been finished'.format(self)) + + if len(args) > 1: + raise TypeError( + 'pop() accepts 1 to 2 positional arguments, ' + 'got {}'.format(len(args) + 1)) + elif len(args) == 1: + default = args[0] + else: From pypy.commits at gmail.com Mon Dec 23 10:29:28 2019 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 23 Dec 2019 07:29:28 -0800 (PST) Subject: [pypy-commit] pypy py3.7: use the _immutable_map.Map implementation in the _contextvars module Message-ID: <5e00dd58.1c69fb81.4d31b.0681@mx.google.com> Author: Carl Friedrich Bolz-Tereick Branch: py3.7 Changeset: r98360:4f3a9b9e4db1 Date: 2019-12-23 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/4f3a9b9e4db1/ Log: use the _immutable_map.Map implementation in the _contextvars module diff --git a/lib_pypy/_contextvars.py b/lib_pypy/_contextvars.py --- a/lib_pypy/_contextvars.py +++ b/lib_pypy/_contextvars.py @@ -1,4 +1,5 @@ from __pypy__ import get_contextvar_context, set_contextvar_context +from _immutables_map import Map # implementation taken from PEP-0567 https://www.python.org/dev/peps/pep-0567/ _NO_DEFAULT = object() @@ -12,37 +13,6 @@ return 
type.__new__(cls, name, bases, dict(dct)) -class _ContextData: - # XXX wrong complexity! need to implement a real immutable dict instead - - def __init__(self): - self._mapping = dict() - - def __getitem__(self, key): - return self._mapping[key] - - def __contains__(self, key): - return key in self._mapping - - def __len__(self): - return len(self._mapping) - - def __iter__(self): - return iter(self._mapping) - - def set(self, key, value): - copy = _ContextData() - copy._mapping = self._mapping.copy() - copy._mapping[key] = value - return copy - - def delete(self, key): - copy = _ContextData() - copy._mapping = self._mapping.copy() - del copy._mapping[key] - return copy - - def get_context(): context = get_contextvar_context() if context is None: @@ -53,11 +23,11 @@ class Context(metaclass=Unsubclassable): - #_data: _ContextData + #_data: Map #_prev_context: Optional[Context] def __init__(self): - self._data = _ContextData() + self._data = Map() self._prev_context = None def run(self, callable, *args, **kwargs): @@ -157,7 +127,7 @@ def set(self, value): context = get_context() - data: _ContextData = context._data + data: Map = context._data try: old_value = data[self] except KeyError: From pypy.commits at gmail.com Mon Dec 23 10:34:33 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 07:34:33 -0800 (PST) Subject: [pypy-commit] pypy release-pypy2.7-v7.x: merge default into release Message-ID: <5e00de89.1c69fb81.3d073.2abb@mx.google.com> Author: Matti Picus Branch: release-pypy2.7-v7.x Changeset: r98361:724f1a7d62e8 Date: 2019-12-23 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/724f1a7d62e8/ Log: merge default into release diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -61,3 +61,8 @@ 533398cfd64e5146a07c4824e90a1b629c8b6523 release-pypy3.6-v7.3.0rc1 285307a0f5a77ffa46781b5c54c52eb1c385081d release-pypy2.7-v7.3.0rc2 008914050baeedb6d3ca30fe26ef43b78bb63841 release-pypy3.6-v7.3.0rc2 +c124c11a5921bf12797b08a696753a12ae82595a 
release-pypy3.6-v7.2.0rc3 +e7e02dccbd8c14fa2d4880f6bd4c47362a8952f5 release-pypy3.6-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 +c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 +0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -32,6 +32,16 @@ def Tcl_AppInit(app): + # For portable builds, try to load a local version of the libraries + from os.path import join, dirname, exists + lib_path = join(dirname(dirname(dirname(__file__))), 'lib') + tcl_path = join(lib_path, 'tcl') + tk_path = join(lib_path, 'tk') + if exists(tcl_path): + tklib.Tcl_Eval(app.interp, 'set tcl_library "{0}"'.format(tcl_path).encode('utf-8')) + if exists(tk_path): + tklib.Tcl_Eval(app.interp, 'set tk_library "{0}"'.format(tk_path).encode('utf-8')) + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: app.raiseTclError() skip_tk_init = tklib.Tcl_GetVar( diff --git a/pypy/tool/release/make_portable.py b/pypy/tool/release/make_portable.py --- a/pypy/tool/release/make_portable.py +++ b/pypy/tool/release/make_portable.py @@ -1,11 +1,11 @@ #!/usr/bin/env python -bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl', 'tk', 'gdbm', +bundle = ['sqlite3', 'ssl', 'crypto', 'ffi', 'expat', 'tcl8', 'tk8', 'gdbm', 'lzma', 'tinfo', 'tinfow', 'ncursesw', 'panelw', 'ncurses', 'panel', 'panelw'] import os from os.path import dirname, relpath, join, exists, basename, realpath -from shutil import copy2 +from shutil import copy2, copytree import sys from glob import glob from subprocess import check_output, check_call @@ -62,6 +62,7 @@ rpaths = {} for binary in binaries: + check_call(['chmod', 'a+w', binary]) rpath = join('$ORIGIN', relpath('lib', dirname(binary))) check_call(['patchelf', '--set-rpath', rpath, binary]) @@ -85,6 +86,9 @@ for path, item in copied.items(): print('Copied {0} to {1}'.format(path, item)) 
+ copytree('/usr/share/tcl8.5', 'lib/tcl') + copytree('/usr/share/tk8.5', 'lib/tk') + binaries.extend(copied.values()) rpaths = rpath_binaries(binaries) From pypy.commits at gmail.com Mon Dec 23 10:34:35 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 07:34:35 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy3.6-v7.3.0rc4 for changeset 1608da62bfc7 Message-ID: <5e00de8b.1c69fb81.8707.7583@mx.google.com> Author: Matti Picus Branch: Changeset: r98362:0e6c068722c0 Date: 2019-12-23 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/0e6c068722c0/ Log: Added tag release-pypy3.6-v7.3.0rc4 for changeset 1608da62bfc7 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -66,3 +66,4 @@ c124c11a5921bf12797b08a696753a12ae82595a release-pypy2.7-v7.3.0rc3 c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 +1608da62bfc71e8ac775121dd0b21bb72e61c6ea release-pypy3.6-v7.3.0rc4 From pypy.commits at gmail.com Mon Dec 23 10:34:37 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 07:34:37 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v7.3.0rc4 for changeset 724f1a7d62e8 Message-ID: <5e00de8d.1c69fb81.52a0.4241@mx.google.com> Author: Matti Picus Branch: Changeset: r98363:16369979c2f9 Date: 2019-12-23 17:33 +0200 http://bitbucket.org/pypy/pypy/changeset/16369979c2f9/ Log: Added tag release-pypy2.7-v7.3.0rc4 for changeset 724f1a7d62e8 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -67,3 +67,4 @@ c124c11a5921bf12797b08a696753a12ae82595a release-pypy3.6-v7.2.0rc3 0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 1608da62bfc71e8ac775121dd0b21bb72e61c6ea release-pypy3.6-v7.3.0rc4 +724f1a7d62e8d8ac0fa20823f5c35497b29ad56f release-pypy2.7-v7.3.0rc4 From pypy.commits at gmail.com Mon Dec 23 12:51:38 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 23 Dec 2019 09:51:38 -0800 
(PST) Subject: [pypy-commit] pypy cpyext-speedup-tests: missing import Message-ID: <5e00feaa.1c69fb81.f0826.8f55@mx.google.com> Author: Antonio Cuni Branch: cpyext-speedup-tests Changeset: r98367:7171a0719fdb Date: 2019-12-23 18:50 +0100 http://bitbucket.org/pypy/pypy/changeset/7171a0719fdb/ Log: missing import diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -131,6 +131,7 @@ Eagerly create pyobjs for various builtins so they don't look like leaks. """ + from pypy.module.cpyext.pyobject import make_ref w_to_preload = space.appexec([], """(): import sys import mmap From pypy.commits at gmail.com Mon Dec 23 13:05:43 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 23 Dec 2019 10:05:43 -0800 (PST) Subject: [pypy-commit] pypy cpyext-speedup-tests: use __code__ to be reduce the diff with py3.6 Message-ID: <5e0101f7.1c69fb81.7c9d9.1030@mx.google.com> Author: Antonio Cuni Branch: cpyext-speedup-tests Changeset: r98368:64ffef8c26a7 Date: 2019-12-23 19:03 +0100 http://bitbucket.org/pypy/pypy/changeset/64ffef8c26a7/ Log: use __code__ to be reduce the diff with py3.6 diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -141,7 +141,7 @@ # def _f(): pass FunctionType = type(_f) - CodeType = type(_f.func_code) + CodeType = type(_f.__code__) try: raise TypeError except TypeError: From pypy.commits at gmail.com Mon Dec 23 13:18:06 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 23 Dec 2019 10:18:06 -0800 (PST) Subject: [pypy-commit] pypy cpyext-speedup-tests: close branch to be merged Message-ID: <5e0104de.1c69fb81.b51f.101a@mx.google.com> Author: Antonio Cuni Branch: cpyext-speedup-tests Changeset: r98373:c420cfb6c3d4 Date: 2019-12-23 19:16 +0100 
http://bitbucket.org/pypy/pypy/changeset/c420cfb6c3d4/ Log: close branch to be merged From pypy.commits at gmail.com Mon Dec 23 13:18:07 2019 From: pypy.commits at gmail.com (antocuni) Date: Mon, 23 Dec 2019 10:18:07 -0800 (PST) Subject: [pypy-commit] pypy default: merge this branch to speedup cpyext tests, especially on py3.6 Message-ID: <5e0104df.1c69fb81.8a53.a065@mx.google.com> Author: Antonio Cuni Branch: Changeset: r98374:3bb3cd0a3643 Date: 2019-12-23 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/3bb3cd0a3643/ Log: merge this branch to speedup cpyext tests, especially on py3.6 diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -1444,5 +1444,14 @@ import doctest return doctest.testmod() +# ===== PyPy modification to support pickling cpyext methods ===== +try: + import cpyext +except ImportError: + pass +else: + Pickler.dispatch[cpyext.FunctionType] = Pickler.save_global +# ================= end of PyPy modification ==================== + if __name__ == "__main__": _test() diff --git a/pypy/module/cpyext/moduledef.py b/pypy/module/cpyext/moduledef.py --- a/pypy/module/cpyext/moduledef.py +++ b/pypy/module/cpyext/moduledef.py @@ -6,6 +6,7 @@ interpleveldefs = { 'load_module': 'api.load_extension_module', 'is_cpyext_function': 'interp_cpyext.is_cpyext_function', + 'FunctionType': 'methodobject.W_PyCFunctionObject', } appleveldefs = { @@ -15,15 +16,6 @@ def startup(self, space): space.fromcache(State).startup(space) - method = pypy.module.cpyext.typeobject.get_new_method_def(space) - # the w_self argument here is a dummy, the only thing done with w_obj - # is call type() on it - w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space, - method, space.w_None) - space.appexec([w_obj], """(meth): - from pickle import Pickler - Pickler.dispatch[type(meth)] = Pickler.save_global - """) def register_atexit(self, function): if len(self.atexit_funcs) >= 32: diff --git 
a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -82,23 +82,6 @@ def freeze_refcnts(self): rawrefcount._dont_free_any_more() -def preload(space, name): - from pypy.module.cpyext.pyobject import make_ref - if '.' not in name: - w_obj = space.builtin.getdictvalue(space, name) - else: - module, localname = name.rsplit('.', 1) - code = "(): import {module}; return {module}.{localname}" - code = code.format(**locals()) - w_obj = space.appexec([], code) - make_ref(space, w_obj) - -def preload_expr(space, expr): - from pypy.module.cpyext.pyobject import make_ref - code = "(): return {}".format(expr) - w_obj = space.appexec([], code) - make_ref(space, w_obj) - def is_interned_string(space, w_obj): try: s = space.str_w(w_obj) @@ -148,13 +131,37 @@ Eagerly create pyobjs for various builtins so they don't look like leaks. """ - for name in [ - 'buffer', 'mmap.mmap', - 'types.FunctionType', 'types.CodeType', - 'types.TracebackType', 'types.FrameType']: - preload(space, name) - for expr in ['type(str.join)']: - preload_expr(space, expr) + from pypy.module.cpyext.pyobject import make_ref + w_to_preload = space.appexec([], """(): + import sys + import mmap + # + # copied&pasted to avoid importing the whole types.py, which is + # expensive on py3k + # + def _f(): pass + FunctionType = type(_f) + CodeType = type(_f.__code__) + try: + raise TypeError + except TypeError: + tb = sys.exc_info()[2] + TracebackType = type(tb) + FrameType = type(tb.tb_frame) + del tb + # + return [ + buffer, + mmap.mmap, + FunctionType, + CodeType, + TracebackType, + FrameType, + type(str.join), + ] + """) + for w_obj in space.unpackiterable(w_to_preload): + make_ref(space, w_obj) def cleanup(self): self.space.getexecutioncontext().cleanup_cpyext_state() From pypy.commits at gmail.com Mon Dec 23 16:43:00 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 13:43:00 
-0800 (PST) Subject: [pypy-commit] pypy default: tweak release note for last-minute addition Message-ID: <5e0134e4.1c69fb81.67ca2.ca66@mx.google.com> Author: Matti Picus Branch: Changeset: r98376:f41af980342f Date: 2019-12-23 23:36 +0200 http://bitbucket.org/pypy/pypy/changeset/f41af980342f/ Log: tweak release note for last-minute addition diff --git a/pypy/doc/release-v7.3.0.rst b/pypy/doc/release-v7.3.0.rst --- a/pypy/doc/release-v7.3.0.rst +++ b/pypy/doc/release-v7.3.0.rst @@ -197,7 +197,8 @@ * Add ``PyObject_GenericGetDict``, ``PyObject_GenericSetDict``, ``_Py_strhex``, ``_Py_strhex_bytes``, ``PyUnicodeNew``, ``_PyFinalizing``, - ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath`` + ``PySlice_Unpack``, ``PySlice_AdjustIndices``, ``PyOS_FSPath``, + ``PyModule_AddFunctions`` * Implement ``pystrhex.h`` (`issue 2687`_) * Make ``PyUnicodeObject`` slightly more compact * Fix memory leak when releasing a ``PyUnicodeObject`` diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -3,7 +3,7 @@ pmin=7 # python minor version maj=7 min=3 -rev=0rc1 +rev=0 case $pmaj in "2") exe=pypy;; From pypy.commits at gmail.com Mon Dec 23 16:43:02 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 13:43:02 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy2.7-v7.3.0 for changeset 724f1a7d62e8 Message-ID: <5e0134e6.1c69fb81.478ca.39aa@mx.google.com> Author: Matti Picus Branch: Changeset: r98377:a433ce7a9e4e Date: 2019-12-23 23:41 +0200 http://bitbucket.org/pypy/pypy/changeset/a433ce7a9e4e/ Log: Added tag release-pypy2.7-v7.3.0 for changeset 724f1a7d62e8 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -68,3 +68,4 @@ 0000000000000000000000000000000000000000 release-pypy3.6-v7.2.0rc3 1608da62bfc71e8ac775121dd0b21bb72e61c6ea release-pypy3.6-v7.3.0rc4 724f1a7d62e8d8ac0fa20823f5c35497b29ad56f release-pypy2.7-v7.3.0rc4 
+724f1a7d62e8d8ac0fa20823f5c35497b29ad56f release-pypy2.7-v7.3.0 From pypy.commits at gmail.com Mon Dec 23 16:43:03 2019 From: pypy.commits at gmail.com (mattip) Date: Mon, 23 Dec 2019 13:43:03 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-pypy3.6-v7.3.0 for changeset 1608da62bfc7 Message-ID: <5e0134e7.1c69fb81.c5e4e.7752@mx.google.com> Author: Matti Picus Branch: Changeset: r98378:430e63a88300 Date: 2019-12-23 23:42 +0200 http://bitbucket.org/pypy/pypy/changeset/430e63a88300/ Log: Added tag release-pypy3.6-v7.3.0 for changeset 1608da62bfc7 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -69,3 +69,4 @@ 1608da62bfc71e8ac775121dd0b21bb72e61c6ea release-pypy3.6-v7.3.0rc4 724f1a7d62e8d8ac0fa20823f5c35497b29ad56f release-pypy2.7-v7.3.0rc4 724f1a7d62e8d8ac0fa20823f5c35497b29ad56f release-pypy2.7-v7.3.0 +1608da62bfc71e8ac775121dd0b21bb72e61c6ea release-pypy3.6-v7.3.0 From pypy.commits at gmail.com Tue Dec 24 04:29:22 2019 From: pypy.commits at gmail.com (mattip) Date: Tue, 24 Dec 2019 01:29:22 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: release 7.3.0 Message-ID: <5e01da72.1c69fb81.d863.9726@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r962:fb71ab56fca3 Date: 2019-12-24 11:29 +0200 http://bitbucket.org/pypy/pypy.org/changeset/fb71ab56fca3/ Log: release 7.3.0 diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -65,11 +65,11 @@

There are nightly binary builds available. Those builds are not always as stable as the release, but they contain numerous bugfixes and performance improvements.

-

We provide binaries for x86, ARM, PPC and s390x running on different operating systems such as -Linux, Mac OS X and Windows (what's new in PyPy 7.2.0?):

+

We provide binaries for x86, aarch64, ppc64 and s390x running on different operating systems such as +Linux, Mac OS X and Windows (what's new in PyPy 7.3.0?):

    -
  • the Python2.7 compatible release — PyPy2.7 v7.2.0
  • -
  • the Python3.6 compatible release — PyPy3.6 v7.2.0
  • +
  • the Python2.7 compatible release — PyPy2.7 v7.3.0
  • +
  • the Python3.6 compatible release — PyPy3.6 v7.3.0
  • the Python2.7 Software Transactional Memory special release — PyPy-STM 2.5.1 (Linux x86-64 only)
    @@ -93,51 +93,72 @@

    Linux binaries and common distributions

    -

    Linux binaries are dynamically linked, as is usual, and thus might -not be usable due to the sad story of linux binary compatibility. This means -that Linux binaries are only usable on the distributions written next to -them unless you're ready to hack your system by adding symlinks to the -libraries it tries to open. There are better solutions:

    +

+

 Since version 7.3, the linux x86 binaries in the links below ship with versions +of OpenSSL, SQLite3, libffi, expat, and TCL/TK binary libraries linked in. This +makes the binaries “portable” so that they should run on any current glibc-based +linux platform. The ideas were adopted from the portable-pypy package.

    +

    This solution to the portability problem means that the versions of the +packaged libraries are frozen to the version shipped, so updating your system +libraries will not affect this installation of PyPy. Also see the note about +SSL certificates below.

    +

    For aarch64, s390x, and ppc64, the binaries target a specific operating system. +These binaries are dynamically linked, and thus might not be usable due to the +sad story of linux binary compatibility. This means that Linux binaries are +only usable on the distributions written next to them unless you're ready to +hack your system by adding symlinks to the libraries it tries to open. There +are better solutions:

      -
    • use Squeaky's portable Linux binaries.
    • -
    • or download PyPy from your release vendor (usually an outdated +
    • download PyPy from your release vendor (usually an outdated version): Ubuntu (PPA), Debian, Homebrew, MacPorts, Fedora, Gentoo and Arch are known to package PyPy, with various degrees of being up-to-date.
    • +
• use sudo snap install --classic <package>, where <package> is +pypy or pypy3. Snap is a non-vendor specific package manager for +linux, and repackages the download tarballs below with the latest platform- +specific libraries (again, without changing libffi).
    • +
    • recompile the CFFI-based TCL/TK, OpenSSL, or sqlite3 modules, using system +libraries and the scripts in pypy/lib_pypy. This solution will not solve +compatibility issues with libffi, since that is baked into PyPy.
    • or translate your own PyPy.
    +

    SSL Certificates

    +

+

 While the linux binaries ship an OpenSSL library, they do not ship a +certificate store for SSL certificates. If you wish to use the SSL module, +you will need a valid certificate store. You can use the certifi package +and set SSL_CERT_FILE to certifi.where() or install your platform +certificates which should be discovered by the _ssl module.

    -
    -

    Python2.7 compatible PyPy 7.2.0

    +
    +

    Python2.7 compatible PyPy 7.3.0

    -
    -

    Python 3.6 compatible PyPy3.6 v7.2.0

    +
    +

    Python 3.6 compatible PyPy3.6 v7.3.0

    @@ -149,8 +170,7 @@

    [1]: stating it again: the Linux binaries are provided for the distributions listed here. If your distribution is not exactly this one, it won't work, you will probably see: pypy: error while loading shared -libraries: …. Unless you want to hack a lot, try out the -portable Linux binaries.

    +libraries: ….

    PyPy-STM 2.5.1

    @@ -186,9 +206,9 @@

    Installing

    All binary versions are packaged in a tar.bz2 or zip file. When -uncompressed, they run in-place. For now you can uncompress them -either somewhere in your home directory or, say, in /opt, and -if you want, put a symlink from somewhere like +uncompressed, they run in-place. You can uncompress them +either somewhere in your home directory or, say, in /opt. +If you want, put a symlink from somewhere like /usr/local/bin/pypy to /path/to/pypy_expanded/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -222,8 +242,8 @@

    Alternatively, get one of the following smaller packages for the source at the same revision as the above binaries:

  • Make sure you installed the dependencies. See the list here.

    @@ -286,14 +306,6 @@ .../pypy/tool/build_cffi_imports.py if you want to be able to import the cffi-based modules.

  • -
  • On Linux, translating with asmgcroot, is delicate. -It requires using gcc with no particularly -fancy options. It does not work e.g. with clang, or if you pass uncommon -options with the CFLAGS environment variable. If you insist on -passing these options or using clang, then you can compile PyPy with -the default shadow stack option instead (for a small performance price in -non-JITted code).

    -
  • Like other JITs, PyPy doesn't work out of the box on some Linux distributions that trade full POSIX compliance for extra security features. E.g. with PAX, you have to run PyPy with paxctl -cm. @@ -337,7 +349,29 @@

  • Checksums

    -

    Here are the checksums for each of the downloads of PyPy 7.2.0, 7.1.1, 7.1.0

    +

    Here are the checksums for each of the downloads of PyPy 7.3.0, 7.2.0, 7.1.1, 7.1.0

    +

    pypy2.7-7.3.0 sha256:

    +
    +a3dd8d5e2a656849fa344dce4679d854a19bc4a096a0cf62b46a1be127a5d56c  pypy2.7-v7.3.0-aarch64.tar.bz2
    +eac1308b7d523003a5f6d20f58406d52ab14611bcec750122ae513a5a35110db  pypy2.7-v7.3.0-linux32.tar.bz2
    +f4950a54378ac637da2a6defa52d6ffed96af12fcd5d74e1182fb834883c9826  pypy2.7-v7.3.0-linux64.tar.bz2
    +ca7b056b243a6221ad04fa7fc8696e36a2fb858396999dcaa31dbbae53c54474  pypy2.7-v7.3.0-osx64.tar.bz2
    +d254b82a00021339762198e41ba7f72316010d0f9bd4dcd7b0755185da9c005e  pypy2.7-v7.3.0-s390x.tar.bz2
    +b0b25c7f8938ab0fedd8dedf26b9e73c490913b002b484c1b2f19d5844a518de  pypy2.7-v7.3.0-src.tar.bz2
    +42dc84a277e7a5e635fe39bbd745f06135902c229a257123332b7555800d915b  pypy2.7-v7.3.0-src.zip
    +a9e3c5c983edba0313a41d3c1ab55b080816c4129e67a6c272c53b9dbcdd97ec  pypy2.7-v7.3.0-win32.zip
    +
    +

    pypy3.6-7.3.0 sha256:

    +
    +b900241bca7152254c107a632767f49edede99ca6360b9a064141267b47ef598  pypy3.6-v7.3.0-aarch64.tar.bz2
    +7045b295d38ba0b5ee65bd3f078ca249fcf1de73fedeaab2d6ad78de2eab0f0e  pypy3.6-v7.3.0-linux32.tar.bz2
    +d3d549e8f43de820ac3385b698b83fa59b4d7dd6cf3fe34c115f731e26ad8856  pypy3.6-v7.3.0-linux64.tar.bz2
    +87b2545dad75fe3027b4b2108aceb9fdadcdd24e61ae312ac48b449fdd452bf3  pypy3.6-v7.3.0-osx64.tar.bz2
    +0fe2f7bbf42ea88b40954d7de773a43179a44f40656f2f58201524be70699544  pypy3.6-v7.3.0-s390x.tar.bz2
    +48d12c15fbcbcf4a32882a883195e1f922997cde78e7a16d4342b9b521eefcfa  pypy3.6-v7.3.0-src.tar.bz2
    +8ae9efd0a2aadb19e892bbd07eca8ef51536296a3ef93964149aceba511e79ca  pypy3.6-v7.3.0-src.zip
    +30e6870c4f3d8ef91890a6556a98080758000ba7c207cccdd86a8f5d358998c1  pypy3.6-v7.3.0-win32.zip
    +

    pypy2.7-7.2.0 sha256:

     57b0be053c6a5f069e23b843f38863cf7920f5eef7bc89f2e086e5c3a28a2ba9  pypy2.7-v7.2.0-aarch64.tar.bz2
    @@ -404,18 +438,6 @@
     4858e7e8a0007bc3b381bd392208b28d30889a4e5a88a3c28e3d9dc4f25b654e  pypy3.6-v7.1.0-src.zip
     77a0576a3d518210467f0df2d0d9a1892c664566dc02f25d974c2dbc6b4749e7  pypy3.6-v7.1.0-win32.zip
     
    -

    pypy2.7-7.0.0 sha256:

    -
    -446fc208dd77a0048368da830564e6e4180bcd786e524b5369c61785af5c903a  pypy2.7-v7.0.0-linux32.tar.bz2
    -971b1909f9fe960c4c643a6940d3f8a60d9a7a2937119535ab0cfaf83498ecd7  pypy2.7-v7.0.0-linux64.tar.bz2
    -e7ecb029d9c7a59388838fc4820a50a2f5bee6536010031060e3dfa882730dc8  pypy2.7-v7.0.0-osx64.tar.bz2
    -2ce390d93fa57ba912066a8b6439588bd9cf6aa9cef44d892b8e3e6dba64615e  pypy2.7-v7.0.0-s390x.tar.bz2
    -04477a41194240cd71e485c3f41dec35a787d1b3bc030f9aa59e5e81bcf4118b  pypy2.7-v7.0.0-win32.zip
    -165ffdf49a04c3ebdc966f76e67dd1767ad699657215dd83ca6996ab8ed87f52  pypy2.7-v7.0.0-ppc64.tar.bz2
    -cfb0e2e9b1434e94ea559548c7486c8e7b4319a397309e8ed3783d9beadf1c6c  pypy2.7-v7.0.0-ppc64le.tar.bz2
    -f51d8bbfc4e73a8a01820b7871a45d13c59f1399822cdf8a19388c69eb20c18c  pypy2.7-v7.0.0-src.tar.bz2
    -77c8c02cf412a5f8182ffe8845877cffa506e5a5ce3a7cd835483fdc1202afd4  pypy2.7-v7.0.0-src.zip
    -