From python-checkins at python.org Mon Sep 1 00:02:55 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:02:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIyMzE1OiBBZGQg?= =?utf-8?q?test_to_capture_the_failure=2E?= Message-ID: <3hmT7M2Yxvz7Lrs@mail.python.org> http://hg.python.org/cpython/rev/7304b9b95438 changeset: 92285:7304b9b95438 branch: 3.4 parent: 92283:c3cecf8e7497 user: Jason R. Coombs date: Sun Aug 31 15:02:42 2014 -0400 summary: #22315: Add test to capture the failure. files: Lib/distutils/tests/test_dir_util.py | 29 ++++++++++++++++ 1 files changed, 29 insertions(+), 0 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -3,7 +3,9 @@ import os import stat import sys +import contextlib +from distutils import dir_util, errors from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree, ensure_relative) @@ -11,6 +13,20 @@ from distutils.tests import support from test.support import run_unittest + + at contextlib.context_manager +def patch_obj(obj, attr, replacement): + """ + A poor man's mock.patch.object + """ + orig = getattr(obj, attr) + try: + setattr(obj, attr, replacement) + yield + finally: + setattr(obj, attr, orig) + + class DirUtilTestCase(support.TempdirManager, unittest.TestCase): def _log(self, msg, *args): @@ -119,6 +135,19 @@ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') + def test_copy_tree_exception_in_listdir(self): + """ + An exception in listdir should raise a DistutilsFileError + """ + def new_listdir(path): + raise OSError() + # simulate a transient network error or other failure invoking listdir + with patch_obj(os, 'listdir', new_listdir): + args = 'src', None + exc = errors.DistutilsFileError + self.assertRaises(exc, dir_util.copy_tree, *args) + + def test_suite(): return unittest.makeSuite(DirUtilTestCase) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 00:02:56 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:02:56 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIyMzE1OiBVc2Ug?= =?utf-8?q?technique_outlined_in_test=5Ffile=5Futil?= Message-ID: <3hmT7N4KbGz7Lrw@mail.python.org> http://hg.python.org/cpython/rev/300cd36eb25c changeset: 92286:300cd36eb25c branch: 3.4 user: Jason R. 
Coombs date: Sun Aug 31 17:31:32 2014 -0400 summary: #22315: Use technique outlined in test_file_util files: Lib/distutils/tests/test_dir_util.py | 27 ++------------- 1 files changed, 5 insertions(+), 22 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -3,7 +3,7 @@ import os import stat import sys -import contextlib +from unittest.mock import patch from distutils import dir_util, errors from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree, @@ -14,19 +14,6 @@ from test.support import run_unittest - at contextlib.context_manager -def patch_obj(obj, attr, replacement): - """ - A poor man's mock.patch.object - """ - orig = getattr(obj, attr) - try: - setattr(obj, attr, replacement) - yield - finally: - setattr(obj, attr, orig) - - class DirUtilTestCase(support.TempdirManager, unittest.TestCase): def _log(self, msg, *args): @@ -135,17 +122,13 @@ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') - def test_copy_tree_exception_in_listdir(self): + @patch('os.listdir', side_effect=OSError()) + def test_copy_tree_exception_in_listdir(self, listdir): """ An exception in listdir should raise a DistutilsFileError """ - def new_listdir(path): - raise OSError() - # simulate a transient network error or other failure invoking listdir - with patch_obj(os, 'listdir', new_listdir): - args = 'src', None - exc = errors.DistutilsFileError - self.assertRaises(exc, dir_util.copy_tree, *args) + with self.assertRaises(errors.DistutilsFileError): + dir_util.copy_tree('src', None) def test_suite(): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 00:02:57 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:02:57 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIyMzE1OiBQcm92?= =?utf-8?q?ide_an_actual_directory_during_test_invocation=2E?= Message-ID: <3hmT7P5g1tz7Lrx@mail.python.org> http://hg.python.org/cpython/rev/3402813338db changeset: 92287:3402813338db branch: 3.4 user: Jason R. Coombs date: Sun Aug 31 17:37:35 2014 -0400 summary: #22315: Provide an actual directory during test invocation. files: Lib/distutils/tests/test_dir_util.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -128,7 +128,7 @@ An exception in listdir should raise a DistutilsFileError """ with self.assertRaises(errors.DistutilsFileError): - dir_util.copy_tree('src', None) + dir_util.copy_tree(self.target, None) def test_suite(): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 00:02:58 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:02:58 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIyMzE1OiBVc2Ug?= =?utf-8?q?an_existent_directory_for_=27src=27_to_trigger_appropriate_beha?= =?utf-8?q?vior=2E?= Message-ID: <3hmT7Q6xrWz7LsF@mail.python.org> http://hg.python.org/cpython/rev/ddb0f90620b7 changeset: 92288:ddb0f90620b7 branch: 3.4 user: Jason R. Coombs date: Sun Aug 31 17:51:22 2014 -0400 summary: #22315: Use an existent directory for 'src' to trigger appropriate behavior. 
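For readers following this thread: the pattern these changesets settle on is the standard ``unittest.mock.patch`` decorator with a ``side_effect``, which makes the patched ``os.listdir`` raise on every call so that ``copy_tree`` has to translate the failure into a ``DistutilsFileError``. A minimal, self-contained sketch of the same idiom (the test class and names below are illustrative, not part of the distutils test suite)::

    import os
    import unittest
    from unittest.mock import patch

    class ListdirFailureExample(unittest.TestCase):

        # Replace os.listdir for the duration of this test only; the
        # side_effect makes every call raise OSError, simulating a
        # permission error or similar failure.
        @patch('os.listdir', side_effect=OSError())
        def test_failure_is_visible_to_callers(self, mock_listdir):
            with self.assertRaises(OSError):
                os.listdir('/any/path')
            # The mock records the call even though it raised.
            self.assertTrue(mock_listdir.called)

    if __name__ == '__main__':
        unittest.main()
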
files: Lib/distutils/tests/test_dir_util.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -128,7 +128,8 @@ An exception in listdir should raise a DistutilsFileError """ with self.assertRaises(errors.DistutilsFileError): - dir_util.copy_tree(self.target, None) + src = self.tempdirs[-1] + dir_util.copy_tree(src, None) def test_suite(): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 00:03:00 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:03:00 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIyMzE1OiBVc2Ug?= =?utf-8?q?advertised_API_for_OSError?= Message-ID: <3hmT7S1BrHz7Ls3@mail.python.org> http://hg.python.org/cpython/rev/75a5cc4ef31c changeset: 92289:75a5cc4ef31c branch: 3.4 user: Jason R. Coombs date: Sun Aug 31 17:42:20 2014 -0400 summary: #22315: Use advertised API for OSError files: Lib/distutils/dir_util.py | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/Lib/distutils/dir_util.py b/Lib/distutils/dir_util.py --- a/Lib/distutils/dir_util.py +++ b/Lib/distutils/dir_util.py @@ -125,12 +125,11 @@ try: names = os.listdir(src) except OSError as e: - (errno, errstr) = e if dry_run: names = [] else: raise DistutilsFileError( - "error listing files in '%s': %s" % (src, errstr)) + "error listing files in '%s': %s" % (src, e.strerror)) if not dry_run: mkpath(dst, verbose=verbose) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 00:03:01 2014 From: python-checkins at python.org (jason.coombs) Date: Mon, 1 Sep 2014 00:03:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E4=3B_Closes_=2322315?= Message-ID: <3hmT7T2wDjz7LsG@mail.python.org> http://hg.python.org/cpython/rev/640c575ab3e1 changeset: 92290:640c575ab3e1 parent: 92284:2b2da4ae86b4 parent: 92289:75a5cc4ef31c user: Jason R. 
Coombs date: Sun Aug 31 18:02:18 2014 -0400 summary: Merge with 3.4; Closes #22315 files: Lib/distutils/dir_util.py | 3 +-- Lib/distutils/tests/test_dir_util.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/Lib/distutils/dir_util.py b/Lib/distutils/dir_util.py --- a/Lib/distutils/dir_util.py +++ b/Lib/distutils/dir_util.py @@ -125,12 +125,11 @@ try: names = os.listdir(src) except OSError as e: - (errno, errstr) = e if dry_run: names = [] else: raise DistutilsFileError( - "error listing files in '%s': %s" % (src, errstr)) + "error listing files in '%s': %s" % (src, e.strerror)) if not dry_run: mkpath(dst, verbose=verbose) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -3,7 +3,9 @@ import os import stat import sys +from unittest.mock import patch +from distutils import dir_util, errors from distutils.dir_util import (mkpath, remove_tree, create_tree, copy_tree, ensure_relative) @@ -11,6 +13,7 @@ from distutils.tests import support from test.support import run_unittest + class DirUtilTestCase(support.TempdirManager, unittest.TestCase): def _log(self, msg, *args): @@ -119,6 +122,16 @@ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') + @patch('os.listdir', side_effect=OSError()) + def test_copy_tree_exception_in_listdir(self, listdir): + """ + An exception in listdir should raise a DistutilsFileError + """ + with self.assertRaises(errors.DistutilsFileError): + src = self.tempdirs[-1] + dir_util.copy_tree(src, None) + + def test_suite(): return unittest.makeSuite(DirUtilTestCase) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 01:57:27 2014 From: python-checkins at python.org (alex.gaynor) Date: Mon, 1 Sep 2014 01:57:27 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_476=3A_Better_document_th?= =?utf-8?q?e_warning_mechanism_discussed_on_python-dev?= Message-ID: <3hmWgW1PTCz7LrX@mail.python.org> http://hg.python.org/peps/rev/d0de5805cbab changeset: 5537:d0de5805cbab user: Alex Gaynor date: Sun Aug 31 16:57:21 2014 -0700 summary: PEP 476: Better document the warning mechanism discussed on python-dev files: pep-0476.txt | 9 +++++++++ 1 files changed, 9 insertions(+), 0 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -115,6 +115,15 @@ following this PEP will be modified to emit a warning in cases that would raise an Exception in Python 3.5. +Warnings +-------- + +To support this warning, in 3.4.next a new ``verify_mode`` is introduced +``CERT_WARN``, which is equivilant to ``CERT_NONE``, except in cases that would +fail as ``CERT_REQUIRED`` or fail the hostname check emits a warning. In +3.4.next the ``httplib`` module will set this as the ``verify_mode`` if the +default context is used. 
+ Other protocols =============== -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Mon Sep 1 05:19:09 2014 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 1 Sep 2014 05:19:09 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_Clarify_space_around_colon_in?= =?utf-8?q?_slice_=28severa_cases=29=2E?= Message-ID: <3hmc8F6RCDz7LrX@mail.python.org> http://hg.python.org/peps/rev/e98737176f1d changeset: 5538:e98737176f1d user: Guido van Rossum date: Sun Aug 31 20:18:33 2014 -0700 summary: Clarify space around colon in slice (severa cases). files: pep-0008.txt | 25 +++++++++++++++++++++++-- 1 files changed, 23 insertions(+), 2 deletions(-) diff --git a/pep-0008.txt b/pep-0008.txt --- a/pep-0008.txt +++ b/pep-0008.txt @@ -408,6 +408,27 @@ Yes: if x == 4: print x, y; x, y = y, x No: if x == 4 : print x , y ; x , y = y , x +- However, in a slice the colon acts like a binary operator, and + should have equal amounts on either side (treating it as the + operator with the lowest priority). In an extended slice, both + colons must have the same amount of spacing applied. Exception: + when a slice parameter is omitted, the space is omitted. + + Yes:: + + ham[1:9], ham[1:9:3], ham[:9:3], ham[1::3], ham[1:9:] + ham[lower:upper], ham[lower:upper:], ham[lower::step] + ham[lower+offset : upper+offset] + ham[: upper_fn(x) : step_fn(x)], ham[:: step_fn(x)] + ham[lower + offset : upper + offset] + + No:: + + ham[lower + offset:upper + offset] + ham[1: 9], ham[1 :9], ham[1:9 :3] + ham[lower : : upper] + ham[ : upper] + - Immediately before the open parenthesis that starts the argument list of a function call:: @@ -417,8 +438,8 @@ - Immediately before the open parenthesis that starts an indexing or slicing:: - Yes: dict['key'] = list[index] - No: dict ['key'] = list [index] + Yes: dct['key'] = lst[index] + No: dct ['key'] = lst [index] - More than one space around an assignment (or other) operator to align it with another. -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Mon Sep 1 09:08:43 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 1 Sep 2014 09:08:43 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMzIw?= =?utf-8?q?=3A_Fix_broken_link_in_the_General_Python_FAQ=2E?= Message-ID: <3hmjF74H5Bz7Lmn@mail.python.org> http://hg.python.org/cpython/rev/241f9aa9fb89 changeset: 92291:241f9aa9fb89 branch: 2.7 parent: 92281:446d4dfcc220 user: Ned Deily date: Sun Aug 31 23:57:13 2014 -0700 summary: Issue #22320: Fix broken link in the General Python FAQ. Original patch by Josh Lynn. files: Doc/faq/general.rst | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -167,7 +167,8 @@ several useful pieces of freely distributable software. The source will compile and run out of the box on most UNIX platforms. -Consult the `Developer FAQ `__ for more +Consult the `Getting Started section of the Python Developer's Guide +`__ for more information on getting the source code and compiling it. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 09:08:44 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 1 Sep 2014 09:08:44 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzIw?= =?utf-8?q?=3A_Fix_broken_link_in_the_General_Python_FAQ=2E?= Message-ID: <3hmjF85fmFz7LrC@mail.python.org> http://hg.python.org/cpython/rev/3eaba8a0cb3a changeset: 92292:3eaba8a0cb3a branch: 3.4 parent: 92289:75a5cc4ef31c user: Ned Deily date: Mon Sep 01 00:06:18 2014 -0700 summary: Issue #22320: Fix broken link in the General Python FAQ. Original patch by Josh Lynn. files: Doc/faq/general.rst | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -167,7 +167,8 @@ several useful pieces of freely distributable software. The source will compile and run out of the box on most UNIX platforms. -Consult the `Developer FAQ `__ for more +Consult the `Getting Started section of the Python Developer's Guide +`__ for more information on getting the source code and compiling it. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 09:08:46 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 1 Sep 2014 09:08:46 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322320=3A_merge_from_3=2E4?= Message-ID: <3hmjFB0SZ2z7Lr3@mail.python.org> http://hg.python.org/cpython/rev/c81e2b6eccd8 changeset: 92293:c81e2b6eccd8 parent: 92290:640c575ab3e1 parent: 92292:3eaba8a0cb3a user: Ned Deily date: Mon Sep 01 00:08:00 2014 -0700 summary: Issue #22320: merge from 3.4 files: Doc/faq/general.rst | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -167,7 +167,8 @@ several useful pieces of freely distributable software. The source will compile and run out of the box on most UNIX platforms. -Consult the `Developer FAQ `__ for more +Consult the `Getting Started section of the Python Developer's Guide +`__ for more information on getting the source code and compiling it. 
-- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Mon Sep 1 10:41:45 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 01 Sep 2014 10:41:45 +0200 Subject: [Python-checkins] Daily reference leaks (640c575ab3e1): sum=151940 Message-ID: results for 640c575ab3e1 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [0, 2, 0] references, sum=2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 0, 2] references, sum=2 test_site leaked [0, 0, 2] memory blocks, sum=2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogHPiXJp', '-x'] From python-checkins at python.org Mon Sep 1 11:29:33 2014 From: python-checkins at python.org (berker.peksag) Date: Mon, 1 Sep 2014 11:29:33 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE5NDQ3?= =?utf-8?q?=3A_Suppress_output_of_py=5Fcompile=2Ecompile=28=29=2E?= Message-ID: <3hmmMd48fGz7Llx@mail.python.org> http://hg.python.org/cpython/rev/2d0bcb653085 changeset: 92294:2d0bcb653085 branch: 3.4 parent: 92292:3eaba8a0cb3a user: Berker Peksag date: Mon Sep 01 12:29:53 2014 +0300 summary: Issue #19447: Suppress output of py_compile.compile(). files: Lib/test/test_py_compile.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py --- a/Lib/test/test_py_compile.py +++ b/Lib/test/test_py_compile.py @@ -94,7 +94,8 @@ def test_bad_coding(self): bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py') - self.assertIsNone(py_compile.compile(bad_coding, doraise=False)) + with support.captured_stderr(): + self.assertIsNone(py_compile.compile(bad_coding, doraise=False)) self.assertFalse(os.path.exists( importlib.util.cache_from_source(bad_coding))) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 11:29:34 2014 From: python-checkins at python.org (berker.peksag) Date: Mon, 1 Sep 2014 11:29:34 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2319447=3A_Suppress_output_of_py=5Fcompile=2Ecomp?= =?utf-8?b?aWxlKCku?= Message-ID: <3hmmMf5rzCz7Ln0@mail.python.org> http://hg.python.org/cpython/rev/a8ef9d7c4d20 changeset: 92295:a8ef9d7c4d20 parent: 92293:c81e2b6eccd8 parent: 92294:2d0bcb653085 user: Berker Peksag date: Mon Sep 01 12:30:17 2014 +0300 summary: Issue #19447: Suppress output of py_compile.compile(). 
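The change above relies on ``test.support.captured_stderr()``, a context manager that temporarily swaps ``sys.stderr`` for an in-memory ``io.StringIO`` so output can be silenced or inspected instead of cluttering the test run. A small illustrative sketch (assumes a CPython checkout or installation where the ``test`` package is available)::

    import sys
    from test.support import captured_stderr

    # Everything written to sys.stderr inside the block goes into the
    # StringIO object yielded by the context manager.
    with captured_stderr() as stderr:
        print("error report that would otherwise pollute the output",
              file=sys.stderr)

    # The captured text can simply be discarded (as test_bad_coding does)
    # or asserted on.
    assert "pollute" in stderr.getvalue()
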
files: Lib/test/test_py_compile.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py --- a/Lib/test/test_py_compile.py +++ b/Lib/test/test_py_compile.py @@ -94,7 +94,8 @@ def test_bad_coding(self): bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py') - self.assertIsNone(py_compile.compile(bad_coding, doraise=False)) + with support.captured_stderr(): + self.assertIsNone(py_compile.compile(bad_coding, doraise=False)) self.assertFalse(os.path.exists( importlib.util.cache_from_source(bad_coding))) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 11:32:59 2014 From: python-checkins at python.org (berker.peksag) Date: Mon, 1 Sep 2014 11:32:59 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Remove_unused_?= =?utf-8?q?imports=2E?= Message-ID: <3hmmRb3jBvz7Llx@mail.python.org> http://hg.python.org/cpython/rev/45d9f4172451 changeset: 92296:45d9f4172451 branch: 3.4 parent: 92294:2d0bcb653085 user: Berker Peksag date: Mon Sep 01 12:33:12 2014 +0300 summary: Remove unused imports. files: Lib/test/test_py_compile.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py --- a/Lib/test/test_py_compile.py +++ b/Lib/test/test_py_compile.py @@ -3,11 +3,11 @@ import py_compile import shutil import stat -import sys import tempfile import unittest -from test import support, script_helper +from test import support + class PyCompileTests(unittest.TestCase): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 11:33:00 2014 From: python-checkins at python.org (berker.peksag) Date: Mon, 1 Sep 2014 11:33:00 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Remove_unused_imports=2E?= Message-ID: <3hmmRc5K9Tz7Llx@mail.python.org> http://hg.python.org/cpython/rev/5f1d5e1f6c1a changeset: 92297:5f1d5e1f6c1a parent: 92295:a8ef9d7c4d20 parent: 92296:45d9f4172451 user: Berker Peksag date: Mon Sep 01 12:33:41 2014 +0300 summary: Remove unused imports. files: Lib/test/test_py_compile.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py --- a/Lib/test/test_py_compile.py +++ b/Lib/test/test_py_compile.py @@ -3,11 +3,11 @@ import py_compile import shutil import stat -import sys import tempfile import unittest -from test import support, script_helper +from test import support + class PyCompileTests(unittest.TestCase): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 1 23:59:23 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 1 Sep 2014 23:59:23 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_a_more_universal_unicode_c?= =?utf-8?q?har_example?= Message-ID: <3hn50q73p4z7LkK@mail.python.org> http://hg.python.org/cpython/rev/68d08df36620 changeset: 92298:68d08df36620 user: Benjamin Peterson date: Mon Sep 01 17:59:18 2014 -0400 summary: a more universal unicode char example files: Doc/library/functions.rst | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -158,8 +158,7 @@ Return the string representing a character whose Unicode codepoint is the integer *i*. 
For example, ``chr(97)`` returns the string ``'a'``, while - ``chr(12491)`` returns the string ``'?'``. This is the inverse of - :func:`ord`. + ``chr(9731)`` returns the string ``'?'``. This is the inverse of :func:`ord`. The valid range for the argument is from 0 through 1,114,111 (0x10FFFF in base 16). :exc:`ValueError` will be raised if *i* is outside that range. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 00:04:08 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 2 Sep 2014 00:04:08 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_try_to_pick_a_unicode_char?= =?utf-8?q?_that_latex_understands?= Message-ID: <3hn56J3MDlz7LjM@mail.python.org> http://hg.python.org/cpython/rev/6f65c9d11cc7 changeset: 92299:6f65c9d11cc7 user: Benjamin Peterson date: Mon Sep 01 18:04:02 2014 -0400 summary: try to pick a unicode char that latex understands files: Doc/library/functions.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -158,7 +158,7 @@ Return the string representing a character whose Unicode codepoint is the integer *i*. For example, ``chr(97)`` returns the string ``'a'``, while - ``chr(9731)`` returns the string ``'?'``. This is the inverse of :func:`ord`. + ``chr(9835)`` returns the string ``'?'``. This is the inverse of :func:`ord`. The valid range for the argument is from 0 through 1,114,111 (0x10FFFF in base 16). :exc:`ValueError` will be raised if *i* is outside that range. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 00:26:40 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 2 Sep 2014 00:26:40 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_sigma_actually_works_in_la?= =?utf-8?b?dGV4Li4u?= Message-ID: <3hn5cJ1Qjgz7LmQ@mail.python.org> http://hg.python.org/cpython/rev/3dafa5f1dc04 changeset: 92300:3dafa5f1dc04 user: Benjamin Peterson date: Mon Sep 01 18:26:22 2014 -0400 summary: sigma actually works in latex... files: Doc/library/functions.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -158,7 +158,7 @@ Return the string representing a character whose Unicode codepoint is the integer *i*. For example, ``chr(97)`` returns the string ``'a'``, while - ``chr(9835)`` returns the string ``'?'``. This is the inverse of :func:`ord`. + ``chr(931)`` returns the string ``'?'``. This is the inverse of :func:`ord`. The valid range for the argument is from 0 through 1,114,111 (0x10FFFF in base 16). :exc:`ValueError` will be raised if *i* is outside that range. @@ -1061,8 +1061,8 @@ Given a string representing one Unicode character, return an integer representing the Unicode code point of that character. For example, - ``ord('a')`` returns the integer ``97`` and ``ord('?')`` returns - ``12491``. This is the inverse of :func:`chr`. + ``ord('a')`` returns the integer ``97`` and ``ord('?')`` returns ``931``. + This is the inverse of :func:`chr`. .. 
function:: pow(x, y[, z]) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 04:51:06 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 2 Sep 2014 04:51:06 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_docs_have_moved?= Message-ID: <3hnCTQ5Rjdz7Ljf@mail.python.org> http://hg.python.org/peps/rev/30a9bd37c3c9 changeset: 5539:30a9bd37c3c9 user: Benjamin Peterson date: Mon Sep 01 22:46:19 2014 -0400 summary: docs have moved files: pep-0101.txt | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -425,10 +425,10 @@ release for an older version, don't change the current link. ___ If this is a final release (even a maintenance release), also unpack - the HTML docs to - /data/ftp.python.org/pub/docs.python.org/release/X.Y.Z on dinsdale. - If it is a release of a security-fix-only version, tell the DE to - build a version with the "version switcher" and put it there. + the HTML docs to /srv/docs.python.org/release/X.Y.Z on + docs-backend.psf.io. Make sure the files are in group "docs". If it + is a release of a security-fix-only version, tell the DE to build a + version with the "version switcher" and put it there. ___ Let the DE check if the docs are built and work all right. -- Repository URL: http://hg.python.org/peps From solipsis at pitrou.net Tue Sep 2 09:34:02 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 02 Sep 2014 09:34:02 +0200 Subject: [Python-checkins] Daily reference leaks (3dafa5f1dc04): sum=151932 Message-ID: results for 3dafa5f1dc04 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [-2, 0, 0] references, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 2, -2] references, sum=0 test_site leaked [0, 2, -2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogQX61t6', '-x'] From python-checkins at python.org Tue Sep 2 11:50:08 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 11:50:08 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogQ2xvc2VzICMyMjI1?= =?utf-8?q?8=3A_Fix_the_the_internal_function_set=5Finheritable=28=29_on_I?= =?utf-8?q?llumos=2E?= Message-ID: <3hnNmw5wWdz7LjM@mail.python.org> http://hg.python.org/cpython/rev/27cef7476f2b changeset: 92301:27cef7476f2b branch: 3.4 parent: 92296:45d9f4172451 user: Victor Stinner date: Tue Sep 02 11:41:04 2014 +0200 summary: Closes #22258: Fix the the internal function set_inheritable() on Illumos. This platform exposes the function ioctl(FIOCLEX), but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() now falls back to the slower fcntl() (F_GETFD and then F_SETFD). files: Misc/NEWS | 5 +++ Python/fileutils.c | 48 ++++++++++++++++++++++++--------- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,11 @@ Core and Builtins ----------------- +- Issue #22258: Fix the the internal function set_inheritable() on Illumos. 
+ This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails + with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() + now falls back to the slower ``fcntl()`` (``F_GETFD`` and then ``F_SETFD``). + - Issue #21669: With the aid of heuristics in SyntaxError.__init__, the parser now attempts to generate more meaningful (or at least more search engine friendly) error messages when "exec" and "print" are used as diff --git a/Python/fileutils.c b/Python/fileutils.c --- a/Python/fileutils.c +++ b/Python/fileutils.c @@ -622,10 +622,12 @@ #ifdef MS_WINDOWS HANDLE handle; DWORD flags; -#elif defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) +#else +#if defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) + static int ioctl_works = -1; int request; int err; -#elif defined(HAVE_FCNTL_H) +#endif int flags; int res; #endif @@ -671,20 +673,38 @@ } return 0; -#elif defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) - if (inheritable) - request = FIONCLEX; - else - request = FIOCLEX; - err = ioctl(fd, request, NULL); - if (err) { - if (raise) - PyErr_SetFromErrno(PyExc_OSError); - return -1; +#else + +#if defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) + if (ioctl_works != 0) { + /* fast-path: ioctl() only requires one syscall */ + if (inheritable) + request = FIONCLEX; + else + request = FIOCLEX; + err = ioctl(fd, request, NULL); + if (!err) { + ioctl_works = 1; + return 0; + } + + if (errno != ENOTTY) { + if (raise) + PyErr_SetFromErrno(PyExc_OSError); + return -1; + } + else { + /* Issue #22258: Here, ENOTTY means "Inappropriate ioctl for + device". The ioctl is declared but not supported by the kernel. + Remember that ioctl() doesn't work. It is the case on + Illumos-based OS for example. */ + ioctl_works = 0; + } + /* fallback to fcntl() if ioctl() does not work */ } - return 0; +#endif -#else + /* slow-path: fcntl() requires two syscalls */ flags = fcntl(fd, F_GETFD); if (flags < 0) { if (raise) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 11:50:10 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 11:50:10 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Closes_=2322258=3A_Fix_the_the_interna?= =?utf-8?q?l_function_set=5Finheritable=28=29_on?= Message-ID: <3hnNmy0Zzmz7LkG@mail.python.org> http://hg.python.org/cpython/rev/4a51c45f405b changeset: 92302:4a51c45f405b parent: 92300:3dafa5f1dc04 parent: 92301:27cef7476f2b user: Victor Stinner date: Tue Sep 02 11:49:48 2014 +0200 summary: (Merge 3.4) Closes #22258: Fix the the internal function set_inheritable() on Illumos. This platform exposes the function ioctl(FIOCLEX), but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() now falls back to the slower fcntl() (F_GETFD and then F_SETFD). files: Misc/NEWS | 5 +++ Python/fileutils.c | 48 ++++++++++++++++++++++++--------- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,11 @@ Core and Builtins ----------------- +- Issue #22258: Fix the the internal function set_inheritable() on Illumos. + This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails + with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() + now falls back to the slower ``fcntl()`` (``F_GETFD`` and then ``F_SETFD``). 
+ - Issue #21389: Displaying the __qualname__ of the underlying function in the repr of a bound method. diff --git a/Python/fileutils.c b/Python/fileutils.c --- a/Python/fileutils.c +++ b/Python/fileutils.c @@ -625,10 +625,12 @@ #ifdef MS_WINDOWS HANDLE handle; DWORD flags; -#elif defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) +#else +#if defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) + static int ioctl_works = -1; int request; int err; -#elif defined(HAVE_FCNTL_H) +#endif int flags; int res; #endif @@ -674,20 +676,38 @@ } return 0; -#elif defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) - if (inheritable) - request = FIONCLEX; - else - request = FIOCLEX; - err = ioctl(fd, request, NULL); - if (err) { - if (raise) - PyErr_SetFromErrno(PyExc_OSError); - return -1; +#else + +#if defined(HAVE_SYS_IOCTL_H) && defined(FIOCLEX) && defined(FIONCLEX) + if (ioctl_works != 0) { + /* fast-path: ioctl() only requires one syscall */ + if (inheritable) + request = FIONCLEX; + else + request = FIOCLEX; + err = ioctl(fd, request, NULL); + if (!err) { + ioctl_works = 1; + return 0; + } + + if (errno != ENOTTY) { + if (raise) + PyErr_SetFromErrno(PyExc_OSError); + return -1; + } + else { + /* Issue #22258: Here, ENOTTY means "Inappropriate ioctl for + device". The ioctl is declared but not supported by the kernel. + Remember that ioctl() doesn't work. It is the case on + Illumos-based OS for example. */ + ioctl_works = 0; + } + /* fallback to fcntl() if ioctl() does not work */ } - return 0; +#endif -#else + /* slow-path: fcntl() requires two syscalls */ flags = fcntl(fd, F_GETFD); if (flags < 0) { if (raise) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 13:15:55 2014 From: python-checkins at python.org (nick.coghlan) Date: Tue, 2 Sep 2014 13:15:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_440=3A_Missed_one_update_?= =?utf-8?q?for_single_number_releases?= Message-ID: <3hnQgv6qnDz7Ljl@mail.python.org> http://hg.python.org/peps/rev/ff38b758e584 changeset: 5540:ff38b758e584 user: Nick Coghlan date: Tue Sep 02 21:15:46 2014 +1000 summary: PEP 440: Missed one update for single number releases files: pep-0440.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0440.txt b/pep-0440.txt --- a/pep-0440.txt +++ b/pep-0440.txt @@ -178,7 +178,7 @@ The release segment consists of one or more non-negative integer values, separated by dots:: - N[.N]+ + N(.N)* Final releases within a project MUST be numbered in a consistently increasing fashion, otherwise automated tools will not be able to upgrade -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Sep 2 19:39:28 2014 From: python-checkins at python.org (guido.van.rossum) Date: Tue, 2 Sep 2014 19:39:28 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2321527=3A_Add_def?= =?utf-8?q?ault_number_of_workers_to_ThreadPoolExecutor=2E_=28Claudiu?= Message-ID: <3hnbBS1dz0z7LjM@mail.python.org> http://hg.python.org/cpython/rev/2805b0dca798 changeset: 92303:2805b0dca798 user: Guido van Rossum date: Tue Sep 02 10:39:18 2014 -0700 summary: Closes #21527: Add default number of workers to ThreadPoolExecutor. (Claudiu Popa.) 
files: Doc/library/concurrent.futures.rst | 10 +++++++++- Lib/concurrent/futures/thread.py | 7 ++++++- Lib/test/test_concurrent_futures.py | 6 ++++++ Misc/NEWS | 3 +++ 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -115,11 +115,19 @@ executor.submit(wait_on_future) -.. class:: ThreadPoolExecutor(max_workers) +.. class:: ThreadPoolExecutor(max_workers=None) An :class:`Executor` subclass that uses a pool of at most *max_workers* threads to execute calls asynchronously. + .. versionchanged:: 3.5 + If *max_workers* is ``None`` or + not given, it will default to the number of processors on the machine, + multiplied by ``5``, assuming that :class:`ThreadPoolExecutor` is often + used to overlap I/O instead of CPU work and the number of workers + should be higher than the number of workers + for :class:`ProcessPoolExecutor`. + .. _threadpoolexecutor-example: diff --git a/Lib/concurrent/futures/thread.py b/Lib/concurrent/futures/thread.py --- a/Lib/concurrent/futures/thread.py +++ b/Lib/concurrent/futures/thread.py @@ -10,6 +10,7 @@ import queue import threading import weakref +import os # Workers are created as daemon threads. This is done to allow the interpreter # to exit when there are still idle threads in a ThreadPoolExecutor's thread @@ -80,13 +81,17 @@ _base.LOGGER.critical('Exception in worker', exc_info=True) class ThreadPoolExecutor(_base.Executor): - def __init__(self, max_workers): + def __init__(self, max_workers=None): """Initializes a new ThreadPoolExecutor instance. Args: max_workers: The maximum number of threads that can be used to execute the given calls. """ + if max_workers is None: + # Use this number because ThreadPoolExecutor is often + # used to overlap I/O instead of CPU work. + max_workers = (os.cpu_count() or 1) * 5 if max_workers <= 0: raise ValueError("max_workers must be greater than 0") diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -11,6 +11,7 @@ from test.script_helper import assert_python_ok +import os import sys import threading import time @@ -444,6 +445,11 @@ self.executor.shutdown(wait=True) self.assertCountEqual(finished, range(10)) + def test_default_workers(self): + executor = self.executor_type() + self.assertEqual(executor._max_workers, + (os.cpu_count() or 1) * 5) + class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase): def test_killed_child(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,9 @@ Library ------- +- Issue #21527: Add a default number of workers to ThreadPoolExecutor equal + to 5 times the number of CPUs. Patch by Claudiu Popa. + - Issue #22216: smtplib now resets its state more completely after a quit. The most obvious consequence of the previous behavior was a STARTTLS failure during a connect/starttls/quit/connect/starttls sequence. 
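To show what the new default means in practice: ``ThreadPoolExecutor()`` can now be instantiated without ``max_workers``, and the pool is sized to ``(os.cpu_count() or 1) * 5`` threads on the assumption that the workers mostly wait on I/O. A short usage sketch (the ``io_bound`` function is a stand-in for real blocking work)::

    import time
    from concurrent.futures import ThreadPoolExecutor

    def io_bound(n):
        # Stand-in for blocking I/O such as a network request.
        time.sleep(0.1)
        return n * 2

    # No max_workers argument: the pool defaults to (os.cpu_count() or 1) * 5
    # threads, which lets these sleeps overlap instead of running serially.
    with ThreadPoolExecutor() as executor:
        results = list(executor.map(io_bound, range(20)))

    print(results)
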
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 21:12:25 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 21:12:25 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_471=3A_Minor_change=2C_re?= =?utf-8?q?name_scandir=28=29_parameter_from_=22directory=22_to_=22path=22?= Message-ID: <3hndFj64Bxz7LjM@mail.python.org> http://hg.python.org/peps/rev/c375c79c2d5c changeset: 5541:c375c79c2d5c user: Victor Stinner date: Tue Sep 02 21:12:14 2014 +0200 summary: PEP 471: Minor change, rename scandir() parameter from "directory" to "path" files: pep-0471.txt | 33 +++++++++++++++++---------------- 1 files changed, 17 insertions(+), 16 deletions(-) diff --git a/pep-0471.txt b/pep-0471.txt --- a/pep-0471.txt +++ b/pep-0471.txt @@ -91,11 +91,11 @@ module in the standard library, ``scandir``, that takes a single, optional string as its argument:: - scandir(directory='.') -> generator of DirEntry objects + scandir(path='.') -> generator of DirEntry objects Like ``listdir``, ``scandir`` calls the operating system's directory iteration system calls to get the names of the files in the given -``directory``, but it's different from ``listdir`` in two ways: +``path``, but it's different from ``listdir`` in two ways: * Instead of returning bare filename strings, it returns lightweight ``DirEntry`` objects that hold the filename string and provide @@ -106,16 +106,17 @@ as a true iterator instead of returning the full list immediately. ``scandir()`` yields a ``DirEntry`` object for each file and -sub-directory in ``directory``. Just like ``listdir``, the ``'.'`` +sub-directory in ``path``. Just like ``listdir``, the ``'.'`` and ``'..'`` pseudo-directories are skipped, and the entries are yielded in system-dependent order. Each ``DirEntry`` object has the following attributes and methods: -* ``name``: the entry's filename, relative to the ``directory`` +* ``name``: the entry's filename, relative to the scandir ``path`` argument (corresponds to the return values of ``os.listdir``) * ``path``: the entry's full path name (not necessarily an absolute - path) -- the equivalent of ``os.path.join(directory, entry.name)`` + path) -- the equivalent of ``os.path.join(scandir_path, + entry.name)`` * ``is_dir(*, follow_symlinks=True)``: similar to ``pathlib.Path.is_dir()``, but the return value is cached on the @@ -124,7 +125,7 @@ * ``is_file(*, follow_symlinks=True)``: similar to ``pathlib.Path.is_file()``, but the return value is cached on the - ``DirEntry`` object; doesn't require a system call in most cases; + ``DirEntry`` object; doesn't require a system call in most cases; don't follow symbolic links if ``follow_symlinks`` is False * ``is_symlink()``: similar to ``pathlib.Path.is_symlink()``, but the @@ -147,9 +148,9 @@ first call. Like the other functions in the ``os`` module, ``scandir()`` accepts -either a bytes or str object for the ``directory`` parameter, and +either a bytes or str object for the ``path`` parameter, and returns the ``DirEntry.name`` and ``DirEntry.path`` attributes with -the same type as ``directory``. However, it is *strongly recommended* +the same type as ``path``. However, it is *strongly recommended* to use the str type, as this ensures cross-platform support for Unicode filenames. (On Windows, bytes filenames have been deprecated since Python 3.3). 
@@ -183,10 +184,10 @@ use of the ``DirEntry.stat()`` method and ``DirEntry.path`` attribute:: - def get_tree_size(directory): - """Return total size of files in directory and subdirs.""" + def get_tree_size(path): + """Return total size of files in given path and subdirs.""" total = 0 - for entry in os.scandir(directory): + for entry in os.scandir(path): if entry.is_dir(follow_symlinks=False): total += get_tree_size(entry.path) else: @@ -256,13 +257,13 @@ For example, below is a version of the ``get_tree_size()`` example shown above, but with fine-grained error handling added:: - def get_tree_size(directory): - """Return total size of files in directory and subdirs. If + def get_tree_size(path): + """Return total size of files in path and subdirs. If is_dir() or stat() fails, print an error message to stderr and assume zero size (for example, file has been deleted). """ total = 0 - for entry in os.scandir(directory): + for entry in os.scandir(path): try: is_dir = entry.is_dir(follow_symlinks=False) except OSError as error: @@ -534,12 +535,12 @@ ``OSError``) during iteration, leading to a rather ugly, hand-made iteration loop:: - it = os.scandir(directory) + it = os.scandir(path) while True: try: entry = next(it) except OSError as error: - handle_error(directory, error) + handle_error(path, error) except StopIteration: break -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Sep 2 23:01:57 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 23:01:57 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_pytime=2Eh=3A_remove_dupli?= =?utf-8?q?cated_=22=23ifndef_Py=5FLIMITED=5FAPI=22?= Message-ID: <3hngh54MHBz7LjY@mail.python.org> http://hg.python.org/cpython/rev/41853d9d60ff changeset: 92304:41853d9d60ff user: Victor Stinner date: Sun Aug 31 15:48:55 2014 +0200 summary: pytime.h: remove duplicated "#ifndef Py_LIMITED_API" files: Include/pytime.h | 3 --- 1 files changed, 0 insertions(+), 3 deletions(-) diff --git a/Include/pytime.h b/Include/pytime.h --- a/Include/pytime.h +++ b/Include/pytime.h @@ -13,8 +13,6 @@ extern "C" { #endif -#ifndef Py_LIMITED_API - #ifdef HAVE_GETTIMEOFDAY typedef struct timeval _PyTime_timeval; #else @@ -96,7 +94,6 @@ /* Initialize time. Return 0 on success, raise an exception and return -1 on error. */ PyAPI_FUNC(int) _PyTime_Init(void); -#endif /* Py_LIMITED_API */ #ifdef __cplusplus } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 23:01:58 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 23:01:58 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322043=3A_Fix_=5FP?= =?utf-8?q?yTime=5Fgettimeofday=28=29_if_HAVE=5FGETTIMEOFDAY?= Message-ID: <3hngh65j6dz7LjY@mail.python.org> http://hg.python.org/cpython/rev/330bd57685fc changeset: 92305:330bd57685fc user: Victor Stinner date: Tue Sep 02 23:01:40 2014 +0200 summary: Issue #22043: Fix _PyTime_gettimeofday() if HAVE_GETTIMEOFDAY Ensure also that the tv_usec field is consistent: in range [0; 999999]. 
files: Python/pytime.c | 7 ++----- 1 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Python/pytime.c b/Python/pytime.c --- a/Python/pytime.c +++ b/Python/pytime.c @@ -37,7 +37,6 @@ info->resolution = timeIncrement * 1e-7; info->adjustable = 1; } - return 0; #else /* MS_WINDOWS */ int err; @@ -67,11 +66,9 @@ else info->resolution = 1e-9; } - return 0; #else /* HAVE_CLOCK_GETTIME */ /* test gettimeofday() */ -#ifdef HAVE_GETTIMEOFDAY #ifdef GETTIMEOFDAY_NO_TZ err = gettimeofday(tp); #else @@ -89,10 +86,10 @@ info->monotonic = 0; info->adjustable = 1; } - return 0; -#endif /* HAVE_GETTIMEOFDAY */ #endif /* !HAVE_CLOCK_GETTIME */ #endif /* !MS_WINDOWS */ + assert(0 <= tp->tv_usec && tp->tv_usec < 1000 * 1000); + return 0; } void -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 2 23:26:17 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 2 Sep 2014 23:26:17 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322043=3A_time=2Em?= =?utf-8?q?onotonic=28=29_is_now_always_available?= Message-ID: <3hnhD92xYvz7LjT@mail.python.org> http://hg.python.org/cpython/rev/b12857782041 changeset: 92306:b12857782041 user: Victor Stinner date: Tue Sep 02 23:18:25 2014 +0200 summary: Issue #22043: time.monotonic() is now always available threading.Lock.acquire(), threading.RLock.acquire() and socket operations now use a monotonic clock, instead of the system clock, when a timeout is used. files: Doc/library/time.rst | 4 +- Doc/whatsnew/3.5.rst | 5 + Include/pytime.h | 18 ++ Lib/queue.py | 5 +- Lib/sched.py | 5 +- Lib/socketserver.py | 5 +- Lib/subprocess.py | 5 +- Lib/telnetlib.py | 5 +- Lib/test/test_selectors.py | 5 +- Lib/threading.py | 5 +- Lib/trace.py | 5 +- Misc/NEWS | 5 + Modules/_threadmodule.c | 4 +- Modules/gcmodule.c | 6 +- Modules/socketmodule.c | 4 +- Modules/timemodule.c | 141 +------------------- Python/pytime.c | 175 +++++++++++++++++++++++++ 17 files changed, 226 insertions(+), 176 deletions(-) diff --git a/Doc/library/time.rst b/Doc/library/time.rst --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -315,9 +315,9 @@ processes running for more than 49 days. On more recent versions of Windows and on other operating systems, :func:`monotonic` is system-wide. - Availability: Windows, Mac OS X, Linux, FreeBSD, OpenBSD, Solaris. - .. versionadded:: 3.3 + .. versionchanged:: 3.5 + The function is now always available. .. function:: perf_counter() diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -238,6 +238,11 @@ :meth:`socket.socket.send`. (contributed by Giampaolo Rodola' in :issue:`17552`) +time +---- + +The :func:`time.monotonic` function is now always available (:issue`22043`). + wsgiref ------- diff --git a/Include/pytime.h b/Include/pytime.h --- a/Include/pytime.h +++ b/Include/pytime.h @@ -91,6 +91,24 @@ long *nsec, _PyTime_round_t); +/* Get the time of a monotonic clock, i.e. a clock that cannot go backwards. + The clock is not affected by system clock updates. The reference point of + the returned value is undefined, so that only the difference between the + results of consecutive calls is valid. + + The function never fails. _PyTime_Init() ensures that a monotonic clock + is available and works. */ +PyAPI_FUNC(void) _PyTime_monotonic( + _PyTime_timeval *tp); + +/* Similar to _PyTime_monotonic(), fill also info (if set) with information of + the function used to get the time. 
+ + Return 0 on success, raise an exception and return -1 on error. */ +PyAPI_FUNC(int) _PyTime_monotonic_info( + _PyTime_timeval *tp, + _Py_clock_info_t *info); + /* Initialize time. Return 0 on success, raise an exception and return -1 on error. */ PyAPI_FUNC(int) _PyTime_Init(void); diff --git a/Lib/queue.py b/Lib/queue.py --- a/Lib/queue.py +++ b/Lib/queue.py @@ -6,10 +6,7 @@ import dummy_threading as threading from collections import deque from heapq import heappush, heappop -try: - from time import monotonic as time -except ImportError: - from time import time +from time import monotonic as time __all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue'] diff --git a/Lib/sched.py b/Lib/sched.py --- a/Lib/sched.py +++ b/Lib/sched.py @@ -35,10 +35,7 @@ import threading except ImportError: import dummy_threading as threading -try: - from time import monotonic as _time -except ImportError: - from time import time as _time +from time import monotonic as _time __all__ = ["scheduler"] diff --git a/Lib/socketserver.py b/Lib/socketserver.py --- a/Lib/socketserver.py +++ b/Lib/socketserver.py @@ -136,10 +136,7 @@ import threading except ImportError: import dummy_threading as threading -try: - from time import monotonic as time -except ImportError: - from time import time as time +from time import monotonic as time __all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer", "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler", diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -365,10 +365,7 @@ import builtins import warnings import errno -try: - from time import monotonic as _time -except ImportError: - from time import time as _time +from time import monotonic as _time # Exception classes used by this module. class SubprocessError(Exception): pass diff --git a/Lib/telnetlib.py b/Lib/telnetlib.py --- a/Lib/telnetlib.py +++ b/Lib/telnetlib.py @@ -36,10 +36,7 @@ import sys import socket import selectors -try: - from time import monotonic as _time -except ImportError: - from time import time as _time +from time import monotonic as _time __all__ = ["Telnet"] diff --git a/Lib/test/test_selectors.py b/Lib/test/test_selectors.py --- a/Lib/test/test_selectors.py +++ b/Lib/test/test_selectors.py @@ -8,10 +8,7 @@ from time import sleep import unittest import unittest.mock -try: - from time import monotonic as time -except ImportError: - from time import time as time +from time import monotonic as time try: import resource except ImportError: diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -3,10 +3,7 @@ import sys as _sys import _thread -try: - from time import monotonic as _time -except ImportError: - from time import time as _time +from time import monotonic as _time from traceback import format_exc as _format_exc from _weakrefset import WeakSet from itertools import islice as _islice diff --git a/Lib/trace.py b/Lib/trace.py --- a/Lib/trace.py +++ b/Lib/trace.py @@ -59,10 +59,7 @@ import dis import pickle from warnings import warn as _warn -try: - from time import monotonic as _time -except ImportError: - from time import time as _time +from time import monotonic as _time try: import threading diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,11 @@ Library ------- +- Issue #22043: time.monotonic() is now always available. 
+ ``threading.Lock.acquire()``, ``threading.RLock.acquire()`` and socket + operations now use a monotonic clock, instead of the system clock, when a + timeout is used. + - Issue #21527: Add a default number of workers to ThreadPoolExecutor equal to 5 times the number of CPUs. Patch by Claudiu Popa. diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -57,7 +57,7 @@ if (microseconds > 0) { - _PyTime_gettimeofday(&endtime); + _PyTime_monotonic(&endtime); endtime.tv_sec += microseconds / (1000 * 1000); endtime.tv_usec += microseconds % (1000 * 1000); } @@ -83,7 +83,7 @@ /* If we're using a timeout, recompute the timeout after processing * signals, since those can take time. */ if (microseconds > 0) { - _PyTime_gettimeofday(&curtime); + _PyTime_monotonic(&curtime); microseconds = ((endtime.tv_sec - curtime.tv_sec) * 1000000 + (endtime.tv_usec - curtime.tv_usec)); diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c --- a/Modules/gcmodule.c +++ b/Modules/gcmodule.c @@ -25,7 +25,7 @@ #include "Python.h" #include "frameobject.h" /* for PyFrame_ClearFreeList */ -#include "pytime.h" /* for _PyTime_gettimeofday, _PyTime_INTERVAL */ +#include "pytime.h" /* for _PyTime_monotonic, _PyTime_INTERVAL */ /* Get an object's GC head */ #define AS_GC(o) ((PyGC_Head *)(o)-1) @@ -919,7 +919,7 @@ for (i = 0; i < NUM_GENERATIONS; i++) PySys_FormatStderr(" %zd", gc_list_size(GEN_HEAD(i))); - _PyTime_gettimeofday(&t1); + _PyTime_monotonic(&t1); PySys_WriteStderr("\n"); } @@ -1025,7 +1025,7 @@ } if (debug & DEBUG_STATS) { _PyTime_timeval t2; - _PyTime_gettimeofday(&t2); + _PyTime_monotonic(&t2); if (m == 0 && n == 0) PySys_WriteStderr("gc: done"); diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -680,7 +680,7 @@ double interval = s->sock_timeout; \ int has_timeout = s->sock_timeout > 0.0; \ if (has_timeout) { \ - _PyTime_gettimeofday(&now); \ + _PyTime_monotonic(&now); \ deadline = now; \ _PyTime_ADD_SECONDS(deadline, s->sock_timeout); \ } \ @@ -691,7 +691,7 @@ if (!has_timeout || \ (!CHECK_ERRNO(EWOULDBLOCK) && !CHECK_ERRNO(EAGAIN))) \ break; \ - _PyTime_gettimeofday(&now); \ + _PyTime_monotonic(&now); \ interval = _PyTime_INTERVAL(now, deadline); \ } \ } \ diff --git a/Modules/timemodule.c b/Modules/timemodule.c --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -37,10 +37,6 @@ #endif /* MS_WINDOWS */ #endif /* !__WATCOMC__ || __QNX__ */ -#if defined(__APPLE__) -#include -#endif - /* Forward declarations */ static int floatsleep(double); static PyObject* floattime(_Py_clock_info_t *info); @@ -899,122 +895,15 @@ should not be relied on."); #endif /* HAVE_WORKING_TZSET */ -#if defined(MS_WINDOWS) || defined(__APPLE__) \ - || (defined(HAVE_CLOCK_GETTIME) \ - && (defined(CLOCK_HIGHRES) || defined(CLOCK_MONOTONIC))) -#define PYMONOTONIC -#endif - -#ifdef PYMONOTONIC -static PyObject* +static PyObject * pymonotonic(_Py_clock_info_t *info) { -#if defined(MS_WINDOWS) - static ULONGLONG (*GetTickCount64) (void) = NULL; - static ULONGLONG (CALLBACK *Py_GetTickCount64)(void); - static int has_getickcount64 = -1; - double result; - - if (has_getickcount64 == -1) { - /* GetTickCount64() was added to Windows Vista */ - if (winver.dwMajorVersion >= 6) { - HINSTANCE hKernel32; - hKernel32 = GetModuleHandleW(L"KERNEL32"); - *(FARPROC*)&Py_GetTickCount64 = GetProcAddress(hKernel32, - "GetTickCount64"); - has_getickcount64 = (Py_GetTickCount64 != NULL); - } - else - has_getickcount64 
= 0; - } - - if (has_getickcount64) { - ULONGLONG ticks; - ticks = Py_GetTickCount64(); - result = (double)ticks * 1e-3; - } - else { - static DWORD last_ticks = 0; - static DWORD n_overflow = 0; - DWORD ticks; - - ticks = GetTickCount(); - if (ticks < last_ticks) - n_overflow++; - last_ticks = ticks; - - result = ldexp(n_overflow, 32); - result += ticks; - result *= 1e-3; - } - - if (info) { - DWORD timeAdjustment, timeIncrement; - BOOL isTimeAdjustmentDisabled, ok; - if (has_getickcount64) - info->implementation = "GetTickCount64()"; - else - info->implementation = "GetTickCount()"; - info->monotonic = 1; - ok = GetSystemTimeAdjustment(&timeAdjustment, &timeIncrement, - &isTimeAdjustmentDisabled); - if (!ok) { - PyErr_SetFromWindowsErr(0); - return NULL; - } - info->resolution = timeIncrement * 1e-7; - info->adjustable = 0; - } - return PyFloat_FromDouble(result); - -#elif defined(__APPLE__) - static mach_timebase_info_data_t timebase; - uint64_t time; - double secs; - - if (timebase.denom == 0) { - /* According to the Technical Q&A QA1398, mach_timebase_info() cannot - fail: https://developer.apple.com/library/mac/#qa/qa1398/ */ - (void)mach_timebase_info(&timebase); - } - - time = mach_absolute_time(); - secs = (double)time * timebase.numer / timebase.denom * 1e-9; - if (info) { - info->implementation = "mach_absolute_time()"; - info->resolution = (double)timebase.numer / timebase.denom * 1e-9; - info->monotonic = 1; - info->adjustable = 0; - } - return PyFloat_FromDouble(secs); - -#elif defined(HAVE_CLOCK_GETTIME) && (defined(CLOCK_HIGHRES) || defined(CLOCK_MONOTONIC)) - struct timespec tp; -#ifdef CLOCK_HIGHRES - const clockid_t clk_id = CLOCK_HIGHRES; - const char *function = "clock_gettime(CLOCK_HIGHRES)"; -#else - const clockid_t clk_id = CLOCK_MONOTONIC; - const char *function = "clock_gettime(CLOCK_MONOTONIC)"; -#endif - - if (clock_gettime(clk_id, &tp) != 0) { - PyErr_SetFromErrno(PyExc_OSError); + _PyTime_timeval tv; + if (_PyTime_monotonic_info(&tv, info) < 0) { + assert(info != NULL); return NULL; } - - if (info) { - struct timespec res; - info->monotonic = 1; - info->implementation = function; - info->adjustable = 0; - if (clock_getres(clk_id, &res) == 0) - info->resolution = res.tv_sec + res.tv_nsec * 1e-9; - else - info->resolution = 1e-9; - } - return PyFloat_FromDouble(tp.tv_sec + tp.tv_nsec * 1e-9); -#endif + return PyFloat_FromDouble((double)tv.tv_sec + tv.tv_usec * 1e-6); } static PyObject * @@ -1027,7 +916,6 @@ "monotonic() -> float\n\ \n\ Monotonic clock, cannot go backward."); -#endif /* PYMONOTONIC */ static PyObject* perf_counter(_Py_clock_info_t *info) @@ -1035,20 +923,7 @@ #ifdef WIN32_PERF_COUNTER return win_perf_counter(info); #else - -#ifdef PYMONOTONIC - static int use_monotonic = 1; - - if (use_monotonic) { - PyObject *res = pymonotonic(info); - if (res != NULL) - return res; - use_monotonic = 0; - PyErr_Clear(); - } -#endif - return floattime(info); - + return pymonotonic(info); #endif } @@ -1216,10 +1091,8 @@ else if (strcmp(name, "clock") == 0) obj = pyclock(&info); #endif -#ifdef PYMONOTONIC else if (strcmp(name, "monotonic") == 0) obj = pymonotonic(&info); -#endif else if (strcmp(name, "perf_counter") == 0) obj = perf_counter(&info); else if (strcmp(name, "process_time") == 0) @@ -1411,9 +1284,7 @@ #ifdef HAVE_WORKING_TZSET {"tzset", time_tzset, METH_NOARGS, tzset_doc}, #endif -#ifdef PYMONOTONIC {"monotonic", time_monotonic, METH_NOARGS, monotonic_doc}, -#endif {"process_time", time_process_time, METH_NOARGS, process_time_doc}, {"perf_counter", 
time_perf_counter, METH_NOARGS, perf_counter_doc}, {"get_clock_info", time_get_clock_info, METH_VARARGS, get_clock_info_doc}, diff --git a/Python/pytime.c b/Python/pytime.c --- a/Python/pytime.c +++ b/Python/pytime.c @@ -3,6 +3,14 @@ #include #endif +#if defined(__APPLE__) +#include /* mach_absolute_time(), mach_timebase_info() */ +#endif + +#ifdef MS_WINDOWS +static OSVERSIONINFOEX winver; +#endif + static int pygettimeofday(_PyTime_timeval *tp, _Py_clock_info_t *info, int raise) { @@ -109,6 +117,160 @@ return pygettimeofday(tp, info, 1); } +static int +pymonotonic(_PyTime_timeval *tp, _Py_clock_info_t *info, int raise) +{ +#ifdef Py_DEBUG + static _PyTime_timeval last = {-1, -1}; +#endif +#if defined(MS_WINDOWS) + static ULONGLONG (*GetTickCount64) (void) = NULL; + static ULONGLONG (CALLBACK *Py_GetTickCount64)(void); + static int has_gettickcount64 = -1; + ULONGLONG result; + + assert(info == NULL || raise); + + if (has_gettickcount64 == -1) { + /* GetTickCount64() was added to Windows Vista */ + has_gettickcount64 = (winver.dwMajorVersion >= 6); + if (has_gettickcount64) { + HINSTANCE hKernel32; + hKernel32 = GetModuleHandleW(L"KERNEL32"); + *(FARPROC*)&Py_GetTickCount64 = GetProcAddress(hKernel32, + "GetTickCount64"); + assert(Py_GetTickCount64 != NULL); + } + } + + if (has_gettickcount64) { + result = Py_GetTickCount64(); + } + else { + static DWORD last_ticks = 0; + static DWORD n_overflow = 0; + DWORD ticks; + + ticks = GetTickCount(); + if (ticks < last_ticks) + n_overflow++; + last_ticks = ticks; + + result = (ULONGLONG)n_overflow << 32; + result += ticks; + } + + tp->tv_sec = result / 1000; + tp->tv_usec = (result % 1000) * 1000; + + if (info) { + DWORD timeAdjustment, timeIncrement; + BOOL isTimeAdjustmentDisabled, ok; + if (has_gettickcount64) + info->implementation = "GetTickCount64()"; + else + info->implementation = "GetTickCount()"; + info->monotonic = 1; + ok = GetSystemTimeAdjustment(&timeAdjustment, &timeIncrement, + &isTimeAdjustmentDisabled); + if (!ok) { + PyErr_SetFromWindowsErr(0); + return -1; + } + info->resolution = timeIncrement * 1e-7; + info->adjustable = 0; + } + +#elif defined(__APPLE__) + static mach_timebase_info_data_t timebase; + uint64_t time; + + if (timebase.denom == 0) { + /* According to the Technical Q&A QA1398, mach_timebase_info() cannot + fail: https://developer.apple.com/library/mac/#qa/qa1398/ */ + (void)mach_timebase_info(&timebase); + } + + time = mach_absolute_time(); + + /* nanoseconds => microseconds */ + time /= 1000; + /* apply timebase factor */ + time *= timebase.numer; + time /= timebase.denom; + tp->tv_sec = time / (1000 * 1000); + tp->tv_usec = time % (1000 * 1000); + + if (info) { + info->implementation = "mach_absolute_time()"; + info->resolution = (double)timebase.numer / timebase.denom * 1e-9; + info->monotonic = 1; + info->adjustable = 0; + } + +#else + struct timespec ts; +#ifdef CLOCK_HIGHRES + const clockid_t clk_id = CLOCK_HIGHRES; + const char *implementation = "clock_gettime(CLOCK_HIGHRES)"; +#else + const clockid_t clk_id = CLOCK_MONOTONIC; + const char *implementation = "clock_gettime(CLOCK_MONOTONIC)"; +#endif + + assert(info == NULL || raise); + + if (clock_gettime(clk_id, &ts) != 0) { + if (raise) { + PyErr_SetFromErrno(PyExc_OSError); + return -1; + } + tp->tv_sec = 0; + tp->tv_usec = 0; + return -1; + } + + if (info) { + struct timespec res; + info->monotonic = 1; + info->implementation = implementation; + info->adjustable = 0; + if (clock_getres(clk_id, &res) != 0) { + PyErr_SetFromErrno(PyExc_OSError); + return 
-1; + } + info->resolution = res.tv_sec + res.tv_nsec * 1e-9; + } + tp->tv_sec = ts.tv_sec; + tp->tv_usec = ts.tv_nsec / 1000; +#endif + assert(0 <= tp->tv_usec && tp->tv_usec < 1000 * 1000); +#ifdef Py_DEBUG + /* monotonic clock cannot go backward */ + assert(tp->tv_sec > last.tv_sec + || (tp->tv_sec == last.tv_sec && tp->tv_usec >= last.tv_usec)); + last = *tp; +#endif + return 0; +} + +void +_PyTime_monotonic(_PyTime_timeval *tp) +{ + if (pymonotonic(tp, NULL, 0) < 0) { + /* cannot happen, _PyTime_Init() checks that pymonotonic() works */ + assert(0); + tp->tv_sec = 0; + tp->tv_usec = 0; + } +} + +int +_PyTime_monotonic_info(_PyTime_timeval *tp, _Py_clock_info_t *info) +{ + return pymonotonic(tp, info, 1); +} + static void error_time_t_overflow(void) { @@ -245,8 +407,21 @@ _PyTime_Init(void) { _PyTime_timeval tv; + +#ifdef MS_WINDOWS + winver.dwOSVersionInfoSize = sizeof(winver); + if (!GetVersionEx((OSVERSIONINFO*)&winver)) { + PyErr_SetFromWindowsErr(0); + return -1; + } +#endif + /* ensure that the system clock works */ if (_PyTime_gettimeofday_info(&tv, NULL) < 0) return -1; + + /* ensure that the operating system provides a monotonic clock */ + if (_PyTime_monotonic_info(&tv, NULL) < 0) + return -1; return 0; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 09:44:06 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 3 Sep 2014 09:44:06 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322043=3A_Fix_pymo?= =?utf-8?q?notonic=28=29=2C_use_tv=5Fusec=3D-1_as_a_marker_to_skip?= Message-ID: <3hnxx25gtyz7LjX@mail.python.org> http://hg.python.org/cpython/rev/9deef14393d5 changeset: 92307:9deef14393d5 user: Victor Stinner date: Wed Sep 03 09:43:48 2014 +0200 summary: Issue #22043: Fix pymonotonic(), use tv_usec=-1 as a marker to skip the monotonic test files: Python/pytime.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Python/pytime.c b/Python/pytime.c --- a/Python/pytime.c +++ b/Python/pytime.c @@ -121,7 +121,7 @@ pymonotonic(_PyTime_timeval *tp, _Py_clock_info_t *info, int raise) { #ifdef Py_DEBUG - static _PyTime_timeval last = {-1, -1}; + static _PyTime_timeval last = {0, -1}; #endif #if defined(MS_WINDOWS) static ULONGLONG (*GetTickCount64) (void) = NULL; @@ -247,7 +247,8 @@ assert(0 <= tp->tv_usec && tp->tv_usec < 1000 * 1000); #ifdef Py_DEBUG /* monotonic clock cannot go backward */ - assert(tp->tv_sec > last.tv_sec + assert(last.tv_usec == -1 + || tp->tv_sec > last.tv_sec || (tp->tv_sec == last.tv_sec && tp->tv_usec >= last.tv_usec)); last = *tp; #endif -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Wed Sep 3 10:36:00 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 03 Sep 2014 10:36:00 +0200 Subject: [Python-checkins] Daily reference leaks (b12857782041): sum=151934 Message-ID: results for b12857782041 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [2, 0, -2] references, sum=0 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 2] references, sum=0 test_site leaked [-2, 0, 2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', 
'3:3:/home/antoine/cpython/refleaks/refloggfQtJ8', '-x'] From python-checkins at python.org Wed Sep 3 22:19:01 2014 From: python-checkins at python.org (terry.reedy) Date: Wed, 3 Sep 2014 22:19:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxOTMz?= =?utf-8?q?=3A_Users_can_now_change_the_font_size_for_example_code=2E?= Message-ID: <3hpGh52w3jz7LjQ@mail.python.org> http://hg.python.org/cpython/rev/ce14092430b6 changeset: 92308:ce14092430b6 branch: 3.4 parent: 92301:27cef7476f2b user: Terry Jan Reedy date: Wed Sep 03 16:17:41 2014 -0400 summary: Issue #21933: Users can now change the font size for example code. Original patch by Lita Cho. files: Lib/turtledemo/__main__.py | 93 +++++++++++++++++++------ 1 files changed, 71 insertions(+), 22 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -17,14 +17,17 @@ The (syntax coloured) source code appears in the left source code window. IT CANNOT BE EDITED, but ONLY VIEWED! - - Press START button to start the demo. - - Stop execution by pressing the STOP button. - - Clear screen by pressing the CLEAR button. - - Restart by pressing the START button again. + The demo viewer windows can be resized. The divider between text + and canvas can be moved by grabbing it with the mouse. The text font + size can be changed from the menu and with Control/Command- '-'/'+'. + It can also be changed on most systems with Control-mousewheel. - SPECIAL demos are those which run EVENTDRIVEN. - (For example clock.py - or oldTurtleDemo.py which - in the end expects a mouse click.): + Press START button to start the demo. + Stop execution by pressing the STOP button. + Clear screen by pressing the CLEAR button. + Restart by pressing the START button again. + + SPECIAL demos, such as clock.py are those which run EVENTDRIVEN. Press START button to start the demo. @@ -87,7 +90,7 @@ from tkinter import * from idlelib.Percolator import Percolator from idlelib.ColorDelegator import ColorDelegator -from idlelib.textView import view_text # TextViewer +from idlelib.textView import view_text from importlib import reload from turtledemo import __doc__ as about_turtledemo @@ -95,6 +98,7 @@ import time demo_dir = os.path.dirname(os.path.abspath(__file__)) +darwin = sys.platform == 'darwin' STARTUP = 1 READY = 2 @@ -104,7 +108,11 @@ menufont = ("Arial", 12, NORMAL) btnfont = ("Arial", 12, 'bold') -txtfont = ('Lucida Console', 8, 'normal') +txtfont = ['Lucida Console', 10, 'normal'] + +MINIMUM_FONT_SIZE = 6 +MAXIMUM_FONT_SIZE = 100 +font_sizes = [8, 9, 10, 11, 12, 14, 18, 20, 22, 24, 30] def getExampleEntries(): return [entry[:-3] for entry in os.listdir(demo_dir) if @@ -123,7 +131,7 @@ root.title('Python turtle-graphics examples') root.wm_protocol("WM_DELETE_WINDOW", self._destroy) - if sys.platform == 'darwin': + if darwin: import subprocess # Make sure we are the currently activated OS X application # so that our menu bar appears. 
@@ -136,8 +144,7 @@ '-e', 'end tell', ], stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL, - ) + stdout=subprocess.DEVNULL,) root.grid_rowconfigure(0, weight=1) root.grid_columnconfigure(0, weight=1) @@ -147,9 +154,11 @@ self.mBar = Menu(root, relief=RAISED, borderwidth=2) self.mBar.add_cascade(menu=self.makeLoadDemoMenu(self.mBar), - label='Examples', underline=0, font=menufont) + label='Examples', underline=0) + self.mBar.add_cascade(menu=self.makeFontMenu(self.mBar), + label='Fontsize', underline=0) self.mBar.add_cascade(menu=self.makeHelpMenu(self.mBar), - label='Help', underline=0, font=menufont) + label='Help', underline=0) root['menu'] = self.mBar pane = PanedWindow(orient=HORIZONTAL, sashwidth=5, @@ -203,7 +212,7 @@ hbar['command'] = text.xview hbar.pack(side=BOTTOM, fill=X) - text['font'] = txtfont + text['font'] = tuple(txtfont) text['yscrollcommand'] = vbar.set text['xscrollcommand'] = hbar.set text.pack(side=LEFT, fill=BOTH, expand=1) @@ -216,7 +225,7 @@ turtle._Screen._canvas = self._canvas = canvas = turtle.ScrolledCanvas( root, 800, 600, self.canvwidth, self.canvheight) canvas.adjustScrolls() - canvas._rootwindow.bind('', self.onResize) + self.makeBindings(canvas._rootwindow) canvas._canvas['borderwidth'] = 0 self.screen = _s_ = turtle.Screen() @@ -225,6 +234,35 @@ turtle.RawTurtle.screens = [_s_] return canvas + def makeBindings(self, widget): + widget.bind('', self.onResize) + + shortcut = 'Command' if darwin else 'Control' + widget.bind_all('<%s-minus>' % shortcut, self.decrease_size) + widget.bind_all('<%s-underscore>' % shortcut, self.decrease_size) + widget.bind_all('<%s-equal>' % shortcut, self.increase_size) + widget.bind_all('<%s-plus>' % shortcut, self.increase_size) + widget.bind_all('', self.update_mousewheel) + widget.bind('', self.increase_size) + widget.bind('', self.decrease_size) + + def set_txtsize(self, size): + txtfont[1] = size + self.text['font'] = tuple(txtfont) + self.output_lbl['text'] = 'Font size %d' % size + + def decrease_size(self, dummy=None): + self.set_txtsize(max(txtfont[1] - 1, MINIMUM_FONT_SIZE)) + + def increase_size(self, dummy=None): + self.set_txtsize(min(txtfont[1] + 1, MAXIMUM_FONT_SIZE)) + + def update_mousewheel(self, event): + # For wheel up, event.delte = 120 on Windows, -1 on darwin. + # X-11 sends Control-Button-4 event instead. 
+ (self.decrease_size() if (event.delta < 0 and not darwin) + else self.increase_size()) + def configGUI(self, start, stop, clear, txt="", color="blue"): self.start_btn.config(state=start, bg="#d00" if start == NORMAL else "#fca") @@ -238,13 +276,25 @@ menu = Menu(master) for entry in getExampleEntries(): - def loadexample(x): - def emit(): - self.loadfile(x) - return emit + def load(entry=entry): + self.loadfile(entry) menu.add_command(label=entry, underline=0, - font=menufont, command=loadexample(entry)) + font=menufont, command=load) + return menu + def makeFontMenu(self, master): + menu = Menu(master) + menu.add_command(label="Decrease (C-'-')", command=self.decrease_size, + font=menufont) + menu.add_command(label="Increase (C-'+')", command=self.increase_size, + font=menufont) + menu.add_separator() + + for size in font_sizes: + def resize(size=size): + self.set_txtsize(size) + menu.add_command(label=str(size), underline=0, + font=menufont, command=resize) return menu def makeHelpMenu(self, master): @@ -254,7 +304,6 @@ def show(help_label=help_label, help_file=help_file): view_text(self.root, help_label, help_file) menu.add_command(label=help_label, font=menufont, command=show) - return menu def refreshCanvas(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 22:19:02 2014 From: python-checkins at python.org (terry.reedy) Date: Wed, 3 Sep 2014 22:19:02 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321933=3A_Merge_with_3=2E4?= Message-ID: <3hpGh65dHfz7Ljn@mail.python.org> http://hg.python.org/cpython/rev/e2e0c9f90a81 changeset: 92309:e2e0c9f90a81 parent: 92307:9deef14393d5 parent: 92308:ce14092430b6 user: Terry Jan Reedy date: Wed Sep 03 16:18:34 2014 -0400 summary: Issue #21933: Merge with 3.4 files: Lib/turtledemo/__main__.py | 93 +++++++++++++++++++------ 1 files changed, 71 insertions(+), 22 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -17,14 +17,17 @@ The (syntax coloured) source code appears in the left source code window. IT CANNOT BE EDITED, but ONLY VIEWED! - - Press START button to start the demo. - - Stop execution by pressing the STOP button. - - Clear screen by pressing the CLEAR button. - - Restart by pressing the START button again. + The demo viewer windows can be resized. The divider between text + and canvas can be moved by grabbing it with the mouse. The text font + size can be changed from the menu and with Control/Command- '-'/'+'. + It can also be changed on most systems with Control-mousewheel. - SPECIAL demos are those which run EVENTDRIVEN. - (For example clock.py - or oldTurtleDemo.py which - in the end expects a mouse click.): + Press START button to start the demo. + Stop execution by pressing the STOP button. + Clear screen by pressing the CLEAR button. + Restart by pressing the START button again. + + SPECIAL demos, such as clock.py are those which run EVENTDRIVEN. Press START button to start the demo. 
@@ -87,7 +90,7 @@ from tkinter import * from idlelib.Percolator import Percolator from idlelib.ColorDelegator import ColorDelegator -from idlelib.textView import view_text # TextViewer +from idlelib.textView import view_text from importlib import reload from turtledemo import __doc__ as about_turtledemo @@ -95,6 +98,7 @@ import time demo_dir = os.path.dirname(os.path.abspath(__file__)) +darwin = sys.platform == 'darwin' STARTUP = 1 READY = 2 @@ -104,7 +108,11 @@ menufont = ("Arial", 12, NORMAL) btnfont = ("Arial", 12, 'bold') -txtfont = ('Lucida Console', 8, 'normal') +txtfont = ['Lucida Console', 10, 'normal'] + +MINIMUM_FONT_SIZE = 6 +MAXIMUM_FONT_SIZE = 100 +font_sizes = [8, 9, 10, 11, 12, 14, 18, 20, 22, 24, 30] def getExampleEntries(): return [entry[:-3] for entry in os.listdir(demo_dir) if @@ -123,7 +131,7 @@ root.title('Python turtle-graphics examples') root.wm_protocol("WM_DELETE_WINDOW", self._destroy) - if sys.platform == 'darwin': + if darwin: import subprocess # Make sure we are the currently activated OS X application # so that our menu bar appears. @@ -136,8 +144,7 @@ '-e', 'end tell', ], stderr=subprocess.DEVNULL, - stdout=subprocess.DEVNULL, - ) + stdout=subprocess.DEVNULL,) root.grid_rowconfigure(0, weight=1) root.grid_columnconfigure(0, weight=1) @@ -147,9 +154,11 @@ self.mBar = Menu(root, relief=RAISED, borderwidth=2) self.mBar.add_cascade(menu=self.makeLoadDemoMenu(self.mBar), - label='Examples', underline=0, font=menufont) + label='Examples', underline=0) + self.mBar.add_cascade(menu=self.makeFontMenu(self.mBar), + label='Fontsize', underline=0) self.mBar.add_cascade(menu=self.makeHelpMenu(self.mBar), - label='Help', underline=0, font=menufont) + label='Help', underline=0) root['menu'] = self.mBar pane = PanedWindow(orient=HORIZONTAL, sashwidth=5, @@ -203,7 +212,7 @@ hbar['command'] = text.xview hbar.pack(side=BOTTOM, fill=X) - text['font'] = txtfont + text['font'] = tuple(txtfont) text['yscrollcommand'] = vbar.set text['xscrollcommand'] = hbar.set text.pack(side=LEFT, fill=BOTH, expand=1) @@ -216,7 +225,7 @@ turtle._Screen._canvas = self._canvas = canvas = turtle.ScrolledCanvas( root, 800, 600, self.canvwidth, self.canvheight) canvas.adjustScrolls() - canvas._rootwindow.bind('', self.onResize) + self.makeBindings(canvas._rootwindow) canvas._canvas['borderwidth'] = 0 self.screen = _s_ = turtle.Screen() @@ -225,6 +234,35 @@ turtle.RawTurtle.screens = [_s_] return canvas + def makeBindings(self, widget): + widget.bind('', self.onResize) + + shortcut = 'Command' if darwin else 'Control' + widget.bind_all('<%s-minus>' % shortcut, self.decrease_size) + widget.bind_all('<%s-underscore>' % shortcut, self.decrease_size) + widget.bind_all('<%s-equal>' % shortcut, self.increase_size) + widget.bind_all('<%s-plus>' % shortcut, self.increase_size) + widget.bind_all('', self.update_mousewheel) + widget.bind('', self.increase_size) + widget.bind('', self.decrease_size) + + def set_txtsize(self, size): + txtfont[1] = size + self.text['font'] = tuple(txtfont) + self.output_lbl['text'] = 'Font size %d' % size + + def decrease_size(self, dummy=None): + self.set_txtsize(max(txtfont[1] - 1, MINIMUM_FONT_SIZE)) + + def increase_size(self, dummy=None): + self.set_txtsize(min(txtfont[1] + 1, MAXIMUM_FONT_SIZE)) + + def update_mousewheel(self, event): + # For wheel up, event.delte = 120 on Windows, -1 on darwin. + # X-11 sends Control-Button-4 event instead. 
+ (self.decrease_size() if (event.delta < 0 and not darwin) + else self.increase_size()) + def configGUI(self, start, stop, clear, txt="", color="blue"): self.start_btn.config(state=start, bg="#d00" if start == NORMAL else "#fca") @@ -238,13 +276,25 @@ menu = Menu(master) for entry in getExampleEntries(): - def loadexample(x): - def emit(): - self.loadfile(x) - return emit + def load(entry=entry): + self.loadfile(entry) menu.add_command(label=entry, underline=0, - font=menufont, command=loadexample(entry)) + font=menufont, command=load) + return menu + def makeFontMenu(self, master): + menu = Menu(master) + menu.add_command(label="Decrease (C-'-')", command=self.decrease_size, + font=menufont) + menu.add_command(label="Increase (C-'+')", command=self.increase_size, + font=menufont) + menu.add_separator() + + for size in font_sizes: + def resize(size=size): + self.set_txtsize(size) + menu.add_command(label=str(size), underline=0, + font=menufont, command=resize) return menu def makeHelpMenu(self, master): @@ -254,7 +304,6 @@ def show(help_label=help_label, help_file=help_file): view_text(self.root, help_label, help_file) menu.add_command(label=help_label, font=menufont, command=show) - return menu def refreshCanvas(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 23:33:54 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 3 Sep 2014 23:33:54 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzMx?= =?utf-8?q?=3A_Skip_test=5Finterrupted=5Fwrite=5Ftext=28=29_on_FreeBSD_old?= =?utf-8?q?er_than_8=2E0?= Message-ID: <3hpJLV4f3Kz7LjP@mail.python.org> http://hg.python.org/cpython/rev/baa372eb731c changeset: 92310:baa372eb731c branch: 3.4 parent: 92308:ce14092430b6 user: Victor Stinner date: Wed Sep 03 23:32:28 2014 +0200 summary: Issue #22331: Skip test_interrupted_write_text() on FreeBSD older than 8.0 files: Lib/test/test_io.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -3298,6 +3298,8 @@ def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") + # Issue #22331: The test hangs on FreeBSD 7.2 + @support.requires_freebsd_version(8) def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 23:33:55 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 3 Sep 2014 23:33:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2322331=3A_Skip_test=5Finterrup?= =?utf-8?q?ted=5Fwrite=5Ftext=28=29_on_FreeBSD_older?= Message-ID: <3hpJLW6Md9z7LkG@mail.python.org> http://hg.python.org/cpython/rev/340d48347295 changeset: 92311:340d48347295 parent: 92309:e2e0c9f90a81 parent: 92310:baa372eb731c user: Victor Stinner date: Wed Sep 03 23:33:43 2014 +0200 summary: (Merge 3.4) Issue #22331: Skip test_interrupted_write_text() on FreeBSD older than 8.0 files: Lib/test/test_io.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -3383,6 +3383,8 @@ def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") + # Issue #22331: The test hangs on FreeBSD 7.2 + 
@support.requires_freebsd_version(8) def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 23:48:55 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 3 Sep 2014 23:48:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzMy?= =?utf-8?q?=3A_test=5Fmultiprocessing=5Fmain=5Fhandling_is_now_skipped_if_?= =?utf-8?q?sem=5Fopen?= Message-ID: <3hpJgq2qVBz7LjZ@mail.python.org> http://hg.python.org/cpython/rev/db9eb9cba1ec changeset: 92312:db9eb9cba1ec branch: 3.4 parent: 92310:baa372eb731c user: Victor Stinner date: Wed Sep 03 23:48:08 2014 +0200 summary: Issue #22332: test_multiprocessing_main_handling is now skipped if sem_open implementation is broken (ex: skipped on FreeBSD 6.4). files: Lib/test/test_multiprocessing_main_handling.py | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_multiprocessing_main_handling.py b/Lib/test/test_multiprocessing_main_handling.py --- a/Lib/test/test_multiprocessing_main_handling.py +++ b/Lib/test/test_multiprocessing_main_handling.py @@ -22,6 +22,9 @@ import multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) +# Issue #22332: Skip tests if sem_open implementation is broken. +support.import_module('multiprocessing.synchronize') + verbose = support.verbose test_source = """\ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 3 23:48:56 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 3 Sep 2014 23:48:56 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2322332=3A_test=5Fmultiprocessi?= =?utf-8?q?ng=5Fmain=5Fhandling_is_now_skipped_if?= Message-ID: <3hpJgr4WKcz7Ljf@mail.python.org> http://hg.python.org/cpython/rev/2f21d920d00d changeset: 92313:2f21d920d00d parent: 92311:340d48347295 parent: 92312:db9eb9cba1ec user: Victor Stinner date: Wed Sep 03 23:48:37 2014 +0200 summary: (Merge 3.4) Issue #22332: test_multiprocessing_main_handling is now skipped if sem_open implementation is broken (ex: skipped on FreeBSD 6.4). files: Lib/test/test_multiprocessing_main_handling.py | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_multiprocessing_main_handling.py b/Lib/test/test_multiprocessing_main_handling.py --- a/Lib/test/test_multiprocessing_main_handling.py +++ b/Lib/test/test_multiprocessing_main_handling.py @@ -22,6 +22,9 @@ import multiprocessing AVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods()) +# Issue #22332: Skip tests if sem_open implementation is broken. 
+support.import_module('multiprocessing.synchronize') + verbose = support.verbose test_source = """\ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 00:36:27 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 00:36:27 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIwOTU3?= =?utf-8?q?=3A_test=5Fsmtpnet_now_uses_support=2Etransient=5Finternet=28?= =?utf-8?q?=29_to_call?= Message-ID: <3hpKkg1DNZz7Ljb@mail.python.org> http://hg.python.org/cpython/rev/b7200cde1b68 changeset: 92314:b7200cde1b68 branch: 3.4 parent: 92312:db9eb9cba1ec user: Victor Stinner date: Thu Sep 04 00:35:43 2014 +0200 summary: Issue #20957: test_smtpnet now uses support.transient_internet() to call check_ssl_verifiy(), so only test_connect_using_sslcontext_verified() is skipped if smtp.gmail.com cannot be joined, not the whole file. files: Lib/test/test_smtpnet.py | 7 +++++-- 1 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_smtpnet.py b/Lib/test/test_smtpnet.py --- a/Lib/test/test_smtpnet.py +++ b/Lib/test/test_smtpnet.py @@ -42,7 +42,6 @@ class SmtpSSLTest(unittest.TestCase): testServer = 'smtp.gmail.com' remotePort = 465 - can_verify = check_ssl_verifiy(testServer, remotePort) def test_connect(self): support.get_attribute(smtplib, 'SMTP_SSL') @@ -66,8 +65,12 @@ server.ehlo() server.quit() - @unittest.skipUnless(can_verify, "SSL certificate can't be verified") def test_connect_using_sslcontext_verified(self): + with support.transient_internet(self.testServer): + can_verify = check_ssl_verifiy(self.testServer, self.remotePort) + if not can_verify: + self.skipTest("SSL certificate can't be verified") + support.get_attribute(smtplib, 'SMTP_SSL') context = ssl.create_default_context() with support.transient_internet(self.testServer): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 00:36:28 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 00:36:28 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2320957=3A_test=5Fsmtpnet_now_u?= =?utf-8?q?ses_support=2Etransient=5Finternet=28=29_to?= Message-ID: <3hpKkh2txsz7LkT@mail.python.org> http://hg.python.org/cpython/rev/85511d4a846e changeset: 92315:85511d4a846e parent: 92313:2f21d920d00d parent: 92314:b7200cde1b68 user: Victor Stinner date: Thu Sep 04 00:36:09 2014 +0200 summary: (Merge 3.4) Issue #20957: test_smtpnet now uses support.transient_internet() to call check_ssl_verifiy(), so only test_connect_using_sslcontext_verified() is skipped if smtp.gmail.com cannot be joined, not the whole file. 
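The change moves the reachability probe out of the class body, where it ran once at class-definition time and could take the whole file down, and into the test method itself, so an unreachable server now skips only the one verified-connection test. A minimal sketch of that pattern, with a hypothetical _server_reachable() helper standing in for the real check_ssl_verifiy()/transient_internet() pair, might look like this:

    import socket
    import unittest

    def _server_reachable(host, port, timeout=5.0):
        """Hypothetical stand-in for the real availability check."""
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False

    class ExampleSSLTest(unittest.TestCase):
        testServer = 'smtp.gmail.com'
        remotePort = 465

        def test_connect_verified(self):
            # Probe at run time, inside the test, so an unreachable
            # server skips only this test instead of the whole module.
            if not _server_reachable(self.testServer, self.remotePort):
                self.skipTest("server cannot be reached")
            # the real assertions against the live server would follow here

The diff below applies the same idea with test.support.transient_internet(), which is designed to turn transient network failures inside its block into skips rather than hard errors.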
files: Lib/test/test_smtpnet.py | 7 +++++-- 1 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_smtpnet.py b/Lib/test/test_smtpnet.py --- a/Lib/test/test_smtpnet.py +++ b/Lib/test/test_smtpnet.py @@ -42,7 +42,6 @@ class SmtpSSLTest(unittest.TestCase): testServer = 'smtp.gmail.com' remotePort = 465 - can_verify = check_ssl_verifiy(testServer, remotePort) def test_connect(self): support.get_attribute(smtplib, 'SMTP_SSL') @@ -66,8 +65,12 @@ server.ehlo() server.quit() - @unittest.skipUnless(can_verify, "SSL certificate can't be verified") def test_connect_using_sslcontext_verified(self): + with support.transient_internet(self.testServer): + can_verify = check_ssl_verifiy(self.testServer, self.remotePort) + if not can_verify: + self.skipTest("SSL certificate can't be verified") + support.get_attribute(smtplib, 'SMTP_SSL') context = ssl.create_default_context() with support.transient_internet(self.testServer): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 00:54:02 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 00:54:02 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxNDQw?= =?utf-8?q?=3A_Backport_changeset_4ebf97299b18_to_branch_3=2E4=2C_use?= Message-ID: <3hpL6y4b5Tz7LjR@mail.python.org> http://hg.python.org/cpython/rev/706ec07100d6 changeset: 92316:706ec07100d6 branch: 3.4 parent: 92314:b7200cde1b68 user: Victor Stinner date: Thu Sep 04 00:49:01 2014 +0200 summary: Issue #21440: Backport changeset 4ebf97299b18 to branch 3.4, use support.rmtree() and support.unlink() in test_zipfile & test_tarfile files: Lib/test/test_tarfile.py | 39 +++++++++++++-------------- Lib/test/test_zipfile.py | 23 +++++++-------- 2 files changed, 30 insertions(+), 32 deletions(-) diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -1,7 +1,6 @@ import sys import os import io -import shutil from hashlib import md5 import unittest @@ -480,16 +479,16 @@ # Test hardlink extraction (e.g. bug #857297). 
with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: tar.extract("ustar/regtype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) + self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/regtype")) tar.extract("ustar/lnktype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/lnktype")) with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: data = f.read() self.assertEqual(md5sum(data), md5_regtype) tar.extract("ustar/symtype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/symtype")) with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: data = f.read() self.assertEqual(md5sum(data), md5_regtype) @@ -522,7 +521,7 @@ self.assertEqual(tarinfo.mtime, file_mtime, errmsg) finally: tar.close() - shutil.rmtree(DIR) + support.rmtree(DIR) def test_extract_directory(self): dirtype = "ustar/dirtype" @@ -537,7 +536,7 @@ if sys.platform != "win32": self.assertEqual(os.stat(extracted).st_mode & 0o777, 0o755) finally: - shutil.rmtree(DIR) + support.rmtree(DIR) def test_init_close_fobj(self): # Issue #7341: Close the internal file object in the TarFile @@ -901,7 +900,7 @@ fobj.seek(4096) fobj.truncate() s = os.stat(name) - os.remove(name) + support.unlink(name) return s.st_blocks == 0 else: return False @@ -1034,7 +1033,7 @@ finally: tar.close() finally: - os.rmdir(path) + support.rmdir(path) @unittest.skipUnless(hasattr(os, "link"), "Missing hardlink implementation") @@ -1054,8 +1053,8 @@ finally: tar.close() finally: - os.remove(target) - os.remove(link) + support.unlink(target) + support.unlink(link) @support.skip_unless_symlink def test_symlink_size(self): @@ -1069,7 +1068,7 @@ finally: tar.close() finally: - os.remove(path) + support.unlink(path) def test_add_self(self): # Test for #1257255. @@ -1116,7 +1115,7 @@ finally: tar.close() finally: - shutil.rmtree(tempdir) + support.rmtree(tempdir) def test_filter(self): tempdir = os.path.join(TEMPDIR, "filter") @@ -1152,7 +1151,7 @@ finally: tar.close() finally: - shutil.rmtree(tempdir) + support.rmtree(tempdir) # Guarantee that stored pathnames are not modified. Don't # remove ./ or ../ or double slashes. Still make absolute @@ -1180,9 +1179,9 @@ tar.close() if not dir: - os.remove(foo) + support.unlink(foo) else: - os.rmdir(foo) + support.rmdir(foo) self.assertEqual(t.name, cmp_path or path.replace(os.sep, "/")) @@ -1213,8 +1212,8 @@ finally: tar.close() finally: - os.unlink(temparchive) - shutil.rmtree(tempdir) + support.unlink(temparchive) + support.rmtree(tempdir) def test_pathnames(self): self._test_pathname("foo") @@ -1314,7 +1313,7 @@ # Test for issue #8464: Create files with correct # permissions. 
if os.path.exists(tmpname): - os.remove(tmpname) + support.unlink(tmpname) original_umask = os.umask(0o022) try: @@ -1668,7 +1667,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + support.unlink(self.tarname) def _create_testtar(self, mode="w:"): with tarfile.open(tarname, encoding="iso8859-1") as src: @@ -2175,7 +2174,7 @@ def tearDownModule(): if os.path.exists(TEMPDIR): - shutil.rmtree(TEMPDIR) + support.rmtree(TEMPDIR) if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -3,7 +3,6 @@ import sys import importlib.util import time -import shutil import struct import zipfile import unittest @@ -12,7 +11,7 @@ from tempfile import TemporaryFile from random import randint, random, getrandbits -from test.support import (TESTFN, findfile, unlink, +from test.support import (TESTFN, findfile, unlink, rmtree, requires_zlib, requires_bz2, requires_lzma, captured_stdout, check_warnings) @@ -691,7 +690,7 @@ self.assertNotIn('mod2.txt', names) finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) def test_write_python_directory_filtered(self): os.mkdir(TESTFN2) @@ -711,7 +710,7 @@ self.assertNotIn('mod2.py', names) finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) def test_write_non_pyfile(self): with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp: @@ -741,7 +740,7 @@ self.assertNotIn('mod1.pyo', names) finally: - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) class ExtractTests(unittest.TestCase): @@ -767,7 +766,7 @@ os.remove(writtenfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def test_extract_all(self): with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp: @@ -785,7 +784,7 @@ os.remove(outfile) # remove the test file subdirectories - shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) + rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) def check_file(self, filename, content): self.assertTrue(os.path.isfile(filename)) @@ -867,12 +866,12 @@ msg='extract %r: %r != %r' % (arcname, writtenfile, correctfile)) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall(targetpath) self.check_file(correctfile, content) - shutil.rmtree('target') + rmtree('target') correctfile = os.path.join(os.getcwd(), *fixedname.split('/')) @@ -881,12 +880,12 @@ self.assertEqual(writtenfile, correctfile, msg="extract %r" % arcname) self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) with zipfile.ZipFile(TESTFN2, 'r') as zipfp: zipfp.extractall() self.check_file(correctfile, content) - shutil.rmtree(fixedname.split('/')[0]) + rmtree(fixedname.split('/')[0]) os.remove(TESTFN2) @@ -1643,7 +1642,7 @@ self.assertTrue(zipf.filelist[0].filename.endswith("x/")) def tearDown(self): - shutil.rmtree(TESTFN2) + rmtree(TESTFN2) if os.path.exists(TESTFN): unlink(TESTFN) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 00:54:03 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 00:54:03 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxNDQw?= =?utf-8?q?=3A_test=5Fzipfile=3A_replace_last_direct_calls_to_os=2Eremove?= =?utf-8?q?=28=29_with?= Message-ID: <3hpL6z6q6Mz7Ljp@mail.python.org> 
http://hg.python.org/cpython/rev/6b4d31641109 changeset: 92317:6b4d31641109 branch: 3.4 user: Victor Stinner date: Thu Sep 04 00:51:09 2014 +0200 summary: Issue #21440: test_zipfile: replace last direct calls to os.remove() with support.unlink() files: Lib/test/test_zipfile.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -717,7 +717,7 @@ with open(TESTFN, 'w') as f: f.write('most definitely not a python file') self.assertRaises(RuntimeError, zipfp.writepy, TESTFN) - os.remove(TESTFN) + unlink(TESTFN) def test_write_pyfile_bad_syntax(self): os.mkdir(TESTFN2) @@ -763,7 +763,7 @@ with open(writtenfile, "rb") as f: self.assertEqual(fdata.encode(), f.read()) - os.remove(writtenfile) + unlink(writtenfile) # remove the test file subdirectories rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) @@ -781,7 +781,7 @@ with open(outfile, "rb") as f: self.assertEqual(fdata.encode(), f.read()) - os.remove(outfile) + unlink(outfile) # remove the test file subdirectories rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) @@ -887,7 +887,7 @@ self.check_file(correctfile, content) rmtree(fixedname.split('/')[0]) - os.remove(TESTFN2) + unlink(TESTFN2) class OtherTests(unittest.TestCase): @@ -1755,7 +1755,7 @@ def tearDown(self): for sep, fn in self.arcfiles.items(): - os.remove(fn) + unlink(fn) unlink(TESTFN) unlink(TESTFN2) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 00:54:05 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 00:54:05 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogKE1lcmdlIDMuNCkgSXNzdWUgIzIxNDQwOiB0ZXN0X3ppcGZpbGU6IHJl?= =?utf-8?q?place_last_direct_calls_to?= Message-ID: <3hpL711cPyz7LkJ@mail.python.org> http://hg.python.org/cpython/rev/73ce1afc6ee2 changeset: 92318:73ce1afc6ee2 parent: 92315:85511d4a846e parent: 92317:6b4d31641109 user: Victor Stinner date: Thu Sep 04 00:51:25 2014 +0200 summary: (Merge 3.4) Issue #21440: test_zipfile: replace last direct calls to os.remove() with support.unlink() files: Lib/test/test_zipfile.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -717,7 +717,7 @@ with open(TESTFN, 'w') as f: f.write('most definitely not a python file') self.assertRaises(RuntimeError, zipfp.writepy, TESTFN) - os.remove(TESTFN) + unlink(TESTFN) def test_write_pyfile_bad_syntax(self): os.mkdir(TESTFN2) @@ -763,7 +763,7 @@ with open(writtenfile, "rb") as f: self.assertEqual(fdata.encode(), f.read()) - os.remove(writtenfile) + unlink(writtenfile) # remove the test file subdirectories rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) @@ -781,7 +781,7 @@ with open(outfile, "rb") as f: self.assertEqual(fdata.encode(), f.read()) - os.remove(outfile) + unlink(outfile) # remove the test file subdirectories rmtree(os.path.join(os.getcwd(), 'ziptest2dir')) @@ -887,7 +887,7 @@ self.check_file(correctfile, content) rmtree(fixedname.split('/')[0]) - os.remove(TESTFN2) + unlink(TESTFN2) class OtherTests(unittest.TestCase): @@ -1755,7 +1755,7 @@ def tearDown(self): for sep, fn in self.arcfiles.items(): - os.remove(fn) + unlink(fn) unlink(TESTFN) unlink(TESTFN2) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:03:03 2014 From: 
python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 01:03:03 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_timeout_in_test=2Efork?= =?utf-8?q?=5Fwait?= Message-ID: <3hpLKM6pM4z7LjN@mail.python.org> http://hg.python.org/cpython/rev/797c68e67188 changeset: 92319:797c68e67188 user: Victor Stinner date: Thu Sep 04 01:02:17 2014 +0200 summary: Fix timeout in test.fork_wait files: Lib/test/fork_wait.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/fork_wait.py b/Lib/test/fork_wait.py --- a/Lib/test/fork_wait.py +++ b/Lib/test/fork_wait.py @@ -52,7 +52,7 @@ deadline = time.monotonic() + 10.0 while len(self.alive) < NUM_THREADS: time.sleep(0.1) - if time.monotonic() <= deadline: + if deadline < time.monotonic(): break a = sorted(self.alive.keys()) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:25:03 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 4 Sep 2014 01:25:03 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_this_is_a_standards_track_pep?= Message-ID: <3hpLpl3chQz7Ljk@mail.python.org> http://hg.python.org/peps/rev/6d6de2e0749f changeset: 5542:6d6de2e0749f user: Benjamin Peterson date: Wed Sep 03 19:24:58 2014 -0400 summary: this is a standards track pep files: pep-0477.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0477.txt b/pep-0477.txt --- a/pep-0477.txt +++ b/pep-0477.txt @@ -5,7 +5,7 @@ Author: Donald Stufft Nick Coghlan Status: Active -Type: Process +Type: Standards Track Content-Type: text/x-rst Created: 26-Aug-2014 Post-History: 1-Sep-2014 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Thu Sep 4 01:29:58 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:29:58 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMDUx?= =?utf-8?q?=3A_remove_unneeded_reload_that_allowed_bad_code=2E?= Message-ID: <3hpLwQ5J42z7LkS@mail.python.org> http://hg.python.org/cpython/rev/b76d854f580e changeset: 92320:b76d854f580e branch: 2.7 parent: 92291:241f9aa9fb89 user: Terry Jan Reedy date: Wed Sep 03 19:29:11 2014 -0400 summary: Issue #22051: remove unneeded reload that allowed bad code. files: Demo/turtle/turtleDemo.py | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Demo/turtle/turtleDemo.py b/Demo/turtle/turtleDemo.py --- a/Demo/turtle/turtleDemo.py +++ b/Demo/turtle/turtleDemo.py @@ -210,7 +210,6 @@ direc, fname = os.path.split(filename) self.root.title(fname[6:-3]+" - a Python turtle graphics example") self.module = __import__(fname[:-3]) - reload(self.module) self.configGUI(NORMAL, NORMAL, DISABLED, DISABLED, "Press start button", "red") self.state = READY -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:29:59 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:29:59 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMDUx?= =?utf-8?q?=3A_remove_unneeded_reload_that_allowed_bad_code=2E?= Message-ID: <3hpLwR6yXsz7LlV@mail.python.org> http://hg.python.org/cpython/rev/55d4f6c2be2d changeset: 92321:55d4f6c2be2d branch: 3.4 parent: 92317:6b4d31641109 user: Terry Jan Reedy date: Wed Sep 03 19:29:17 2014 -0400 summary: Issue #22051: remove unneeded reload that allowed bad code. 
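Removing the reload() call means a demo is re-run simply by calling its main() again; that only works if every piece of mutable state is created inside main() rather than at import time, as the NEWS entries further down spell out. A rough illustration of a demo module laid out that way (the drawing code itself is made up):

    from turtle import Screen, Turtle, mainloop

    def main():
        # All state is built here, on every run; nothing mutable is
        # created at import time, so no reload() is needed to reset it.
        screen = Screen()
        screen.clear()
        pen = Turtle()
        for _ in range(4):
            pen.forward(100)
            pen.left(90)
        return "Done!"

    if __name__ == '__main__':
        msg = main()
        print(msg)
        mainloop()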
files: Lib/turtledemo/__main__.py | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -91,7 +91,6 @@ from idlelib.Percolator import Percolator from idlelib.ColorDelegator import ColorDelegator from idlelib.textView import view_text -from importlib import reload from turtledemo import __doc__ as about_turtledemo import turtle @@ -322,7 +321,6 @@ self.text.delete("1.0", "end") self.text.insert("1.0", chars) self.root.title(filename + " - a Python turtle graphics example") - reload(self.module) self.configGUI(NORMAL, DISABLED, DISABLED, "Press start button", "red") self.state = READY -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:30:01 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:30:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E4?= Message-ID: <3hpLwT1Z7rz7LlT@mail.python.org> http://hg.python.org/cpython/rev/9569b52e6e21 changeset: 92322:9569b52e6e21 parent: 92319:797c68e67188 parent: 92321:55d4f6c2be2d user: Terry Jan Reedy date: Wed Sep 03 19:29:31 2014 -0400 summary: Merge with 3.4 files: Lib/turtledemo/__main__.py | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -91,7 +91,6 @@ from idlelib.Percolator import Percolator from idlelib.ColorDelegator import ColorDelegator from idlelib.textView import view_text -from importlib import reload from turtledemo import __doc__ as about_turtledemo import turtle @@ -322,7 +321,6 @@ self.text.delete("1.0", "end") self.text.insert("1.0", chars) self.root.title(filename + " - a Python turtle graphics example") - reload(self.module) self.configGUI(NORMAL, DISABLED, DISABLED, "Press start button", "red") self.state = READY -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:43:37 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:43:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_NEWS_items_for?= =?utf-8?q?_turtledemo=2E?= Message-ID: <3hpMD973VYz7LjR@mail.python.org> http://hg.python.org/cpython/rev/78a61a78969a changeset: 92323:78a61a78969a branch: 2.7 parent: 92320:b76d854f580e user: Terry Jan Reedy date: Wed Sep 03 19:39:18 2014 -0400 summary: NEWS items for turtledemo. files: Misc/NEWS | 12 ++++++++++++ 1 files changed, 12 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,18 @@ Library ------- +- Issue #22051: turtledemo no longer reloads examples to re-run them. + Initialization of variables and gui setup should be done in main(), + which is called each time a demo is run, but not on import. + +_ Issue #21597: The separator between the turtledemo text pane and the drawing + canvas can now be grabbed and dragged with a mouse. The code text pane can + be widened to easily view or copy the full width of the text. The canvas + can be widened on small screens. Original patches by Jan Kanis and Lita Cho. + +- Issue #18132: Turtledemo buttons no longer disappear when the window is + shrunk. Original patches by Jan Kanis and Lita Cho. + - Issue #22312: Fix ntpath.splitdrive IndexError. - Issue #22216: smtplib now resets its state more completely after a quit. 
The -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:43:39 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:43:39 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_NEWS_items_for?= =?utf-8?q?_turtledemo=2E?= Message-ID: <3hpMDC1qjYz7LjR@mail.python.org> http://hg.python.org/cpython/rev/948be7abfc45 changeset: 92324:948be7abfc45 branch: 3.4 parent: 92321:55d4f6c2be2d user: Terry Jan Reedy date: Wed Sep 03 19:39:25 2014 -0400 summary: NEWS items for turtledemo. files: Misc/NEWS | 16 ++++++++++++++++ 1 files changed, 16 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,22 @@ Library ------- +- Issue #22051: turtledemo no longer reloads examples to re-run them. + Initialization of variables and gui setup should be done in main(), + which is called each time a demo is run, but not on import. + +- Issue #21933: Turtledemo users can change the code font size with a menu + selection or control(command) '-' or '+' or control-mousewheel. + Original patch by Lita Cho. + +_ Issue #21597: The separator between the turtledemo text pane and the drawing + canvas can now be grabbed and dragged with a mouse. The code text pane can + be widened to easily view or copy the full width of the text. The canvas + can be widened on small screens. Original patches by Jan Kanis and Lita Cho. + +- Issue #18132: Turtledemo buttons no longer disappear when the window is + shrunk. Original patches by Jan Kanis and Lita Cho. + - Issue #22216: smtplib now resets its state more completely after a quit. The most obvious consequence of the previous behavior was a STARTTLS failure during a connect/starttls/quit/connect/starttls sequence. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:43:40 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:43:40 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_with_3=2E4?= Message-ID: <3hpMDD3lTrz7Lk6@mail.python.org> http://hg.python.org/cpython/rev/f7bc5ecb0a95 changeset: 92325:f7bc5ecb0a95 parent: 92322:9569b52e6e21 parent: 92324:948be7abfc45 user: Terry Jan Reedy date: Wed Sep 03 19:43:12 2014 -0400 summary: merge with 3.4 files: Misc/NEWS | 16 ++++++++++++++++ 1 files changed, 16 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,22 @@ Library ------- +- Issue #22051: turtledemo no longer reloads examples to re-run them. + Initialization of variables and gui setup should be done in main(), + which is called each time a demo is run, but not on import. + +- Issue #21933: Turtledemo users can change the code font size with a menu + selection or control(command) '-' or '+' or control-mousewheel. + Original patch by Lita Cho. + +_ Issue #21597: The separator between the turtledemo text pane and the drawing + canvas can now be grabbed and dragged with a mouse. The code text pane can + be widened to easily view or copy the full width of the text. The canvas + can be widened on small screens. Original patches by Jan Kanis and Lita Cho. + +- Issue #18132: Turtledemo buttons no longer disappear when the window is + shrunk. Original patches by Jan Kanis and Lita Cho. + - Issue #22043: time.monotonic() is now always available. 
``threading.Lock.acquire()``, ``threading.RLock.acquire()`` and socket operations now use a monotonic clock, instead of the system clock, when a -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:54:32 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:54:32 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_NEWS_items_for?= =?utf-8?q?_Idle=2E?= Message-ID: <3hpMSm58f9z7Ljk@mail.python.org> http://hg.python.org/cpython/rev/db95df7f309e changeset: 92326:db95df7f309e branch: 2.7 parent: 92323:78a61a78969a user: Terry Jan Reedy date: Wed Sep 03 19:52:39 2014 -0400 summary: NEWS items for Idle. files: Misc/NEWS | 9 +++++++++ 1 files changed, 9 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -121,6 +121,15 @@ - Issue #22199: Make get_makefile_filename() available in Lib/sysconfig.py for 2.7 to match other versions of sysconfig. +IDLE +---- + +- Issue #17390: Adjust Editor window title; remove 'Python', + move version to end. + +- Issue #14105: Idle debugger breakpoints no longer disappear + when inseting or deleting lines. + Extension Modules ----------------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:54:33 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:54:33 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_NEWS_items_for?= =?utf-8?q?_Idle=2E?= Message-ID: <3hpMSn725Gz7Ljk@mail.python.org> http://hg.python.org/cpython/rev/b2e9d845bc6c changeset: 92327:b2e9d845bc6c branch: 3.4 parent: 92324:948be7abfc45 user: Terry Jan Reedy date: Wed Sep 03 19:52:46 2014 -0400 summary: NEWS items for Idle. files: Misc/NEWS | 10 ++++++++-- 1 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -46,7 +46,7 @@ can be widened on small screens. Original patches by Jan Kanis and Lita Cho. - Issue #18132: Turtledemo buttons no longer disappear when the window is - shrunk. Original patches by Jan Kanis and Lita Cho. + shrunk. Original patches by Jan Kanis and Lita Cho. - Issue #22216: smtplib now resets its state more completely after a quit. The most obvious consequence of the previous behavior was a STARTTLS failure @@ -285,7 +285,13 @@ IDLE ---- -- Issue #17172: Add the ability to run turtledemo from Idle. +- Issue #17390: Adjust Editor window title; remove 'Python', + move version to end. + +- Issue #14105: Idle debugger breakpoints no longer disappear + when inseting or deleting lines. + +- Issue #17172: Turtledemo can now be run from Idle. Currently, the entry is on the Help menu, but it may move to Run. Patch by Ramchandra Apt and Lita Cho. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:54:35 2014 From: python-checkins at python.org (terry.reedy) Date: Thu, 4 Sep 2014 01:54:35 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_NEWS_items_for_Idle=2E?= Message-ID: <3hpMSq1mBYz7LkY@mail.python.org> http://hg.python.org/cpython/rev/c69be794a9c6 changeset: 92328:c69be794a9c6 parent: 92325:f7bc5ecb0a95 parent: 92327:b2e9d845bc6c user: Terry Jan Reedy date: Wed Sep 03 19:54:06 2014 -0400 summary: NEWS items for Idle. 
files: Misc/NEWS | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -802,7 +802,13 @@ IDLE ---- -- Issue #17172: Add the ability to run turtledemo from Idle. +- Issue #17390: Adjust Editor window title; remove 'Python', + move version to end. + +- Issue #14105: Idle debugger breakpoints no longer disappear + when inseting or deleting lines. + +- Issue #17172: Turtledemo can now be run from Idle. Currently, the entry is on the Help menu, but it may move to Run. Patch by Ramchandra Apt and Lita Cho. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 01:55:09 2014 From: python-checkins at python.org (nick.coghlan) Date: Thu, 4 Sep 2014 01:55:09 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_477=3A_With_GvR=27s_appro?= =?utf-8?q?val=2C_Benjamin_is_the_BDFL-Delegate?= Message-ID: <3hpMTT6YZ1z7Ljk@mail.python.org> http://hg.python.org/peps/rev/909954e4ba43 changeset: 5543:909954e4ba43 user: Nick Coghlan date: Thu Sep 04 09:54:58 2014 +1000 summary: PEP 477: With GvR's approval, Benjamin is the BDFL-Delegate files: pep-0477.txt | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/pep-0477.txt b/pep-0477.txt --- a/pep-0477.txt +++ b/pep-0477.txt @@ -4,7 +4,8 @@ Last-Modified: $Date$ Author: Donald Stufft Nick Coghlan -Status: Active +BDFL-Delegate: Benjamin Peterson +Status: Draft Type: Standards Track Content-Type: text/x-rst Created: 26-Aug-2014 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Thu Sep 4 04:22:24 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 4 Sep 2014 04:22:24 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_accept_pep_477?= Message-ID: <3hpQlN3jc5z7Lkc@mail.python.org> http://hg.python.org/peps/rev/6d1b12bd4a3a changeset: 5544:6d1b12bd4a3a user: Benjamin Peterson date: Wed Sep 03 22:22:19 2014 -0400 summary: accept pep 477 files: pep-0477.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0477.txt b/pep-0477.txt --- a/pep-0477.txt +++ b/pep-0477.txt @@ -5,7 +5,7 @@ Author: Donald Stufft Nick Coghlan BDFL-Delegate: Benjamin Peterson -Status: Draft +Status: Accepted Type: Standards Track Content-Type: text/x-rst Created: 26-Aug-2014 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Thu Sep 4 06:20:08 2014 From: python-checkins at python.org (nick.coghlan) Date: Thu, 4 Sep 2014 06:20:08 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_Resolution_link_for_PEP_477_a?= =?utf-8?q?cceptance?= Message-ID: <3hpTMD0Sn8z7LlB@mail.python.org> http://hg.python.org/peps/rev/da85a173e556 changeset: 5545:da85a173e556 user: Nick Coghlan date: Thu Sep 04 14:19:57 2014 +1000 summary: Resolution link for PEP 477 acceptance files: pep-0477.txt | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/pep-0477.txt b/pep-0477.txt --- a/pep-0477.txt +++ b/pep-0477.txt @@ -10,6 +10,7 @@ Content-Type: text/x-rst Created: 26-Aug-2014 Post-History: 1-Sep-2014 +Resolution: https://mail.python.org/pipermail/python-dev/2014-September/136238.html Abstract -- Repository URL: http://hg.python.org/peps From solipsis at pitrou.net Thu Sep 4 09:27:41 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 04 Sep 2014 09:27:41 +0200 Subject: [Python-checkins] Daily reference leaks (c69be794a9c6): sum=151928 Message-ID: results for c69be794a9c6 on branch "default" -------------------------------------------- 
test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [-4, 0, 0] references, sum=-4 test_collections leaked [-2, 0, 0] memory blocks, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogB8sGiT', '-x'] From python-checkins at python.org Thu Sep 4 09:29:49 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 09:29:49 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322258=3A_Fix_typo?= =?utf-8?q?_in_Misc/NEWS?= Message-ID: <3hpYZ519FQz7Lk2@mail.python.org> http://hg.python.org/cpython/rev/9d5386a22e68 changeset: 92329:9d5386a22e68 user: Victor Stinner date: Thu Sep 04 09:29:39 2014 +0200 summary: Issue #22258: Fix typo in Misc/NEWS files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,7 +10,7 @@ Core and Builtins ----------------- -- Issue #22258: Fix the the internal function set_inheritable() on Illumos. +- Issue #22258: Fix the internal function set_inheritable() on Illumos. This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() now falls back to the slower ``fcntl()`` (``F_GETFD`` and then ``F_SETFD``). -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 09:38:55 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 09:38:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322333=3A_Add_debu?= =?utf-8?q?g_traces_to_test=5Fthreaded=5Fimport?= Message-ID: <3hpYmb52Vmz7Lk2@mail.python.org> http://hg.python.org/cpython/rev/f8d2834e1269 changeset: 92330:f8d2834e1269 user: Victor Stinner date: Thu Sep 04 09:38:38 2014 +0200 summary: Issue #22333: Add debug traces to test_threaded_import files: Lib/test/test_threaded_import.py | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_threaded_import.py b/Lib/test/test_threaded_import.py --- a/Lib/test/test_threaded_import.py +++ b/Lib/test/test_threaded_import.py @@ -118,8 +118,10 @@ t = threading.Thread(target=task, args=(N, done, done_tasks, errors,)) t.start() - self.assertTrue(done.wait(60)) - self.assertFalse(errors) + completed = done.wait(60) + dbg_info = 'done: %s/%s' % (len(done_tasks), N) + self.assertFalse(errors, dbg_info) + self.assertTrue(completed, dbg_info) if verbose: print("OK.") -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 09:53:32 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 09:53:32 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322334=3A_Add_debu?= =?utf-8?q?g_traces_to_test=5Ftcl?= Message-ID: <3hpZ5S1zS8z7Lk9@mail.python.org> http://hg.python.org/cpython/rev/e6cfbc3e2ed9 changeset: 92331:e6cfbc3e2ed9 user: Victor Stinner date: Thu Sep 04 09:53:16 2014 +0200 summary: Issue #22334: Add debug traces to test_tcl files: Lib/test/test_tcl.py | 8 ++++++-- 1 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -499,8 +499,9 @@ (1, '2', 
(3.4,)) if self.wantobjects else ('1', '2', '3.4')), ] + tk_patchlevel = get_tk_patchlevel() if tcl_version >= (8, 5): - if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5): + if not self.wantobjects or tk_patchlevel < (8, 5, 5): # Before 8.5.5 dicts were converted to lists through string expected = ('12', '\u20ac', '\xe2\x82\xac', '3.4') else: @@ -509,8 +510,11 @@ (call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)), expected), ] + dbg_info = ('want objects? %s, Tcl version: %s, Tk patchlevel: %s' + % (self.wantobjects, tcl_version, tk_patchlevel)) for arg, res in testcases: - self.assertEqual(splitlist(arg), res, msg=arg) + self.assertEqual(splitlist(arg), res, + 'arg=%a, %s' % (arg, dbg_info)) self.assertRaises(TclError, splitlist, '{') def test_split(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 10:42:49 2014 From: python-checkins at python.org (lukasz.langa) Date: Thu, 4 Sep 2014 10:42:49 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_=2319546=3A_onfigparse?= =?utf-8?q?r_exceptions_expose_implementation_details=2E__Patch_by?= Message-ID: <3hpbBK23jbz7Ljn@mail.python.org> http://hg.python.org/cpython/rev/2b14665b7bce changeset: 92332:2b14665b7bce parent: 92328:c69be794a9c6 user: ?ukasz Langa date: Thu Sep 04 01:36:33 2014 -0700 summary: Fix #19546: onfigparser exceptions expose implementation details. Patch by Claudiu Popa. files: Lib/configparser.py | 12 ++-- Lib/test/test_configparser.py | 52 +++++++++++++++++++++++ Misc/NEWS | 4 + 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/Lib/configparser.py b/Lib/configparser.py --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -410,7 +410,7 @@ v = map[var] except KeyError: raise InterpolationMissingOptionError( - option, section, rest, var) + option, section, rest, var) from None if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) @@ -482,7 +482,7 @@ "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( - option, section, rest, ":".join(path)) + option, section, rest, ":".join(path)) from None if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), @@ -515,7 +515,7 @@ value = value % vars except KeyError as e: raise InterpolationMissingOptionError( - option, section, rawval, e.args[0]) + option, section, rawval, e.args[0]) from None else: break if value and "%(" in value: @@ -647,7 +647,7 @@ try: opts = self._sections[section].copy() except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None opts.update(self._defaults) return list(opts.keys()) @@ -876,7 +876,7 @@ try: sectdict = self._sections[section] except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None sectdict[self.optionxform(option)] = value def write(self, fp, space_around_delimiters=True): @@ -917,7 +917,7 @@ try: sectdict = self._sections[section] except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None option = self.optionxform(option) existed = option in sectdict if existed: diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py --- a/Lib/test/test_configparser.py +++ b/Lib/test/test_configparser.py @@ -1763,6 +1763,58 @@ self.assertEqual(s['k2'], 'v2') self.assertEqual(s['k3'], 'v3;#//still v3# and still v3') +class ExceptionContextTestCase(unittest.TestCase): + """ Test that implementation details 
doesn't leak + through raising exceptions. """ + + def test_get_basic_interpolation(self): + parser = configparser.ConfigParser() + parser.read_string(""" + [Paths] + home_dir: /Users + my_dir: %(home_dir1)s/lumberjack + my_pictures: %(my_dir)s/Pictures + """) + cm = self.assertRaises(configparser.InterpolationMissingOptionError) + with cm: + parser.get('Paths', 'my_dir') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_get_extended_interpolation(self): + parser = configparser.ConfigParser( + interpolation=configparser.ExtendedInterpolation()) + parser.read_string(""" + [Paths] + home_dir: /Users + my_dir: ${home_dir1}/lumberjack + my_pictures: ${my_dir}/Pictures + """) + cm = self.assertRaises(configparser.InterpolationMissingOptionError) + with cm: + parser.get('Paths', 'my_dir') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_missing_options(self): + parser = configparser.ConfigParser() + parser.read_string(""" + [Paths] + home_dir: /Users + """) + with self.assertRaises(configparser.NoSectionError) as cm: + parser.options('test') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_missing_section(self): + config = configparser.ConfigParser() + with self.assertRaises(configparser.NoSectionError) as cm: + config.set('Section1', 'an_int', '15') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_remove_option(self): + config = configparser.ConfigParser() + with self.assertRaises(configparser.NoSectionError) as cm: + config.remove_option('Section1', 'an_int') + self.assertIs(cm.exception.__suppress_context__, True) if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,10 @@ Library ------- +- Issue #19546: configparser exceptions no longer expose implementation details. + Chained KeyErrors are removed, which leads to cleaner tracebacks. Patch by + Claudiu Popa. + - Issue #22051: turtledemo no longer reloads examples to re-run them. Initialization of variables and gui setup should be done in main(), which is called each time a demo is run, but not on import. 
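
For readers who want to see what the ``raise ... from None`` changes in this changeset mean in practice, here is a rough, self-contained sketch (not part of the commit). It assumes an interpreter that already includes this fix; on older builds the internal KeyError is still printed as a chained exception. The option name 'home_dir1' is just an arbitrary placeholder chosen to trigger the error.

    # Minimal demonstration of the suppressed exception context from #19546.
    import configparser

    parser = configparser.ConfigParser()
    parser.read_string(
        "[Paths]\n"
        "home_dir: /Users\n"
        "my_dir: %(home_dir1)s/lumberjack\n"
    )

    try:
        # Interpolation references an option that does not exist.
        parser.get('Paths', 'my_dir')
    except configparser.InterpolationMissingOptionError as exc:
        # With the change above, tracebacks no longer show
        # "During handling of the above exception, another exception occurred"
        # for the internal KeyError.
        print(exc.__suppress_context__)   # True on builds with this fix

The new tests verify the same property by using assertRaises as a context manager and checking cm.exception.__suppress_context__.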
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 10:42:50 2014 From: python-checkins at python.org (lukasz.langa) Date: Thu, 4 Sep 2014 10:42:50 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?q?=29=3A_Merge_fix_for_=2319546=3A_configparser_exceptions_leak_i?= =?utf-8?q?mplementation_details?= Message-ID: <3hpbBL4sJgz7LkM@mail.python.org> http://hg.python.org/cpython/rev/554ead559f24 changeset: 92333:554ead559f24 parent: 92331:e6cfbc3e2ed9 parent: 92332:2b14665b7bce user: ?ukasz Langa date: Thu Sep 04 01:42:29 2014 -0700 summary: Merge fix for #19546: configparser exceptions leak implementation details files: Lib/configparser.py | 12 ++-- Lib/test/test_configparser.py | 52 +++++++++++++++++++++++ Misc/NEWS | 4 + 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/Lib/configparser.py b/Lib/configparser.py --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -410,7 +410,7 @@ v = map[var] except KeyError: raise InterpolationMissingOptionError( - option, section, rest, var) + option, section, rest, var) from None if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) @@ -482,7 +482,7 @@ "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( - option, section, rest, ":".join(path)) + option, section, rest, ":".join(path)) from None if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), @@ -515,7 +515,7 @@ value = value % vars except KeyError as e: raise InterpolationMissingOptionError( - option, section, rawval, e.args[0]) + option, section, rawval, e.args[0]) from None else: break if value and "%(" in value: @@ -647,7 +647,7 @@ try: opts = self._sections[section].copy() except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None opts.update(self._defaults) return list(opts.keys()) @@ -876,7 +876,7 @@ try: sectdict = self._sections[section] except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None sectdict[self.optionxform(option)] = value def write(self, fp, space_around_delimiters=True): @@ -917,7 +917,7 @@ try: sectdict = self._sections[section] except KeyError: - raise NoSectionError(section) + raise NoSectionError(section) from None option = self.optionxform(option) existed = option in sectdict if existed: diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py --- a/Lib/test/test_configparser.py +++ b/Lib/test/test_configparser.py @@ -1763,6 +1763,58 @@ self.assertEqual(s['k2'], 'v2') self.assertEqual(s['k3'], 'v3;#//still v3# and still v3') +class ExceptionContextTestCase(unittest.TestCase): + """ Test that implementation details doesn't leak + through raising exceptions. 
""" + + def test_get_basic_interpolation(self): + parser = configparser.ConfigParser() + parser.read_string(""" + [Paths] + home_dir: /Users + my_dir: %(home_dir1)s/lumberjack + my_pictures: %(my_dir)s/Pictures + """) + cm = self.assertRaises(configparser.InterpolationMissingOptionError) + with cm: + parser.get('Paths', 'my_dir') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_get_extended_interpolation(self): + parser = configparser.ConfigParser( + interpolation=configparser.ExtendedInterpolation()) + parser.read_string(""" + [Paths] + home_dir: /Users + my_dir: ${home_dir1}/lumberjack + my_pictures: ${my_dir}/Pictures + """) + cm = self.assertRaises(configparser.InterpolationMissingOptionError) + with cm: + parser.get('Paths', 'my_dir') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_missing_options(self): + parser = configparser.ConfigParser() + parser.read_string(""" + [Paths] + home_dir: /Users + """) + with self.assertRaises(configparser.NoSectionError) as cm: + parser.options('test') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_missing_section(self): + config = configparser.ConfigParser() + with self.assertRaises(configparser.NoSectionError) as cm: + config.set('Section1', 'an_int', '15') + self.assertIs(cm.exception.__suppress_context__, True) + + def test_remove_option(self): + config = configparser.ConfigParser() + with self.assertRaises(configparser.NoSectionError) as cm: + config.remove_option('Section1', 'an_int') + self.assertIs(cm.exception.__suppress_context__, True) if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,10 @@ Library ------- +- Issue #19546: configparser exceptions no longer expose implementation details. + Chained KeyErrors are removed, which leads to cleaner tracebacks. Patch by + Claudiu Popa. + - Issue #22051: turtledemo no longer reloads examples to re-run them. Initialization of variables and gui setup should be done in main(), which is called each time a demo is run, but not on import. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 17:30:34 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 4 Sep 2014 17:30:34 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2321951=3A_Fix_AsOb?= =?utf-8?q?j=28=29_of_the_=5Ftkinter_module=3A_raise_MemoryError_on_memory?= Message-ID: <3hpmDp1Mcsz7LjS@mail.python.org> http://hg.python.org/cpython/rev/9ab404cdcaa1 changeset: 92334:9ab404cdcaa1 user: Victor Stinner date: Thu Sep 04 17:29:52 2014 +0200 summary: Issue #21951: Fix AsObj() of the _tkinter module: raise MemoryError on memory allocation failure files: Modules/_tkinter.c | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -913,8 +913,10 @@ return NULL; } argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *)); - if(!argv) - return 0; + if(!argv) { + PyErr_NoMemory(); + return NULL; + } for (i = 0; i < size; i++) argv[i] = AsObj(PySequence_Fast_GET_ITEM(value,i)); result = Tcl_NewListObj(size, argv); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 17:43:12 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 4 Sep 2014 17:43:12 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_remove_script_?= =?utf-8?q?that_now_lives_at_https=3A//github=2Ecom/python/docsbuild-scrip?= =?utf-8?q?ts?= Message-ID: <3hpmWN2C1Nz7LjR@mail.python.org> http://hg.python.org/cpython/rev/77687e7e6d9d changeset: 92335:77687e7e6d9d branch: 2.7 parent: 92326:db95df7f309e user: Benjamin Peterson date: Mon Sep 01 23:00:11 2014 -0400 summary: remove script that now lives at https://github.com/python/docsbuild-scripts files: Doc/tools/dailybuild.py | 135 ---------------------------- 1 files changed, 0 insertions(+), 135 deletions(-) diff --git a/Doc/tools/dailybuild.py b/Doc/tools/dailybuild.py deleted file mode 100755 --- a/Doc/tools/dailybuild.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Runs the daily build of the Python docs on dinsdale.python.org. -# -# Usages: -# -# dailybuild.py [-q] -# -# without any arguments builds docs for all branches configured in the global -# BRANCHES value. -q selects "quick build", which means to build only HTML. -# -# dailybuild.py [-q] [-d] -# -# builds one version, where is an SVN checkout directory of the -# Python branch to build docs for, and is the directory where the -# result should be placed. If -d is given, the docs are built even if the -# branch is in development mode (i.e. version contains a, b or c). -# -# This script is not run from the checkout, so if you want to change how the -# daily build is run, you must replace it on dinsdale. This is necessary, for -# example, after the release of a new minor version. 
-# -# 03/2010, Georg Brandl - -import os -import sys -import getopt - - -BUILDROOT = '/home/gbrandl/docbuild' -SPHINXBUILD = os.path.join(BUILDROOT, 'sphinx-env/bin/sphinx-build') -WWWROOT = '/data/ftp.python.org/pub/docs.python.org' - -BRANCHES = [ - # checkout, target, isdev - (BUILDROOT + '/python34', WWWROOT + '/3.4', False), - (BUILDROOT + '/python35', WWWROOT + '/3.5', True), - (BUILDROOT + '/python27', WWWROOT + '/2.7', False), -] - - -def _files_changed(old, new): - with open(old, 'rb') as fp1, open(new, 'rb') as fp2: - st1 = os.fstat(fp1.fileno()) - st2 = os.fstat(fp2.fileno()) - if st1.st_size != st2.st_size: - return False - if st1.st_mtime >= st2.st_mtime: - return True - while True: - one = fp1.read(4096) - two = fp2.read(4096) - if one != two: - return False - if one == '': - break - return True - -def build_one(checkout, target, isdev, quick): - print 'Doc autobuild started in %s' % checkout - os.chdir(checkout) - print 'Running hg pull --update' - os.system('hg pull --update') - print 'Running make autobuild' - maketarget = 'autobuild-' + ('html' if quick else - ('dev' if isdev else 'stable')) - if os.WEXITSTATUS(os.system('cd Doc; make SPHINXBUILD=%s %s' % (SPHINXBUILD, maketarget))) == 2: - print '*' * 80 - return - print('Computing changed files') - changed = [] - for dirpath, dirnames, filenames in os.walk('Doc/build/html/'): - dir_rel = dirpath[len('Doc/build/html/'):] - for fn in filenames: - local_path = os.path.join(dirpath, fn) - rel_path = os.path.join(dir_rel, fn) - target_path = os.path.join(target, rel_path) - if (os.path.exists(target_path) and - not _files_changed(target_path, local_path)): - changed.append(rel_path) - print 'Copying HTML files to %s' % target - os.system('cp -a Doc/build/html/* %s' % target) - if not quick: - print 'Copying dist files' - os.system('mkdir -p %s/archives' % target) - os.system('cp -a Doc/dist/* %s/archives' % target) - changed.append('archives/') - for fn in os.listdir(os.path.join(target, 'archives')): - changed.append('archives/' + fn) - print '%s files changed' % len(changed) - if changed: - target_ino = os.stat(target).st_ino - targets_dir = os.path.dirname(target) - prefixes = [] - for fn in os.listdir(targets_dir): - if os.stat(os.path.join(targets_dir, fn)).st_ino == target_ino: - prefixes.append(fn) - to_purge = [] - for prefix in prefixes: - to_purge.extend(prefix + "/" + p for p in changed) - purge_cmd = 'curl -X PURGE "https://docs.python.org/{%s}"' % ','.join(to_purge) - print("Running CDN purge") - os.system(purge_cmd) - print 'Finished' - print '=' * 80 - -def usage(): - print 'Usage:' - print ' %s' % sys.argv[0] - print 'or' - print ' %s [-d] ' % sys.argv[0] - sys.exit(1) - - -if __name__ == '__main__': - try: - opts, args = getopt.getopt(sys.argv[1:], 'dq') - except getopt.error: - usage() - quick = devel = False - for opt, _ in opts: - if opt == '-q': - quick = True - if opt == '-d': - devel = True - if devel and not args: - usage() - if args: - if len(args) != 2: - usage() - build_one(os.path.abspath(args[0]), os.path.abspath(args[1]), devel, quick) - else: - for checkout, dest, devel in BRANCHES: - build_one(checkout, dest, devel, quick) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 17:50:20 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 4 Sep 2014 17:50:20 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_the_correct_ro?= =?utf-8?q?le_is_=3Ameth=3A_not_=3Amethod=3A?= Message-ID: <3hpmgc0WSRz7Ljf@mail.python.org> 
http://hg.python.org/cpython/rev/f17ab9fed3b0 changeset: 92336:f17ab9fed3b0 branch: 2.7 user: Benjamin Peterson date: Thu Sep 04 11:50:14 2014 -0400 summary: the correct role is :meth: not :method: files: Doc/library/argparse.rst | 14 ++++++-------- 1 files changed, 6 insertions(+), 8 deletions(-) diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1213,19 +1213,17 @@ ^^^^^^^^^^^^^^ Action classes implement the Action API, a callable which returns a callable -which processes arguments from the command-line. Any object which follows -this API may be passed as the ``action`` parameter to -:method:`add_argument`. +which processes arguments from the command-line. Any object which follows this +API may be passed as the ``action`` parameter to :meth:`add_argument`. .. class:: Action(option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None) -Action objects are used by an ArgumentParser to represent the information -needed to parse a single argument from one or more strings from the -command line. The Action class must accept the two positional arguments -plus any keyword arguments passed to :method:`ArgumentParser.add_argument` -except for the ``action`` itself. +Action objects are used by an ArgumentParser to represent the information needed +to parse a single argument from one or more strings from the command line. The +Action class must accept the two positional arguments plus any keyword arguments +passed to :meth:`ArgumentParser.add_argument` except for the ``action`` itself. Instances of Action (or return value of any callable to the ``action`` parameter) should have attributes "dest", "option_strings", "default", "type", -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 21:00:19 2014 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 4 Sep 2014 21:00:19 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2320421=3A_Add_a_?= =?utf-8?q?=2Eversion=28=29_method_to_SSL_sockets_exposing_the_actual?= Message-ID: <3hprtq1hdhz7LlY@mail.python.org> http://hg.python.org/cpython/rev/648685f8d5e9 changeset: 92337:648685f8d5e9 parent: 92334:9ab404cdcaa1 user: Antoine Pitrou date: Thu Sep 04 21:00:10 2014 +0200 summary: Issue #20421: Add a .version() method to SSL sockets exposing the actual protocol version in use. files: Doc/library/ssl.rst | 18 ++++++- Lib/ssl.py | 9 ++++ Lib/test/test_ssl.py | 67 ++++++++++++++++++++++--------- Misc/NEWS | 3 + Modules/_ssl.c | 13 ++++++ 5 files changed, 86 insertions(+), 24 deletions(-) diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -910,10 +910,10 @@ .. method:: SSLSocket.selected_npn_protocol() - Returns the protocol that was selected during the TLS/SSL handshake. If - :meth:`SSLContext.set_npn_protocols` was not called, or if the other party - does not support NPN, or if the handshake has not yet happened, this will - return ``None``. + Returns the higher-level protocol that was selected during the TLS/SSL + handshake. If :meth:`SSLContext.set_npn_protocols` was not called, or + if the other party does not support NPN, or if the handshake has not yet + happened, this will return ``None``. .. versionadded:: 3.3 @@ -925,6 +925,16 @@ returned socket should always be used for further communication with the other side of the connection, rather than the original socket. +.. 
method:: SSLSocket.version() + + Return the actual SSL protocol version negotiated by the connection + as a string, or ``None`` is no secure connection is established. + As of this writing, possible return values include ``"SSLv2"``, + ``"SSLv3"``, ``"TLSv1"``, ``"TLSv1.1"`` and ``"TLSv1.2"``. + Recent OpenSSL versions may define more return values. + + .. versionadded:: 3.5 + .. attribute:: SSLSocket.context The :class:`SSLContext` object this SSL socket is tied to. If the SSL diff --git a/Lib/ssl.py b/Lib/ssl.py --- a/Lib/ssl.py +++ b/Lib/ssl.py @@ -861,6 +861,15 @@ return None return self._sslobj.tls_unique_cb() + def version(self): + """ + Return a string identifying the protocol version used by the + current SSL channel, or None if there is no established channel. + """ + if self._sslobj is None: + return None + return self._sslobj.version() + def wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -1942,7 +1942,8 @@ 'compression': s.compression(), 'cipher': s.cipher(), 'peercert': s.getpeercert(), - 'client_npn_protocol': s.selected_npn_protocol() + 'client_npn_protocol': s.selected_npn_protocol(), + 'version': s.version(), }) s.close() stats['server_npn_protocols'] = server.selected_protocols @@ -1950,6 +1951,13 @@ def try_protocol_combo(server_protocol, client_protocol, expect_success, certsreqs=None, server_options=0, client_options=0): + """ + Try to SSL-connect using *client_protocol* to *server_protocol*. + If *expect_success* is true, assert that the connection succeeds, + if it's false, assert that the connection fails. + Also, if *expect_success* is a string, assert that it is the protocol + version actually used by the connection. + """ if certsreqs is None: certsreqs = ssl.CERT_NONE certtype = { @@ -1979,8 +1987,8 @@ ctx.load_cert_chain(CERTFILE) ctx.load_verify_locations(CERTFILE) try: - server_params_test(client_context, server_context, - chatty=False, connectionchatty=False) + stats = server_params_test(client_context, server_context, + chatty=False, connectionchatty=False) # Protocol mismatch can result in either an SSLError, or a # "Connection reset by peer" error. except ssl.SSLError: @@ -1995,6 +2003,10 @@ "Client protocol %s succeeded with server protocol %s!" 
% (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) + elif (expect_success is not True + and expect_success != stats['version']): + raise AssertionError("version mismatch: expected %r, got %r" + % (expect_success, stats['version'])) class ThreadedTests(unittest.TestCase): @@ -2225,17 +2237,17 @@ sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3') try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1') - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) # Server with specific SSL options try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, @@ -2252,9 +2264,9 @@ """Connecting to an SSLv3 server with various client options""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3') + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False, @@ -2262,7 +2274,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True, + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3', client_options=ssl.OP_NO_SSLv2) @skip_if_broken_ubuntu_ssl @@ -2270,9 +2282,9 @@ """Connecting to a TLSv1 server with various client options""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1') + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) if hasattr(ssl, 
'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) @@ -2287,14 +2299,14 @@ Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True) + try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_1) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False) @@ -2307,7 +2319,7 @@ Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True, + try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2', server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2, client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,) if hasattr(ssl, 'PROTOCOL_SSLv2'): @@ -2316,7 +2328,7 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_2) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2') try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) @@ -2697,6 +2709,21 @@ s.connect((HOST, server.port)) self.assertIn("no shared cipher", str(server.conn_errors[0])) + def test_version_basic(self): + """ + Basic tests for SSLSocket.version(). + More tests are done in the test_protocol_*() methods. + """ + context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) + with ThreadedEchoServer(CERTFILE, + ssl_version=ssl.PROTOCOL_TLSv1, + chatty=False) as server: + with context.wrap_socket(socket.socket()) as s: + self.assertIs(s.version(), None) + s.connect((HOST, server.port)) + self.assertEqual(s.version(), "TLSv1") + self.assertIs(s.version(), None) + @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL") def test_default_ecdh_curve(self): # Issue #21015: elliptic curve-based Diffie Hellman key exchange diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -129,6 +129,9 @@ Library ------- +- Issue #20421: Add a .version() method to SSL sockets exposing the actual + protocol version in use. + - Issue #19546: configparser exceptions no longer expose implementation details. Chained KeyErrors are removed, which leads to cleaner tracebacks. Patch by Claudiu Popa. 
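
As a quick illustration of the new method described in the NEWS entry above, client code might use it as follows. This is only a sketch, not part of the changeset: it needs a build that includes this change plus network access, the host name is just an example, and with the default CERT_NONE setting no certificate validation is performed.

    # Query the protocol version negotiated for a TLS connection.
    import socket
    import ssl

    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)   # negotiate the highest shared protocol
    with socket.create_connection(('www.python.org', 443)) as raw_sock:
        with context.wrap_socket(raw_sock, server_hostname='www.python.org') as tls_sock:
            # version() returns None while no secure channel is established;
            # after the handshake it reports e.g. 'TLSv1.2'.
            print(tls_sock.version())

As the test_version_basic() test above indicates, version() reports None again once no secure connection is established (before the handshake and after the socket is closed).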
diff --git a/Modules/_ssl.c b/Modules/_ssl.c --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -1402,6 +1402,18 @@ return NULL; } +static PyObject *PySSL_version(PySSLSocket *self) +{ + const char *version; + + if (self->ssl == NULL) + Py_RETURN_NONE; + version = SSL_get_version(self->ssl); + if (!strcmp(version, "unknown")) + Py_RETURN_NONE; + return PyUnicode_FromString(version); +} + #ifdef OPENSSL_NPN_NEGOTIATED static PyObject *PySSL_selected_npn_protocol(PySSLSocket *self) { const unsigned char *out; @@ -1939,6 +1951,7 @@ {"peer_certificate", (PyCFunction)PySSL_peercert, METH_VARARGS, PySSL_peercert_doc}, {"cipher", (PyCFunction)PySSL_cipher, METH_NOARGS}, + {"version", (PyCFunction)PySSL_version, METH_NOARGS}, #ifdef OPENSSL_NPN_NEGOTIATED {"selected_npn_protocol", (PyCFunction)PySSL_selected_npn_protocol, METH_NOARGS}, #endif -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 22:33:37 2014 From: python-checkins at python.org (alex.gaynor) Date: Thu, 4 Sep 2014 22:33:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIwNDIx?= =?utf-8?q?=3A_Add_a_=2Eversion=28=29_method_to_SSL_sockets_exposing_the_a?= =?utf-8?q?ctual?= Message-ID: <3hptyT0fJ5z7Ljp@mail.python.org> http://hg.python.org/cpython/rev/16c86a6bdbe2 changeset: 92338:16c86a6bdbe2 branch: 2.7 parent: 92336:f17ab9fed3b0 user: Alex Gaynor date: Thu Sep 04 13:33:22 2014 -0700 summary: Issue #20421: Add a .version() method to SSL sockets exposing the actual protocol version in use. Backport from default. files: Doc/library/ssl.rst | 18 ++++++- Lib/ssl.py | 9 ++++ Lib/test/test_ssl.py | 67 ++++++++++++++++++++++--------- Misc/NEWS | 1 + Modules/_ssl.c | 13 ++++++ 5 files changed, 84 insertions(+), 24 deletions(-) diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -867,10 +867,10 @@ .. method:: SSLSocket.selected_npn_protocol() - Returns the protocol that was selected during the TLS/SSL handshake. If - :meth:`SSLContext.set_npn_protocols` was not called, or if the other party - does not support NPN, or if the handshake has not yet happened, this will - return ``None``. + Returns the higher-level protocol that was selected during the TLS/SSL + handshake. If :meth:`SSLContext.set_npn_protocols` was not called, or + if the other party does not support NPN, or if the handshake has not yet + happened, this will return ``None``. .. versionadded:: 2.7.9 @@ -882,6 +882,16 @@ returned socket should always be used for further communication with the other side of the connection, rather than the original socket. +.. method:: SSLSocket.version() + + Return the actual SSL protocol version negotiated by the connection + as a string, or ``None`` is no secure connection is established. + As of this writing, possible return values include ``"SSLv2"``, + ``"SSLv3"``, ``"TLSv1"``, ``"TLSv1.1"`` and ``"TLSv1.2"``. + Recent OpenSSL versions may define more return values. + + .. versionadded:: 3.5 + .. attribute:: SSLSocket.context The :class:`SSLContext` object this SSL socket is tied to. If the SSL diff --git a/Lib/ssl.py b/Lib/ssl.py --- a/Lib/ssl.py +++ b/Lib/ssl.py @@ -862,6 +862,15 @@ return None return self._sslobj.tls_unique_cb() + def version(self): + """ + Return a string identifying the protocol version used by the + current SSL channel, or None if there is no established channel. 
+ """ + if self._sslobj is None: + return None + return self._sslobj.version() + def wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -1904,7 +1904,8 @@ 'compression': s.compression(), 'cipher': s.cipher(), 'peercert': s.getpeercert(), - 'client_npn_protocol': s.selected_npn_protocol() + 'client_npn_protocol': s.selected_npn_protocol(), + 'version': s.version(), }) s.close() stats['server_npn_protocols'] = server.selected_protocols @@ -1912,6 +1913,13 @@ def try_protocol_combo(server_protocol, client_protocol, expect_success, certsreqs=None, server_options=0, client_options=0): + """ + Try to SSL-connect using *client_protocol* to *server_protocol*. + If *expect_success* is true, assert that the connection succeeds, + if it's false, assert that the connection fails. + Also, if *expect_success* is a string, assert that it is the protocol + version actually used by the connection. + """ if certsreqs is None: certsreqs = ssl.CERT_NONE certtype = { @@ -1941,8 +1949,8 @@ ctx.load_cert_chain(CERTFILE) ctx.load_verify_locations(CERTFILE) try: - server_params_test(client_context, server_context, - chatty=False, connectionchatty=False) + stats = server_params_test(client_context, server_context, + chatty=False, connectionchatty=False) # Protocol mismatch can result in either an SSLError, or a # "Connection reset by peer" error. except ssl.SSLError: @@ -1957,6 +1965,10 @@ "Client protocol %s succeeded with server protocol %s!" % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) + elif (expect_success is not True + and expect_success != stats['version']): + raise AssertionError("version mismatch: expected %r, got %r" + % (expect_success, stats['version'])) class ThreadedTests(unittest.TestCase): @@ -2186,17 +2198,17 @@ sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3') try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1') - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) # Server with specific SSL options try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, @@ -2213,9 +2225,9 @@ """Connecting to an SSLv3 server with various client options""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) - 
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3') + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False, @@ -2223,7 +2235,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs - try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True, + try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3', client_options=ssl.OP_NO_SSLv2) @skip_if_broken_ubuntu_ssl @@ -2231,9 +2243,9 @@ """Connecting to a TLSv1 server with various client options""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) - try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1') + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) + try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) @@ -2248,14 +2260,14 @@ Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True) + try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_1) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False) @@ -2268,7 +2280,7 @@ Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") - try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True, + try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2', server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2, client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,) if hasattr(ssl, 'PROTOCOL_SSLv2'): @@ -2277,7 +2289,7 @@ try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_2) - try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True) + try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2') try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) @@ -2619,6 +2631,21 @@ s.connect((HOST, server.port)) self.assertIn("no shared cipher", str(server.conn_errors[0])) + def 
test_version_basic(self): + """ + Basic tests for SSLSocket.version(). + More tests are done in the test_protocol_*() methods. + """ + context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) + with ThreadedEchoServer(CERTFILE, + ssl_version=ssl.PROTOCOL_TLSv1, + chatty=False) as server: + with closing(context.wrap_socket(socket.socket())) as s: + self.assertIs(s.version(), None) + s.connect((HOST, server.port)) + self.assertEqual(s.version(), "TLSv1") + self.assertIs(s.version(), None) + @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL") def test_default_ecdh_curve(self): # Issue #21015: elliptic curve-based Diffie Hellman key exchange diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -171,6 +171,7 @@ ------- - Issue #22160: The bundled version of OpenSSL has been updated to 1.0.1i. +version in use. What's New in Python 2.7.8? diff --git a/Modules/_ssl.c b/Modules/_ssl.c --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -1384,6 +1384,18 @@ return NULL; } +static PyObject *PySSL_version(PySSLSocket *self) +{ + const char *version; + + if (self->ssl == NULL) + Py_RETURN_NONE; + version = SSL_get_version(self->ssl); + if (!strcmp(version, "unknown")) + Py_RETURN_NONE; + return PyUnicode_FromString(version); +} + #ifdef OPENSSL_NPN_NEGOTIATED static PyObject *PySSL_selected_npn_protocol(PySSLSocket *self) { const unsigned char *out; @@ -1907,6 +1919,7 @@ {"peer_certificate", (PyCFunction)PySSL_peercert, METH_VARARGS, PySSL_peercert_doc}, {"cipher", (PyCFunction)PySSL_cipher, METH_NOARGS}, + {"version", (PyCFunction)PySSL_version, METH_NOARGS}, #ifdef OPENSSL_NPN_NEGOTIATED {"selected_npn_protocol", (PyCFunction)PySSL_selected_npn_protocol, METH_NOARGS}, #endif -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 4 22:37:18 2014 From: python-checkins at python.org (alex.gaynor) Date: Thu, 4 Sep 2014 22:37:18 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Fixed_the_vers?= =?utf-8?q?ionadded_in_the_docs_for_the_backport_in_16c86a6bdbe2?= Message-ID: <3hpv2k2D7Pz7Ljf@mail.python.org> http://hg.python.org/cpython/rev/390e910b2f96 changeset: 92339:390e910b2f96 branch: 2.7 user: Alex Gaynor date: Thu Sep 04 13:37:07 2014 -0700 summary: Fixed the versionadded in the docs for the backport in 16c86a6bdbe2 files: Doc/library/ssl.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -890,7 +890,7 @@ ``"SSLv3"``, ``"TLSv1"``, ``"TLSv1.1"`` and ``"TLSv1.2"``. Recent OpenSSL versions may define more return values. - .. versionadded:: 3.5 + .. versionadded:: 2.7.9 .. attribute:: SSLSocket.context -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 01:16:44 2014 From: python-checkins at python.org (victor.stinner) Date: Fri, 5 Sep 2014 01:16:44 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322290=3A_PyObject?= =?utf-8?q?=5FCall=28=29_now_fails_with_an_assertion_error_when_called?= Message-ID: <3hpyZh1tkjz7LjT@mail.python.org> http://hg.python.org/cpython/rev/16e3d240456f changeset: 92340:16e3d240456f parent: 92337:648685f8d5e9 user: Victor Stinner date: Fri Sep 05 01:10:29 2014 +0200 summary: Issue #22290: PyObject_Call() now fails with an assertion error when called with an exception set. This new assertion helps to understand if the exception was already set before calling the function or raised by the function. 
files: Objects/abstract.c | 5 +++++ 1 files changed, 5 insertions(+), 0 deletions(-) diff --git a/Objects/abstract.c b/Objects/abstract.c --- a/Objects/abstract.c +++ b/Objects/abstract.c @@ -2074,6 +2074,11 @@ { ternaryfunc call; + /* PyObject_Call() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller looses its exception */ + assert(!PyErr_Occurred()); + if ((call = func->ob_type->tp_call) != NULL) { PyObject *result; if (Py_EnterRecursiveCall(" while calling a Python object")) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 03:01:15 2014 From: python-checkins at python.org (terry.reedy) Date: Fri, 5 Sep 2014 03:01:15 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxOTMz?= =?utf-8?q?=3A_Make_Control-Mousewhell_only_change_font_size_and_not_also_?= =?utf-8?q?scroll=2E?= Message-ID: <3hq0vH5YKwz7LjT@mail.python.org> http://hg.python.org/cpython/rev/ecc98ea50bc3 changeset: 92341:ecc98ea50bc3 branch: 3.4 parent: 92327:b2e9d845bc6c user: Terry Jan Reedy date: Thu Sep 04 20:59:41 2014 -0400 summary: Issue #21933: Make Control-Mousewhell only change font size and not also scroll. Original patch by Serhiy Storchaka. files: Lib/turtledemo/__main__.py | 40 +++++++++++++------------ 1 files changed, 21 insertions(+), 19 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -19,8 +19,9 @@ The demo viewer windows can be resized. The divider between text and canvas can be moved by grabbing it with the mouse. The text font - size can be changed from the menu and with Control/Command- '-'/'+'. - It can also be changed on most systems with Control-mousewheel. + size can be changed from the menu and with Control/Command '-'/'+'. + It can also be changed on most systems with Control-mousewheel + when the mouse is over the text. Press START button to start the demo. Stop execution by pressing the STOP button. 
@@ -210,10 +211,19 @@ self.hbar = hbar = Scrollbar(text_frame, name='hbar', orient=HORIZONTAL) hbar['command'] = text.xview hbar.pack(side=BOTTOM, fill=X) + text['yscrollcommand'] = vbar.set + text['xscrollcommand'] = hbar.set text['font'] = tuple(txtfont) - text['yscrollcommand'] = vbar.set - text['xscrollcommand'] = hbar.set + shortcut = 'Command' if darwin else 'Control' + text.bind_all('<%s-minus>' % shortcut, self.decrease_size) + text.bind_all('<%s-underscore>' % shortcut, self.decrease_size) + text.bind_all('<%s-equal>' % shortcut, self.increase_size) + text.bind_all('<%s-plus>' % shortcut, self.increase_size) + text.bind('', self.update_mousewheel) + text.bind('', self.increase_size) + text.bind('', self.decrease_size) + text.pack(side=LEFT, fill=BOTH, expand=1) return text_frame @@ -224,7 +234,7 @@ turtle._Screen._canvas = self._canvas = canvas = turtle.ScrolledCanvas( root, 800, 600, self.canvwidth, self.canvheight) canvas.adjustScrolls() - self.makeBindings(canvas._rootwindow) + canvas._rootwindow.bind('', self.onResize) canvas._canvas['borderwidth'] = 0 self.screen = _s_ = turtle.Screen() @@ -233,18 +243,6 @@ turtle.RawTurtle.screens = [_s_] return canvas - def makeBindings(self, widget): - widget.bind('', self.onResize) - - shortcut = 'Command' if darwin else 'Control' - widget.bind_all('<%s-minus>' % shortcut, self.decrease_size) - widget.bind_all('<%s-underscore>' % shortcut, self.decrease_size) - widget.bind_all('<%s-equal>' % shortcut, self.increase_size) - widget.bind_all('<%s-plus>' % shortcut, self.increase_size) - widget.bind_all('', self.update_mousewheel) - widget.bind('', self.increase_size) - widget.bind('', self.decrease_size) - def set_txtsize(self, size): txtfont[1] = size self.text['font'] = tuple(txtfont) @@ -252,15 +250,19 @@ def decrease_size(self, dummy=None): self.set_txtsize(max(txtfont[1] - 1, MINIMUM_FONT_SIZE)) + return 'break' def increase_size(self, dummy=None): self.set_txtsize(min(txtfont[1] + 1, MAXIMUM_FONT_SIZE)) + return 'break' def update_mousewheel(self, event): # For wheel up, event.delte = 120 on Windows, -1 on darwin. # X-11 sends Control-Button-4 event instead. - (self.decrease_size() if (event.delta < 0 and not darwin) - else self.increase_size()) + if (event.delta < 0) == (not darwin): + return self.decrease_size() + else: + return self.increase_size() def configGUI(self, start, stop, clear, txt="", color="blue"): self.start_btn.config(state=start, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 03:01:17 2014 From: python-checkins at python.org (terry.reedy) Date: Fri, 5 Sep 2014 03:01:17 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E4?= Message-ID: <3hq0vK1BzYz7Ljr@mail.python.org> http://hg.python.org/cpython/rev/034ebeb8be2d changeset: 92342:034ebeb8be2d parent: 92340:16e3d240456f parent: 92341:ecc98ea50bc3 user: Terry Jan Reedy date: Thu Sep 04 21:00:52 2014 -0400 summary: Merge with 3.4 files: Lib/turtledemo/__main__.py | 40 +++++++++++++------------ 1 files changed, 21 insertions(+), 19 deletions(-) diff --git a/Lib/turtledemo/__main__.py b/Lib/turtledemo/__main__.py --- a/Lib/turtledemo/__main__.py +++ b/Lib/turtledemo/__main__.py @@ -19,8 +19,9 @@ The demo viewer windows can be resized. The divider between text and canvas can be moved by grabbing it with the mouse. The text font - size can be changed from the menu and with Control/Command- '-'/'+'. 
- It can also be changed on most systems with Control-mousewheel. + size can be changed from the menu and with Control/Command '-'/'+'. + It can also be changed on most systems with Control-mousewheel + when the mouse is over the text. Press START button to start the demo. Stop execution by pressing the STOP button. @@ -210,10 +211,19 @@ self.hbar = hbar = Scrollbar(text_frame, name='hbar', orient=HORIZONTAL) hbar['command'] = text.xview hbar.pack(side=BOTTOM, fill=X) + text['yscrollcommand'] = vbar.set + text['xscrollcommand'] = hbar.set text['font'] = tuple(txtfont) - text['yscrollcommand'] = vbar.set - text['xscrollcommand'] = hbar.set + shortcut = 'Command' if darwin else 'Control' + text.bind_all('<%s-minus>' % shortcut, self.decrease_size) + text.bind_all('<%s-underscore>' % shortcut, self.decrease_size) + text.bind_all('<%s-equal>' % shortcut, self.increase_size) + text.bind_all('<%s-plus>' % shortcut, self.increase_size) + text.bind('', self.update_mousewheel) + text.bind('', self.increase_size) + text.bind('', self.decrease_size) + text.pack(side=LEFT, fill=BOTH, expand=1) return text_frame @@ -224,7 +234,7 @@ turtle._Screen._canvas = self._canvas = canvas = turtle.ScrolledCanvas( root, 800, 600, self.canvwidth, self.canvheight) canvas.adjustScrolls() - self.makeBindings(canvas._rootwindow) + canvas._rootwindow.bind('', self.onResize) canvas._canvas['borderwidth'] = 0 self.screen = _s_ = turtle.Screen() @@ -233,18 +243,6 @@ turtle.RawTurtle.screens = [_s_] return canvas - def makeBindings(self, widget): - widget.bind('', self.onResize) - - shortcut = 'Command' if darwin else 'Control' - widget.bind_all('<%s-minus>' % shortcut, self.decrease_size) - widget.bind_all('<%s-underscore>' % shortcut, self.decrease_size) - widget.bind_all('<%s-equal>' % shortcut, self.increase_size) - widget.bind_all('<%s-plus>' % shortcut, self.increase_size) - widget.bind_all('', self.update_mousewheel) - widget.bind('', self.increase_size) - widget.bind('', self.decrease_size) - def set_txtsize(self, size): txtfont[1] = size self.text['font'] = tuple(txtfont) @@ -252,15 +250,19 @@ def decrease_size(self, dummy=None): self.set_txtsize(max(txtfont[1] - 1, MINIMUM_FONT_SIZE)) + return 'break' def increase_size(self, dummy=None): self.set_txtsize(min(txtfont[1] + 1, MAXIMUM_FONT_SIZE)) + return 'break' def update_mousewheel(self, event): # For wheel up, event.delte = 120 on Windows, -1 on darwin. # X-11 sends Control-Button-4 event instead. 
- (self.decrease_size() if (event.delta < 0 and not darwin) - else self.increase_size()) + if (event.delta < 0) == (not darwin): + return self.decrease_size() + else: + return self.increase_size() def configGUI(self, start, stop, clear, txt="", color="blue"): self.start_btn.config(state=start, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 04:41:04 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 5 Sep 2014 04:41:04 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_fix_malformed_?= =?utf-8?q?use_cmdoption_directive?= Message-ID: <3hq36S4nbjz7LjV@mail.python.org> http://hg.python.org/cpython/rev/41617012fa1e changeset: 92343:41617012fa1e branch: 2.7 parent: 92339:390e910b2f96 user: Benjamin Peterson date: Thu Sep 04 22:40:34 2014 -0400 summary: fix malformed use cmdoption directive files: Doc/library/compileall.rst | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst --- a/Doc/library/compileall.rst +++ b/Doc/library/compileall.rst @@ -20,7 +20,8 @@ .. program:: compileall -.. cmdoption:: [directory|file]... +.. cmdoption:: directory ... + file ... Positional arguments are files to compile or directories that contain source files, traversed recursively. If no argument is given, behave as if -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 05:07:14 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 5 Sep 2014 05:07:14 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_remove_automat?= =?utf-8?q?ic_svn_checkout_of_sphinx_and_its_deps_from_Doc/Makefile?= Message-ID: <3hq3hf3GYDz7LjM@mail.python.org> http://hg.python.org/cpython/rev/48033d90c61d changeset: 92344:48033d90c61d branch: 2.7 user: Benjamin Peterson date: Thu Sep 04 23:07:03 2014 -0400 summary: remove automatic svn checkout of sphinx and its deps from Doc/Makefile Users are now required to install Sphinx themselves as is already the case with the Python 3 branch. files: .hgignore | 5 - Doc/Makefile | 54 ++++-------- Doc/README.txt | 54 +++++++++---- Doc/make.bat | 152 ++++++++++++++++++++++++++---------- Misc/NEWS | 3 + 5 files changed, 167 insertions(+), 101 deletions(-) diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -21,11 +21,6 @@ reflog.txt$ tags$ Lib/plat-mac/errors.rsrc.df.rsrc -Doc/tools/sphinx/ -Doc/tools/docutils/ -Doc/tools/jinja/ -Doc/tools/jinja2/ -Doc/tools/pygments/ Misc/python.pc Modules/Setup$ Modules/Setup.config diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -5,7 +5,7 @@ # You can set these variables from the command line. PYTHON = python -SVNROOT = http://svn.python.org/projects +SPHINXBUILD = sphinx-build SPHINXOPTS = PAPER = SOURCES = @@ -14,18 +14,19 @@ ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ $(SPHINXOPTS) . 
build/$(BUILDER) $(SOURCES) -.PHONY: help checkout update build html htmlhelp latex text changes linkcheck \ +.PHONY: help build html htmlhelp latex text changes linkcheck \ suspicious coverage doctest pydoc-topics htmlview clean dist check serve \ autobuild-dev autobuild-stable help: @echo "Please use \`make ' where is one of" @echo " clean to remove build files" - @echo " update to update build tools" @echo " html to make standalone HTML files" + @echo " htmlview to open the index page built by the html target in your browser" @echo " htmlhelp to make HTML files and a HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " text to make plain text files" + @echo " epub to make EPUB files" @echo " changes to make an overview over all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " coverage to check documentation coverage for library and C API" @@ -36,30 +37,8 @@ @echo " check to run a check for frequent markup errors" @echo " serve to serve the documentation on the localhost (8000)" -# Note: if you update versions here, do the same in make.bat and README.txt -checkout: - @if [ ! -d tools/sphinx ]; then \ - echo "Checking out Sphinx..."; \ - svn checkout $(SVNROOT)/external/Sphinx-1.0.7/sphinx tools/sphinx; \ - fi - @if [ ! -d tools/docutils ]; then \ - echo "Checking out Docutils..."; \ - svn checkout $(SVNROOT)/external/docutils-0.6/docutils tools/docutils; \ - fi - @if [ ! -d tools/jinja2 ]; then \ - echo "Checking out Jinja..."; \ - svn checkout $(SVNROOT)/external/Jinja-2.3.1/jinja2 tools/jinja2; \ - fi - @if [ ! -d tools/pygments ]; then \ - echo "Checking out Pygments..."; \ - svn checkout $(SVNROOT)/external/Pygments-1.3.1/pygments tools/pygments; \ - fi - -update: clean checkout - -build: checkout - mkdir -p build/$(BUILDER) build/doctrees - $(PYTHON) tools/sphinx-build.py $(ALLSPHINXOPTS) +build: + $(SPHINXBUILD) $(ALLSPHINXOPTS) @echo html: BUILDER = html @@ -81,6 +60,10 @@ text: build @echo "Build finished; the text files are in build/text." +epub: BUILDER = epub +epub: build + @echo "Build finished; the epub files are in build/epub." + changes: BUILDER = changes changes: build @echo "The overview file is in build/changes." 
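With the checkout and update targets gone, the Makefile now assumes that a working ``sphinx-build`` is already installed, either on PATH or named via the SPHINXBUILD variable. A minimal pre-flight check in Python might look like the sketch below; the helper name ``require_sphinx`` and the error message are illustrative, not part of the patch::

    import subprocess
    from distutils.spawn import find_executable

    def require_sphinx(sphinxbuild='sphinx-build'):
        # Fail early with a readable message instead of letting make die
        # part-way through the build when the tool is missing.
        if find_executable(sphinxbuild) is None:
            raise SystemExit('%s not found; install Sphinx from PyPI, '
                             'e.g. "pip install sphinx"' % sphinxbuild)
        subprocess.check_call([sphinxbuild, '--version'])

    require_sphinx()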
@@ -114,17 +97,13 @@ pydoc-topics: BUILDER = pydoc-topics pydoc-topics: build @echo "Building finished; now copy build/pydoc-topics/topics.py" \ - "to Lib/pydoc_data/topics.py" + "to ../Lib/pydoc_data/topics.py" htmlview: html $(PYTHON) -c "import webbrowser; webbrowser.open('build/html/index.html')" clean: -rm -rf build/* - -rm -rf tools/sphinx - -rm -rf tools/pygments - -rm -rf tools/jinja2 - -rm -rf tools/docutils dist: rm -rf dist @@ -164,6 +143,11 @@ cp build/latex/docs-pdf.zip dist/python-$(DISTVERSION)-docs-pdf-letter.zip cp build/latex/docs-pdf.tar.bz2 dist/python-$(DISTVERSION)-docs-pdf-letter.tar.bz2 + # copy the epub build + rm -rf build/epub + make epub + cp -pPR build/epub/Python.epub dist/python-$(DISTVERSION)-docs.epub + check: $(PYTHON) tools/rstlint.py -i tools @@ -174,7 +158,6 @@ # for development releases: always build autobuild-dev: - make update make dist SPHINXOPTS='-A daily=1 -A versionswitcher=1' -make suspicious @@ -182,10 +165,9 @@ autobuild-html: make html SPHINXOPTS='-A daily=1 -A versionswitcher=1' -# for stable releases: only build if not in pre-release stage (alpha, beta) -# release candidate downloads are okay, since the stable tree can be in that stage +# for stable releases: only build if not in pre-release stage (alpha, beta, rc) autobuild-stable: - @case $(DISTVERSION) in *[ab]*) \ + @case $(DISTVERSION) in *[abc]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ exit 1;; \ esac diff --git a/Doc/README.txt b/Doc/README.txt --- a/Doc/README.txt +++ b/Doc/README.txt @@ -3,54 +3,69 @@ This directory contains the reStructuredText (reST) sources to the Python documentation. You don't need to build them yourself, prebuilt versions are -available at https://docs.python.org/2/download.html +available at . -Documentation on the authoring Python documentation, including information about +Documentation on authoring Python documentation, including information about both style and markup, is available in the "Documenting Python" chapter of the -documentation. +developers guide . Building the docs ================= -You need to have Python 2 installed; the toolset used to build the -docs is written in Python. It is called *Sphinx*, it is not included in this -tree, but maintained separately. Also needed are the docutils, supplying the -base markup that Sphinx uses, Jinja, a templating engine, and optionally -Pygments, a code highlighter. +You need to have Sphinx installed; it is the toolset +used to build the docs. It is not included in this tree, but maintained +separately and available from PyPI . Using make ---------- -Luckily, a Makefile has been prepared so that on Unix, provided you have -installed Python and Subversion, you can just run :: +A Makefile has been prepared so that on Unix, provided you have installed +Sphinx, you can just run :: make html -to check out the necessary toolset in the `tools/` subdirectory and build the -HTML output files. To view the generated HTML, point your favorite browser at -the top-level index `build/html/index.html` after running "make". +to build the HTML output files. On Windows, we try to emulate the Makefile as closely as possible with a ``make.bat`` file. +To use a Python interpreter that's not called ``python``, use the standard +way to set Makefile variables, using e.g. :: + + make html PYTHON=python3 + +On Windows, set the PYTHON environment variable instead. + +To use a specific sphinx-build (something other than ``sphinx-build``), set +the SPHINXBUILD variable. 
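Under the hood the ``build`` rule is now only a thin wrapper around ``sphinx-build``, so the docs can also be built from a script without make. The following sketch assumes the ``html`` builder and the conventional ``build/html`` output directory; both are choices made for the example, not requirements of the patch::

    import subprocess

    builder = 'html'
    # Run from the Doc directory; this is what "make html" boils down to
    # when SPHINXBUILD is the default sphinx-build on PATH.
    subprocess.check_call(
        ['sphinx-build', '-b', builder, '.', 'build/' + builder])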
+ Available make targets are: + * "clean", which removes all build files. + * "html", which builds standalone HTML files for offline viewing. + * "htmlview", which re-uses the "html" builder, but then opens the main page + in your default web browser. + * "htmlhelp", which builds HTML files and a HTML Help project file usable to convert them into a single Compiled HTML (.chm) file -- these are popular under Microsoft Windows, but very handy on every platform. - To create the CHM file, you need to run the Microsoft HTML Help Workshop over - the generated project (.hhp) file. + To create the CHM file, you need to run the Microsoft HTML Help Workshop + over the generated project (.hhp) file. The make.bat script does this for + you on Windows. * "latex", which builds LaTeX source files as input to "pdflatex" to produce PDF documents. * "text", which builds a plain text file for each source file. + * "epub", which builds an EPUB document, suitable to be viewed on e-book + readers. + * "linkcheck", which checks all external references to see whether they are broken, redirected or malformed, and outputs this information to stdout as well as a plain-text (.txt) file. @@ -70,7 +85,12 @@ * "suspicious", which checks the parsed markup for text that looks like malformed and thus unconverted reST. -A "make update" updates the Subversion checkouts in `tools/`. + * "check", which checks for frequent markup errors. + + * "serve", which serves the build/html directory on port 8000. + + * "dist", (Unix only) which creates distributable archives of HTML, text, + PDF, and EPUB builds. Without make @@ -78,7 +98,7 @@ Install the Sphinx package and its dependencies from PyPI. -Then, from the ``Docs`` directory, run :: +Then, from the ``Doc`` directory, run :: sphinx-build -b . build/ diff --git a/Doc/make.bat b/Doc/make.bat --- a/Doc/make.bat +++ b/Doc/make.bat @@ -1,58 +1,124 @@ -@@echo off + at echo off setlocal -set SVNROOT=http://svn.python.org/projects -if "%PYTHON%" EQU "" set PYTHON=..\pcbuild\python -if "%HTMLHELP%" EQU "" set HTMLHELP=%ProgramFiles%\HTML Help Workshop\hhc.exe +pushd %~dp0 + +set this=%~n0 + +if "%SPHINXBUILD%" EQU "" set SPHINXBUILD=sphinx-build +if "%PYTHON%" EQU "" set PYTHON=py + +if DEFINED ProgramFiles(x86) set _PRGMFLS=%ProgramFiles(x86)% +if NOT DEFINED ProgramFiles(x86) set _PRGMFLS=%ProgramFiles% +if "%HTMLHELP%" EQU "" set HTMLHELP=%_PRGMFLS%\HTML Help Workshop\hhc.exe + if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/sphinxext/patchlevel.py`) do set DISTVERSION=%%v +if "%BUILDDIR%" EQU "" set BUILDDIR=build + +rem Targets that don't require sphinx-build if "%1" EQU "" goto help -if "%1" EQU "html" goto build -if "%1" EQU "htmlhelp" goto build -if "%1" EQU "latex" goto build -if "%1" EQU "text" goto build -if "%1" EQU "suspicious" goto build -if "%1" EQU "linkcheck" goto build -if "%1" EQU "changes" goto build -if "%1" EQU "checkout" goto checkout -if "%1" EQU "update" goto update +if "%1" EQU "help" goto help +if "%1" EQU "check" goto check +if "%1" EQU "serve" goto serve +if "%1" == "clean" ( + rmdir /q /s %BUILDDIR% + goto end +) + +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + goto end +) + +rem Targets that do require sphinx-build and have their own label +if "%1" EQU "htmlview" goto htmlview + +rem Everything else +goto build :help -set this=%~n0 -echo HELP +echo.usage: %this% BUILDER [filename ...] echo. -echo %this% checkout -echo %this% update -echo %this% html -echo %this% htmlhelp -echo %this% latex -echo %this% text -echo %this% suspicious -echo %this% linkcheck -echo %this% changes +echo.Call %this% with the desired Sphinx builder as the first argument, e.g. +echo.``%this% html`` or ``%this% doctest``. Interesting targets that are +echo.always available include: echo. -goto end - -:checkout -svn co %SVNROOT%/external/Sphinx-1.0.7/sphinx tools/sphinx -svn co %SVNROOT%/external/docutils-0.6/docutils tools/docutils -svn co %SVNROOT%/external/Jinja-2.3.1/jinja2 tools/jinja2 -svn co %SVNROOT%/external/Pygments-1.3.1/pygments tools/pygments -goto end - -:update -svn update tools/sphinx -svn update tools/docutils -svn update tools/jinja2 -svn update tools/pygments +echo. Provided by Sphinx: +echo. html, htmlhelp, latex, text +echo. suspicious, linkcheck, changes, doctest +echo. Provided by this script: +echo. clean, check, serve, htmlview +echo. +echo.All arguments past the first one are passed through to sphinx-build as +echo.filenames to build or are ignored. See README.txt in this directory or +echo.the documentation for your version of Sphinx for more exhaustive lists +echo.of available targets and descriptions of each. +echo. +echo.This script assumes that the SPHINXBUILD environment variable contains +echo.a legitimate command for calling sphinx-build, or that sphinx-build is +echo.on your PATH if SPHINXBUILD is not set. Options for sphinx-build can +echo.be passed by setting the SPHINXOPTS environment variable. goto end :build -if not exist build mkdir build -if not exist build\%1 mkdir build\%1 -if not exist build\doctrees mkdir build\doctrees -cmd /C %PYTHON% tools\sphinx-build.py -b%1 -dbuild\doctrees . build\%* -if "%1" EQU "htmlhelp" "%HTMLHELP%" build\htmlhelp\python%DISTVERSION:.=%.hhp +if NOT "%PAPER%" == "" ( + set SPHINXOPTS=-D latex_paper_size=%PAPER% %SPHINXOPTS% +) +cmd /C %SPHINXBUILD% %SPHINXOPTS% -b%1 -dbuild\doctrees . %BUILDDIR%\%* + +if "%1" EQU "htmlhelp" ( + if not exist "%HTMLHELP%" ( + echo. + echo.The HTML Help Workshop was not found. Set the HTMLHELP variable + echo.to the path to hhc.exe or download and install it from + echo.http://msdn.microsoft.com/en-us/library/ms669985 + rem Set errorlevel to 1 and exit + cmd /C exit /b 1 + goto end + ) + cmd /C "%HTMLHELP%" build\htmlhelp\python%DISTVERSION:.=%.hhp + rem hhc.exe seems to always exit with code 1, reset to 0 for less than 2 + if not errorlevel 2 cmd /C exit /b 0 +) + +echo. +if errorlevel 1 ( + echo.Build failed (exit code %ERRORLEVEL%^), check for error messages + echo.above. Any output will be found in %BUILDDIR%\%1 +) else ( + echo.Build succeeded. All output should be in %BUILDDIR%\%1 +) +goto end + +:htmlview +if NOT "%2" EQU "" ( + echo.Can't specify filenames to build with htmlview target, ignoring. +) +cmd /C %this% html + +if EXIST %BUILDDIR%\html\index.html ( + echo.Opening %BUILDDIR%\html\index.html in the default web browser... 
+ start %BUILDDIR%\html\index.html +) + +goto end + +:check +cmd /C %PYTHON% tools\rstlint.py -i tools +goto end + +:serve +cmd /C %PYTHON% ..\Tools\scripts\serve.py %BUILDDIR%\html goto end :end +popd diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -158,6 +158,9 @@ Build ----- +- The documentation Makefile no longer automatically downloads Sphinx. Users are + now required to have Sphinx already installed to build the documentation. + - Issue #21958: Define HAVE_ROUND when building with Visual Studio 2013 and above. Patch by Zachary Turner. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 05:26:01 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 5 Sep 2014 05:26:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?devguide=3A_update_for_recent_changes?= =?utf-8?q?_in_doc_building?= Message-ID: <3hq46K1p8Zz7LjY@mail.python.org> http://hg.python.org/devguide/rev/2ed6c4b04334 changeset: 711:2ed6c4b04334 user: Benjamin Peterson date: Thu Sep 04 23:25:50 2014 -0400 summary: update for recent changes in doc building files: documenting.rst | 17 +++++++++-------- 1 files changed, 9 insertions(+), 8 deletions(-) diff --git a/documenting.rst b/documenting.rst --- a/documenting.rst +++ b/documenting.rst @@ -1451,13 +1451,8 @@ below. You can view the documentation after building the HTML by pointing a browser at the file :file:`Doc/build/html/index.html`. -In the Python 2.7 and 3.3 branches, the Sphinx toolchain will be checked out -using Subversion from ``svn.python.org`` by the Makefile. This toolchain will -need an installed Python 2 to run. - -In the Python 3.4 and later branches, you are expected to have installed a -recent version of Sphinx on your system, so that the Makefile can find the -``sphinx-build`` command. +You are expected to have installed a recent version of Sphinx on your system or +in a virtualenv_, so that the Makefile can find the ``sphinx-build`` command. Using make / make.bat @@ -1469,7 +1464,9 @@ cd Doc make html -or alternatively ``make -C Doc html``. This builds the output as HTML. +or alternatively ``make -C Doc html``. This builds the output as HTML. You can +specify the location of the ``sphinx-build`` command with the ``SPHINXBUILD`` +make variable. For Windows users there is a :file:`make.bat` batchfile that tries to work like ``make`` does. @@ -1478,6 +1475,9 @@ * "html", which builds standalone HTML files for offline viewing. + * "htmlview", which builds the standalone HTML files and then opens a web + browser to display them. + * "htmlhelp", which builds HTML files and a HTML Help project file usable to convert them into a single Compiled HTML (.chm) file -- these are popular under Microsoft Windows, but very handy on every platform. @@ -1526,3 +1526,4 @@ .. _Jinja: http://jinja.pocoo.org/ .. _Pygments: http://pygments.org/ .. _Sphinx: http://sphinx-doc.org/ +.. 
_virtualenv: https://virtualenv.pypa.io/ -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Fri Sep 5 09:11:38 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 09:11:38 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Silenced_some_warnings_abo?= =?utf-8?q?ut_comparison_between_signed_and_unsigned_integer?= Message-ID: <3hq96f4WVFz7Lkg@mail.python.org> http://hg.python.org/cpython/rev/858747aac9e9 changeset: 92345:858747aac9e9 parent: 92342:034ebeb8be2d user: Serhiy Storchaka date: Fri Sep 05 10:10:23 2014 +0300 summary: Silenced some warnings about comparison between signed and unsigned integer expressions. files: Modules/_pickle.c | 6 +++--- Parser/node.c | 4 ++-- Parser/pgenmain.c | 7 ++++--- Parser/printgrammar.c | 5 +++-- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Modules/_pickle.c b/Modules/_pickle.c --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -1456,7 +1456,7 @@ pdata[1] = (unsigned char)(*value & 0xff); len = 2; } - else if (*value <= 0xffffffffL) { + else if ((size_t)*value <= 0xffffffffUL) { pdata[0] = LONG_BINGET; pdata[1] = (unsigned char)(*value & 0xff); pdata[2] = (unsigned char)((*value >> 8) & 0xff); @@ -1513,7 +1513,7 @@ pdata[1] = (unsigned char)idx; len = 2; } - else if (idx <= 0xffffffffL) { + else if ((size_t)idx <= 0xffffffffUL) { pdata[0] = LONG_BINPUT; pdata[1] = (unsigned char)(idx & 0xff); pdata[2] = (unsigned char)((idx >> 8) & 0xff); @@ -2013,7 +2013,7 @@ header[1] = (unsigned char)size; len = 2; } - else if (size <= 0xffffffffL) { + else if ((size_t)size <= 0xffffffffUL) { header[0] = BINBYTES; header[1] = (unsigned char)(size & 0xff); header[2] = (unsigned char)((size >> 8) & 0xff); diff --git a/Parser/node.c b/Parser/node.c --- a/Parser/node.c +++ b/Parser/node.c @@ -70,8 +70,8 @@ * Note that this would be straightforward if a node stored its current * capacity. The code is tricky to avoid that. */ -#define XXXROUNDUP(n) ((n) <= 1 ? (n) : \ - (n) <= 128 ? _Py_SIZE_ROUND_UP((n), 4) : \ +#define XXXROUNDUP(n) ((n) <= 1 ? (n) : \ + (n) <= 128 ? 
(int)_Py_SIZE_ROUND_UP((n), 4) : \ fancy_roundup(n)) diff --git a/Parser/pgenmain.c b/Parser/pgenmain.c --- a/Parser/pgenmain.c +++ b/Parser/pgenmain.c @@ -96,10 +96,11 @@ fprintf(stderr, "Parsing error %d, line %d.\n", err.error, err.lineno); if (err.text != NULL) { - size_t i; + size_t len; + int i; fprintf(stderr, "%s", err.text); - i = strlen(err.text); - if (i == 0 || err.text[i-1] != '\n') + len = strlen(err.text); + if (len == 0 || err.text[len-1] != '\n') fprintf(stderr, "\n"); for (i = 0; i < err.offset; i++) { if (err.text[i] == '\t') diff --git a/Parser/printgrammar.c b/Parser/printgrammar.c --- a/Parser/printgrammar.c +++ b/Parser/printgrammar.c @@ -84,7 +84,7 @@ printdfas(grammar *g, FILE *fp) { dfa *d; - int i, j; + int i, j, n; printstates(g, fp); fprintf(fp, "static dfa dfas[%d] = {\n", g->g_ndfas); @@ -93,7 +93,8 @@ fprintf(fp, " {%d, \"%s\", %d, %d, states_%d,\n", d->d_type, d->d_name, d->d_initial, d->d_nstates, i); fprintf(fp, " \""); - for (j = 0; j < NBYTES(g->g_ll.ll_nlabels); j++) + n = NBYTES(g->g_ll.ll_nlabels); + for (j = 0; j < n; j++) fprintf(fp, "\\%03o", d->d_first[j] & 0xff); fprintf(fp, "\"},\n"); } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 09:26:26 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 09:26:26 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMjIx?= =?utf-8?q?=3A_Backported_fixes_from_Python_3_=28issue_=2318960=29=2E?= Message-ID: <3hq9Rk0RzZz7LlH@mail.python.org> http://hg.python.org/cpython/rev/dd1e21f17b1c changeset: 92346:dd1e21f17b1c branch: 2.7 parent: 92344:48033d90c61d user: Serhiy Storchaka date: Fri Sep 05 10:22:05 2014 +0300 summary: Issue #22221: Backported fixes from Python 3 (issue #18960). * Now the source encoding declaration on the second line isn't effective if the first line contains anything except a comment. This affects compile(), eval() and exec() too. * IDLE now ignores the source encoding declaration on the second line if the first line contains anything except a comment. * 2to3 and the findnocoding.py script now ignore the source encoding declaration on the second line if the first line contains anything except a comment. files: Lib/idlelib/IOBinding.py | 3 +++ Lib/lib2to3/pgen2/tokenize.py | 3 +++ Lib/test/test_compile.py | 17 ++++++++++++++++- Misc/NEWS | 10 ++++++++++ Parser/tokenizer.c | 20 +++++++++++++++++--- Tools/scripts/findnocoding.py | 4 +++- 6 files changed, 52 insertions(+), 5 deletions(-) diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py --- a/Lib/idlelib/IOBinding.py +++ b/Lib/idlelib/IOBinding.py @@ -72,6 +72,7 @@ encoding = encoding.lower() coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') +blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)') class EncodingMessage(SimpleDialog): "Inform user that an encoding declaration is needed." 
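The rule the backport implements can be seen directly in the two regular expressions above: a coding cookie on the second line is only honoured when the first line is blank or a comment. The ``sniff_encoding`` helper below is purely illustrative; the real logic lives in IOBinding.py, lib2to3's tokenize.py and Parser/tokenizer.c::

    import re

    coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
    blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)')

    def sniff_encoding(line1, line2):
        # A cookie on the first line always wins.
        match = coding_re.match(line1)
        if match:
            return match.group(1)
        # The second line only counts when the first is blank or a comment.
        if blank_re.match(line1):
            match = coding_re.match(line2)
            if match:
                return match.group(1)
        return None

    print(sniff_encoding('# -*- coding: latin-1 -*-\n', 'pass\n'))       # latin-1
    print(sniff_encoding('import os\n', '# -*- coding: latin-1 -*-\n'))  # None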
@@ -130,6 +131,8 @@ match = coding_re.match(line) if match is not None: break + if not blank_re.match(line): + return None else: return None name = match.group(1) diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py --- a/Lib/lib2to3/pgen2/tokenize.py +++ b/Lib/lib2to3/pgen2/tokenize.py @@ -237,6 +237,7 @@ toks_append(tokval) cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') +blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)') def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" @@ -309,6 +310,8 @@ encoding = find_cookie(first) if encoding: return encoding, [first] + if not blank_re.match(first): + return default, [first] second = read_or_stop() if not second: diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -412,9 +412,24 @@ l = lambda: "foo" self.assertIsNone(l.__doc__) - def test_unicode_encoding(self): + @test_support.requires_unicode + def test_encoding(self): + code = b'# -*- coding: badencoding -*-\npass\n' + self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec') code = u"# -*- coding: utf-8 -*-\npass\n" self.assertRaises(SyntaxError, compile, code, "tmp", "exec") + code = 'u"\xc2\xa4"\n' + self.assertEqual(eval(code), u'\xc2\xa4') + code = u'u"\xc2\xa4"\n' + self.assertEqual(eval(code), u'\xc2\xa4') + code = '# -*- coding: latin1 -*-\nu"\xc2\xa4"\n' + self.assertEqual(eval(code), u'\xc2\xa4') + code = '# -*- coding: utf-8 -*-\nu"\xc2\xa4"\n' + self.assertEqual(eval(code), u'\xa4') + code = '# -*- coding: iso8859-15 -*-\nu"\xc2\xa4"\n' + self.assertEqual(eval(code), test_support.u(r'\xc2\u20ac')) + code = 'u"""\\\n# -*- coding: utf-8 -*-\n\xc2\xa4"""\n' + self.assertEqual(eval(code), u'# -*- coding: utf-8 -*-\n\xc2\xa4') def test_subscripts(self): # SF bug 1448804 diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #22221: Now the source encoding declaration on the second line isn't + effective if the first line contains anything except a comment. + - Issue #22023: Fix ``%S``, ``%R`` and ``%V`` formats of :c:func:`PyUnicode_FromFormat`. @@ -124,6 +127,9 @@ IDLE ---- +- Issue #22221: IDLE now ignores the source encoding declaration on the second + line if the first line contains anything except a comment. + - Issue #17390: Adjust Editor window title; remove 'Python', move version to end. @@ -140,6 +146,10 @@ Tools/Demos ----------- +- Issue #22221: 2to3 and the findnocoding.py script now ignore the source + encoding declaration on the second line if the first line contains anything + except a comment. + - Issue #22201: Command-line interface of the zipfile module now correctly extracts ZIP files with directory entries. Patch by Ryan Wilson. diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c --- a/Parser/tokenizer.c +++ b/Parser/tokenizer.c @@ -259,11 +259,25 @@ char * cs; int r = 1; - if (tok->cont_line) + if (tok->cont_line) { /* It's a continuation line, so it can't be a coding spec. */ + tok->read_coding_spec = 1; return 1; + } cs = get_coding_spec(line, size); - if (cs != NULL) { + if (!cs) { + Py_ssize_t i; + for (i = 0; i < size; i++) { + if (line[i] == '#' || line[i] == '\n' || line[i] == '\r') + break; + if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') { + /* Stop checking coding spec after a line containing + * anything except a comment. 
*/ + tok->read_coding_spec = 1; + break; + } + } + } else { tok->read_coding_spec = 1; if (tok->encoding == NULL) { assert(tok->decoding_state == 1); /* raw */ @@ -688,7 +702,7 @@ if (newl[0]) { if (!check_coding_spec(str, newl[0] - str, tok, buf_setreadl)) return error_ret(tok); - if (tok->enc == NULL && newl[1]) { + if (tok->enc == NULL && !tok->read_coding_spec && newl[1]) { if (!check_coding_spec(newl[0]+1, newl[1] - newl[0], tok, buf_setreadl)) return error_ret(tok); diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py --- a/Tools/scripts/findnocoding.py +++ b/Tools/scripts/findnocoding.py @@ -33,6 +33,7 @@ decl_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)') +blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)') def get_declaration(line): match = decl_re.match(line) @@ -57,7 +58,8 @@ line1 = infile.readline() line2 = infile.readline() - if get_declaration(line1) or get_declaration(line2): + if (get_declaration(line1) or + blank_re.match(line1) and get_declaration(line2)): # the file does have an encoding declaration, so trust it infile.close() return False -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Fri Sep 5 09:54:47 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 05 Sep 2014 09:54:47 +0200 Subject: [Python-checkins] Daily reference leaks (034ebeb8be2d): sum=151932 Message-ID: results for 034ebeb8be2d on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [0, 2, 0] references, sum=2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 0] references, sum=-2 test_site leaked [-2, 0, 0] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog0EOwfh', '-x'] From python-checkins at python.org Fri Sep 5 10:11:59 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 10:11:59 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjIx?= =?utf-8?q?=3A_Add_tests_for_compile=28=29_with_source_encoding_cookie=2E?= Message-ID: <3hqBSH5nBQz7Ljf@mail.python.org> http://hg.python.org/cpython/rev/13cd8ea4cafe changeset: 92347:13cd8ea4cafe branch: 3.4 parent: 92341:ecc98ea50bc3 user: Serhiy Storchaka date: Fri Sep 05 11:00:56 2014 +0300 summary: Issue #22221: Add tests for compile() with source encoding cookie. 
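The behaviour these tests pin down is easiest to see with a few one-liners. In Python 3 the cookie only affects bytes input, since a str source is already decoded, which is why a bogus cookie in a str is harmless. The byte values below are taken from the test file; Python 3 semantics are assumed::

    # str source: the declared encoding is not used for decoding.
    assert eval('# -*- coding: badencoding -*-\n"\xc2\xa4"\n') == '\xc2\xa4'
    # bytes source: the cookie selects how the bytes are decoded.
    assert eval(b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n') == '\xc2\xa4'
    assert eval(b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n') == '\xa4'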
files: Lib/test/test_compile.py | 23 ++++++++++++++++++++--- 1 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -304,9 +304,26 @@ l = lambda: "foo" self.assertIsNone(l.__doc__) -## def test_unicode_encoding(self): -## code = "# -*- coding: utf-8 -*-\npass\n" -## self.assertRaises(SyntaxError, compile, code, "tmp", "exec") + def test_encoding(self): + code = b'# -*- coding: badencoding -*-\npass\n' + self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec') + code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n' + compile(code, 'tmp', 'exec') + self.assertEqual(eval(code), '\xc2\xa4') + code = '"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\xa4') + code = b'"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xa4') + code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\xa4') + code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xa4') + code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\u20ac') + code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' + self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4') + code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' + self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4') def test_subscripts(self): # SF bug 1448804 -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 10:12:00 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 10:12:00 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322221=3A_Add_tests_for_compile=28=29_with_sourc?= =?utf-8?q?e_encoding_cookie=2E?= Message-ID: <3hqBSJ73B8z7LjY@mail.python.org> http://hg.python.org/cpython/rev/9d335a54d728 changeset: 92348:9d335a54d728 parent: 92345:858747aac9e9 parent: 92347:13cd8ea4cafe user: Serhiy Storchaka date: Fri Sep 05 11:01:43 2014 +0300 summary: Issue #22221: Add tests for compile() with source encoding cookie. 
files: Lib/test/test_compile.py | 23 ++++++++++++++++++++--- 1 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -304,9 +304,26 @@ l = lambda: "foo" self.assertIsNone(l.__doc__) -## def test_unicode_encoding(self): -## code = "# -*- coding: utf-8 -*-\npass\n" -## self.assertRaises(SyntaxError, compile, code, "tmp", "exec") + def test_encoding(self): + code = b'# -*- coding: badencoding -*-\npass\n' + self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec') + code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n' + compile(code, 'tmp', 'exec') + self.assertEqual(eval(code), '\xc2\xa4') + code = '"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\xa4') + code = b'"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xa4') + code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\xa4') + code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xa4') + code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n' + self.assertEqual(eval(code), '\xc2\u20ac') + code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' + self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4') + code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' + self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4') def test_subscripts(self): # SF bug 1448804 -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 12:16:34 2014 From: python-checkins at python.org (victor.stinner) Date: Fri, 5 Sep 2014 12:16:34 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogcmVncnRlc3Q6IGJh?= =?utf-8?q?ckport_=22=5B__1/399=5D=22_progress_back_from_Python_3?= Message-ID: <3hqFD227zVz7Lk2@mail.python.org> http://hg.python.org/cpython/rev/425c8bbc2ee7 changeset: 92349:425c8bbc2ee7 branch: 2.7 parent: 92346:dd1e21f17b1c user: Victor Stinner date: Fri Sep 05 12:12:11 2014 +0200 summary: regrtest: backport "[ 1/399]" progress back from Python 3 The progress bar helps a lot to analyze noisy buildbot logs, to find quickly where errors occurred. files: Lib/test/regrtest.py | 19 +++++++++++++++---- 1 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -475,8 +475,12 @@ if bad: return tests = test_forever() + test_count = '' + test_count_width = 3 else: tests = iter(selected) + test_count = '/{}'.format(len(selected)) + test_count_width = len(test_count) - 1 if use_mp: try: @@ -521,8 +525,6 @@ output.put((None, None, None, None)) return result = json.loads(result) - if not quiet: - stdout = test+'\n'+stdout output.put((test, stdout.rstrip(), stderr.rstrip(), result)) except BaseException: output.put((None, None, None, None)) @@ -531,6 +533,7 @@ for worker in workers: worker.start() finished = 0 + test_index = 1 try: while finished < use_mp: test, stdout, stderr, result = output.get() @@ -547,15 +550,23 @@ assert result[1] == 'KeyboardInterrupt' raise KeyboardInterrupt # What else? 
accumulate_result(test, result) + if not quiet: + fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + print(fmt.format( + test_count_width, test_index, test_count, + len(bad), test)) + test_index += 1 except KeyboardInterrupt: interrupted = True pending.close() for worker in workers: worker.join() else: - for test in tests: + for test_index, test in enumerate(tests, 1): if not quiet: - print test + fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + print(fmt.format( + test_count_width, test_index, test_count, len(bad), test)) sys.stdout.flush() if trace: # If we're tracing code coverage, then we don't exit with status -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 16:03:40 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 5 Sep 2014 16:03:40 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_backport_our_o?= =?utf-8?q?wn_copy_of_the_ref-counting_extension?= Message-ID: <3hqLG46JcFz7LjX@mail.python.org> http://hg.python.org/cpython/rev/9d41ceea3b8b changeset: 92350:9d41ceea3b8b branch: 2.7 user: Benjamin Peterson date: Fri Sep 05 10:03:26 2014 -0400 summary: backport our own copy of the ref-counting extension files: Doc/conf.py | 4 +- Doc/tools/sphinxext/c_annotations.py | 120 +++++++++++++++ 2 files changed, 122 insertions(+), 2 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -13,8 +13,8 @@ # General configuration # --------------------- -extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage', - 'sphinx.ext.doctest', 'pyspecific'] +extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', + 'pyspecific', 'c_annotations'] templates_path = ['tools/sphinxext'] # General substitutions. diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/sphinxext/c_annotations.py new file mode 100644 --- /dev/null +++ b/Doc/tools/sphinxext/c_annotations.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +""" + c_annotations.py + ~~~~~~~~~~~~~~~~ + + Supports annotations for C API elements: + + * reference count annotations for C API functions. Based on + refcount.py and anno-api.py in the old Python documentation tools. + + * stable API annotations + + Usage: Set the `refcount_file` config value to the path to the reference + count data file. + + :copyright: Copyright 2007-2013 by Georg Brandl. + :license: Python license. +""" + +from os import path +from docutils import nodes +from docutils.parsers.rst import directives + +from sphinx import addnodes +from sphinx.domains.c import CObject + + +class RCEntry: + def __init__(self, name): + self.name = name + self.args = [] + self.result_type = '' + self.result_refs = None + + +class Annotations(dict): + @classmethod + def fromfile(cls, filename): + d = cls() + fp = open(filename, 'r') + try: + for line in fp: + line = line.strip() + if line[:1] in ("", "#"): + # blank lines and comments + continue + parts = line.split(":", 4) + if len(parts) != 5: + raise ValueError("Wrong field count in %r" % line) + function, type, arg, refcount, comment = parts + # Get the entry, creating it if needed: + try: + entry = d[function] + except KeyError: + entry = d[function] = RCEntry(function) + if not refcount or refcount == "null": + refcount = None + else: + refcount = int(refcount) + # Update the entry with the new parameter or the result + # information. 
+ if arg: + entry.args.append((arg, type, refcount)) + else: + entry.result_type = type + entry.result_refs = refcount + finally: + fp.close() + return d + + def add_annotations(self, app, doctree): + for node in doctree.traverse(addnodes.desc_content): + par = node.parent + if par['domain'] != 'c': + continue + if par['stableabi']: + node.insert(0, nodes.emphasis(' Part of the stable ABI.', + ' Part of the stable ABI.', + classes=['stableabi'])) + if par['objtype'] != 'function': + continue + if not par[0].has_key('names') or not par[0]['names']: + continue + name = par[0]['names'][0] + if name.startswith("c."): + name = name[2:] + entry = self.get(name) + if not entry: + continue + elif entry.result_type not in ("PyObject*", "PyVarObject*"): + continue + if entry.result_refs is None: + rc = 'Return value: Always NULL.' + elif entry.result_refs: + rc = 'Return value: New reference.' + else: + rc = 'Return value: Borrowed reference.' + node.insert(0, nodes.emphasis(rc, rc, classes=['refcount'])) + + +def init_annotations(app): + refcounts = Annotations.fromfile( + path.join(app.srcdir, app.config.refcount_file)) + app.connect('doctree-read', refcounts.add_annotations) + + +def setup(app): + app.add_config_value('refcount_file', '', True) + app.connect('builder-inited', init_annotations) + + # monkey-patch C object... + CObject.option_spec = { + 'noindex': directives.flag, + 'stableabi': directives.flag, + } + old_handle_signature = CObject.handle_signature + def new_handle_signature(self, sig, signode): + signode.parent['stableabi'] = 'stableabi' in self.options + return old_handle_signature(self, sig, signode) + CObject.handle_signature = new_handle_signature -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 19:30:55 2014 From: python-checkins at python.org (alex.gaynor) Date: Fri, 5 Sep 2014 19:30:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_476=3A_Updates_based_on_G?= =?utf-8?q?uido=27s_feedback_on_python-dev?= Message-ID: <3hqQsC0K3Gz7Lm3@mail.python.org> http://hg.python.org/peps/rev/67652719e187 changeset: 5546:67652719e187 user: Alex Gaynor date: Fri Sep 05 10:23:19 2014 -0700 summary: PEP 476: Updates based on Guido's feedback on python-dev files: pep-0476.txt | 22 +++++----------------- 1 files changed, 5 insertions(+), 17 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -111,19 +111,6 @@ Twisted's 14.0 release made this same change, and it has been met with almost no opposition. -In order to make this transition as smooth as possible, the next 3.4.x release -following this PEP will be modified to emit a warning in cases that would raise -an Exception in Python 3.5. - -Warnings --------- - -To support this warning, in 3.4.next a new ``verify_mode`` is introduced -``CERT_WARN``, which is equivilant to ``CERT_NONE``, except in cases that would -fail as ``CERT_REQUIRED`` or fail the hostname check emits a warning. In -3.4.next the ``httplib`` module will set this as the ``verify_mode`` if the -default context is used. - Other protocols =============== @@ -142,10 +129,11 @@ Python Versions =============== -This PEP proposes making these changes to ``default`` (Python 3) branch. I -strongly believe these changes also belong in Python 2, but doing them in a -patch-release isn't reasonable, and there is strong opposition to doing a 2.8 -release. +This PEP describes changes that will occur on both the 3.4.x, 3.5 and 2.7.X +branches. 
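As a concrete illustration of what verified-by-default means for client code, the sketch below builds the kind of context the PEP wants the standard library to use implicitly. It assumes Python 3.4+ names (``http.client``; on 2.7 the spelling would be ``httplib`` once the ``context`` argument mentioned just below is backported), and ``www.python.org`` is only an example host::

    import ssl
    from http.client import HTTPSConnection

    # create_default_context() turns on CERT_REQUIRED and hostname checking,
    # which is the default behaviour this PEP proposes for stdlib clients.
    context = ssl.create_default_context()
    conn = HTTPSConnection('www.python.org', 443, context=context)
    conn.request('GET', '/')
    print(conn.getresponse().status)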
For 2.7.X this will require backporting the ``context`` +(``SSLContext``) argument to ``httplib``, in addition to the features already +backported in +:pep:`466`. Copyright ========= -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Fri Sep 5 21:09:01 2014 From: python-checkins at python.org (victor.stinner) Date: Fri, 5 Sep 2014 21:09:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMzQw?= =?utf-8?q?=3A_Fix_Python_3_warnings_in_Python_2_tests?= Message-ID: <3hqT2P63jQz7Lkb@mail.python.org> http://hg.python.org/cpython/rev/0675b3a55941 changeset: 92351:0675b3a55941 branch: 2.7 user: Victor Stinner date: Fri Sep 05 21:05:05 2014 +0200 summary: Issue #22340: Fix Python 3 warnings in Python 2 tests files: Lib/idlelib/idle_test/test_calltips.py | 2 +- Lib/sqlite3/test/dbapi.py | 4 +- Lib/sqlite3/test/types.py | 10 ++- Lib/sqlite3/test/userfunctions.py | 20 ++++-- Lib/test/test_collections.py | 43 ++++++++----- Lib/test/test_hash.py | 3 +- Lib/test/test_hmac.py | 16 ++-- Lib/test/test_ssl.py | 3 +- 8 files changed, 63 insertions(+), 38 deletions(-) diff --git a/Lib/idlelib/idle_test/test_calltips.py b/Lib/idlelib/idle_test/test_calltips.py --- a/Lib/idlelib/idle_test/test_calltips.py +++ b/Lib/idlelib/idle_test/test_calltips.py @@ -163,7 +163,7 @@ # In 3.x, get_entity changed from 'instance method' to module function # since 'self' not used. Use dummy instance until change 2.7 also. def test_bad_entity(self): - self.assertIsNone(CTi.get_entity('1/0')) + self.assertIsNone(CTi.get_entity('1//0')) def test_good_entity(self): self.assertIs(CTi.get_entity('int'), int) diff --git a/Lib/sqlite3/test/dbapi.py b/Lib/sqlite3/test/dbapi.py --- a/Lib/sqlite3/test/dbapi.py +++ b/Lib/sqlite3/test/dbapi.py @@ -24,6 +24,7 @@ import unittest import sys import sqlite3 as sqlite +from test import test_support try: import threading except ImportError: @@ -653,7 +654,8 @@ ts = sqlite.TimestampFromTicks(42) def CheckBinary(self): - b = sqlite.Binary(chr(0) + "'") + with test_support.check_py3k_warnings(): + b = sqlite.Binary(chr(0) + "'") class ExtensionTests(unittest.TestCase): def CheckScriptStringSql(self): diff --git a/Lib/sqlite3/test/types.py b/Lib/sqlite3/test/types.py --- a/Lib/sqlite3/test/types.py +++ b/Lib/sqlite3/test/types.py @@ -24,6 +24,7 @@ import datetime import unittest import sqlite3 as sqlite +from test import test_support try: import zlib except ImportError: @@ -67,7 +68,8 @@ self.assertEqual(row[0], val) def CheckBlob(self): - val = buffer("Guglhupf") + with test_support.check_py3k_warnings(): + val = buffer("Guglhupf") self.cur.execute("insert into test(b) values (?)", (val,)) self.cur.execute("select b from test") row = self.cur.fetchone() @@ -231,7 +233,8 @@ def CheckBlob(self): # default - val = buffer("Guglhupf") + with test_support.check_py3k_warnings(): + val = buffer("Guglhupf") self.cur.execute("insert into test(bin) values (?)", (val,)) self.cur.execute("select bin from test") row = self.cur.fetchone() @@ -347,7 +350,8 @@ def CheckBinaryInputForConverter(self): testdata = "abcdefg" * 10 - result = self.con.execute('select ? as "x [bin]"', (buffer(zlib.compress(testdata)),)).fetchone()[0] + with test_support.check_py3k_warnings(): + result = self.con.execute('select ? 
as "x [bin]"', (buffer(zlib.compress(testdata)),)).fetchone()[0] self.assertEqual(testdata, result) class DateTimeTests(unittest.TestCase): diff --git a/Lib/sqlite3/test/userfunctions.py b/Lib/sqlite3/test/userfunctions.py --- a/Lib/sqlite3/test/userfunctions.py +++ b/Lib/sqlite3/test/userfunctions.py @@ -24,6 +24,7 @@ import unittest import sqlite3 as sqlite +from test import test_support def func_returntext(): return "foo" @@ -36,7 +37,8 @@ def func_returnnull(): return None def func_returnblob(): - return buffer("blob") + with test_support.check_py3k_warnings(): + return buffer("blob") def func_returnlonglong(): return 1<<31 def func_raiseexception(): @@ -202,8 +204,9 @@ cur = self.con.cursor() cur.execute("select returnblob()") val = cur.fetchone()[0] - self.assertEqual(type(val), buffer) - self.assertEqual(val, buffer("blob")) + with test_support.check_py3k_warnings(): + self.assertEqual(type(val), buffer) + self.assertEqual(val, buffer("blob")) def CheckFuncReturnLongLong(self): cur = self.con.cursor() @@ -246,7 +249,8 @@ def CheckParamBlob(self): cur = self.con.cursor() - cur.execute("select isblob(?)", (buffer("blob"),)) + with test_support.check_py3k_warnings(): + cur.execute("select isblob(?)", (buffer("blob"),)) val = cur.fetchone()[0] self.assertEqual(val, 1) @@ -269,8 +273,9 @@ b blob ) """) - cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)", - ("foo", 5, 3.14, None, buffer("blob"),)) + with test_support.check_py3k_warnings(): + cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)", + ("foo", 5, 3.14, None, buffer("blob"),)) self.con.create_aggregate("nostep", 1, AggrNoStep) self.con.create_aggregate("nofinalize", 1, AggrNoFinalize) @@ -362,7 +367,8 @@ def CheckAggrCheckParamBlob(self): cur = self.con.cursor() - cur.execute("select checkType('blob', ?)", (buffer("blob"),)) + with test_support.check_py3k_warnings(): + cur.execute("select checkType('blob', ?)", (buffer("blob"),)) val = cur.fetchone()[0] self.assertEqual(val, 1) diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py --- a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -8,13 +8,14 @@ from random import randrange, shuffle import keyword import re -import sets import sys from collections import Hashable, Iterable, Iterator from collections import Sized, Container, Callable from collections import Set, MutableSet from collections import Mapping, MutableMapping from collections import Sequence, MutableSequence +with test_support.check_warnings(('', DeprecationWarning)): + import sets TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests @@ -713,10 +714,12 @@ self.assertTrue(r1 < r3) self.assertFalse(r1 < r1) self.assertFalse(r1 < r2) - # python 2 only, cross-type compares will succeed - f1 < l3 - f1 < l1 - f1 < l2 + + with test_support.check_py3k_warnings(): + # python 2 only, cross-type compares will succeed + f1 < l3 + f1 < l1 + f1 < l2 # any subset self.assertTrue(f1 <= f3) @@ -728,10 +731,12 @@ self.assertTrue(r1 <= r3) self.assertTrue(r1 <= r1) self.assertFalse(r1 <= r2) - # python 2 only, cross-type compares will succeed - f1 <= l3 - f1 <= l1 - f1 <= l2 + + with test_support.check_py3k_warnings(): + # python 2 only, cross-type compares will succeed + f1 <= l3 + f1 <= l1 + f1 <= l2 # proper superset self.assertTrue(f3 > f1) @@ -743,10 +748,12 @@ self.assertTrue(r3 > r1) self.assertFalse(r1 > r1) self.assertFalse(r2 > r1) - # python 2 only, cross-type compares will succeed - f1 > l3 - f1 > l1 - f1 > l2 + + with 
test_support.check_py3k_warnings(): + # python 2 only, cross-type compares will succeed + f1 > l3 + f1 > l1 + f1 > l2 # any superset self.assertTrue(f3 >= f1) @@ -758,10 +765,12 @@ self.assertTrue(r3 >= r1) self.assertTrue(r1 >= r1) self.assertFalse(r2 >= r1) - # python 2 only, cross-type compares will succeed - f1 >= l3 - f1 >=l1 - f1 >= l2 + + with test_support.check_py3k_warnings(): + # python 2 only, cross-type compares will succeed + f1 >= l3 + f1 >=l1 + f1 >= l2 # equality self.assertTrue(f1 == f1) diff --git a/Lib/test/test_hash.py b/Lib/test/test_hash.py --- a/Lib/test/test_hash.py +++ b/Lib/test/test_hash.py @@ -215,7 +215,8 @@ repr_ = 'buffer("abc")' def test_empty_string(self): - self.assertEqual(hash(buffer("")), 0) + with test_support.check_py3k_warnings(): + self.assertEqual(hash(buffer("")), 0) class DatetimeTests(HashRandomizationTests): def get_hash_command(self, repr_): diff --git a/Lib/test/test_hmac.py b/Lib/test/test_hmac.py --- a/Lib/test/test_hmac.py +++ b/Lib/test/test_hmac.py @@ -389,10 +389,11 @@ a, b = "foo?", "foo?" self.assertTrue(hmac.compare_digest(a, b)) - # subclasses are supported by ignore __eq__ - class mystr(str): - def __eq__(self, other): - return False + with test_support.check_py3k_warnings(): + # subclasses are supported by ignore __eq__ + class mystr(str): + def __eq__(self, other): + return False a, b = mystr("foobar"), mystr("foobar") self.assertTrue(hmac.compare_digest(a, b)) @@ -401,9 +402,10 @@ a, b = mystr("foobar"), mystr("foobaz") self.assertFalse(hmac.compare_digest(a, b)) - class mybytes(bytes): - def __eq__(self, other): - return False + with test_support.check_py3k_warnings(): + class mybytes(bytes): + def __eq__(self, other): + return False a, b = mybytes(b"foobar"), mybytes(b"foobar") self.assertTrue(hmac.compare_digest(a, b)) diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -2365,7 +2365,8 @@ # now fetch the same data from the HTTPS server url = 'https://%s:%d/%s' % ( HOST, server.port, os.path.split(CERTFILE)[1]) - f = urllib.urlopen(url) + with support.check_py3k_warnings(): + f = urllib.urlopen(url) try: dlen = f.info().getheader("content-length") if dlen and (int(dlen) > 0): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 21:41:37 2014 From: python-checkins at python.org (victor.stinner) Date: Fri, 5 Sep 2014 21:41:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMzQw?= =?utf-8?q?=3A_Fix_test=5Fcollections_if_the_sets_module_was_already_impor?= =?utf-8?q?ted?= Message-ID: <3hqTm15XHVz7Ln0@mail.python.org> http://hg.python.org/cpython/rev/407653078135 changeset: 92352:407653078135 branch: 2.7 user: Victor Stinner date: Fri Sep 05 21:41:25 2014 +0200 summary: Issue #22340: Fix test_collections if the sets module was already imported files: Lib/test/test_collections.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py --- a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -14,8 +14,8 @@ from collections import Set, MutableSet from collections import Mapping, MutableMapping from collections import Sequence, MutableSequence -with test_support.check_warnings(('', DeprecationWarning)): - import sets +# Silence deprecation warning +sets = test_support.import_module('sets', deprecated=True) TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests -- Repository URL: 
http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 22:28:57 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 5 Sep 2014 22:28:57 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_use_correct_article_=28closes?= =?utf-8?q?_=2322342=29?= Message-ID: <3hqVpd62Lrz7LkP@mail.python.org> http://hg.python.org/peps/rev/4b03ae00a76b changeset: 5547:4b03ae00a76b user: Benjamin Peterson date: Fri Sep 05 16:28:49 2014 -0400 summary: use correct article (closes #22342) files: pep-0380.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0380.txt b/pep-0380.txt --- a/pep-0380.txt +++ b/pep-0380.txt @@ -207,7 +207,7 @@ The rationale behind most of the semantics presented above stems from the desire to be able to refactor generator code. It should be -possible to take an section of code containing one or more ``yield`` +possible to take a section of code containing one or more ``yield`` expressions, move it into a separate function (using the usual techniques to deal with references to variables in the surrounding scope, etc.), and call the new function using a ``yield from`` -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Fri Sep 5 22:38:34 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 22:38:34 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIwNjQz?= =?utf-8?q?=3A_Removed_unneeded_=28and_wrong=29_class_directives=2E?= Message-ID: <3hqW1k2fJQz7Ljt@mail.python.org> http://hg.python.org/cpython/rev/bc4e26755a13 changeset: 92353:bc4e26755a13 branch: 3.4 parent: 92347:13cd8ea4cafe user: Serhiy Storchaka date: Fri Sep 05 23:27:36 2014 +0300 summary: Issue #20643: Removed unneeded (and wrong) class directives. files: Doc/reference/expressions.rst | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -400,7 +400,6 @@ is already executing raises a :exc:`ValueError` exception. .. index:: exception: StopIteration -.. class:: generator .. method:: generator.__next__() @@ -410,7 +409,7 @@ :meth:`~generator.__next__` method, the current yield expression always evaluates to :const:`None`. The execution then continues to the next yield expression, where the generator is suspended again, and the value of the - :token:`expression_list` is returned to :meth:`next`'s caller. If the + :token:`expression_list` is returned to :meth:`__next__`'s caller. If the generator exits without yielding another value, a :exc:`StopIteration` exception is raised. @@ -450,8 +449,6 @@ other exception, it is propagated to the caller. :meth:`close` does nothing if the generator has already exited due to an exception or normal exit. -.. class:: . - .. 
index:: single: yield; examples Examples -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 22:38:35 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 22:38:35 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2320643=3A_Removed_unneeded_=28and_wrong=29_class?= =?utf-8?q?_directives=2E?= Message-ID: <3hqW1l4G0cz7Ljy@mail.python.org> http://hg.python.org/cpython/rev/060e347c9a23 changeset: 92354:060e347c9a23 parent: 92348:9d335a54d728 parent: 92353:bc4e26755a13 user: Serhiy Storchaka date: Fri Sep 05 23:28:19 2014 +0300 summary: Issue #20643: Removed unneeded (and wrong) class directives. files: Doc/reference/expressions.rst | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -400,7 +400,6 @@ is already executing raises a :exc:`ValueError` exception. .. index:: exception: StopIteration -.. class:: generator .. method:: generator.__next__() @@ -410,7 +409,7 @@ :meth:`~generator.__next__` method, the current yield expression always evaluates to :const:`None`. The execution then continues to the next yield expression, where the generator is suspended again, and the value of the - :token:`expression_list` is returned to :meth:`next`'s caller. If the + :token:`expression_list` is returned to :meth:`__next__`'s caller. If the generator exits without yielding another value, a :exc:`StopIteration` exception is raised. @@ -450,8 +449,6 @@ other exception, it is propagated to the caller. :meth:`close` does nothing if the generator has already exited due to an exception or normal exit. -.. class:: . - .. index:: single: yield; examples Examples -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 5 22:38:37 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 5 Sep 2014 22:38:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIwNjQz?= =?utf-8?q?=3A_Fixed_references_to_the_next=28=29_method_=28distinguish_fr?= =?utf-8?q?om_the?= Message-ID: <3hqW1n07Tmz7Lls@mail.python.org> http://hg.python.org/cpython/rev/6dba9db360d0 changeset: 92355:6dba9db360d0 branch: 2.7 parent: 92352:407653078135 user: Serhiy Storchaka date: Fri Sep 05 23:34:12 2014 +0300 summary: Issue #20643: Fixed references to the next() method (distinguish from the next() function). files: Doc/c-api/typeobj.rst | 2 +- Doc/glossary.rst | 4 ++-- Doc/library/2to3.rst | 2 +- Doc/library/collections.rst | 4 ++-- Doc/library/stdtypes.rst | 8 ++++---- Doc/reference/expressions.rst | 9 ++++----- Doc/reference/simple_stmts.rst | 16 ++++++++-------- Doc/tutorial/classes.rst | 10 +++++----- 8 files changed, 27 insertions(+), 28 deletions(-) diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst --- a/Doc/c-api/typeobj.rst +++ b/Doc/c-api/typeobj.rst @@ -770,7 +770,7 @@ exception may or may not be set. When another error occurs, it must return *NULL* too. Its presence normally signals that the instances of this type are iterators (although classic instances always have this function, even if - they don't define a :meth:`next` method). + they don't define a :meth:`~iterator.next` method). 
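The Python-level protocol these documentation fixes keep referring to fits in a few lines. ``CountDown`` is an illustrative stand-in, written in the Python 2 spelling where the method is ``next()`` rather than ``__next__()``::

    class CountDown(object):
        """Iterator producing start, start-1, ..., 1, then StopIteration."""
        def __init__(self, start):
            self.remaining = start
        def __iter__(self):
            # An iterator's __iter__() returns the iterator itself.
            return self
        def next(self):              # spelled __next__() in Python 3
            if self.remaining <= 0:
                raise StopIteration
            self.remaining -= 1
            return self.remaining + 1

    print(list(CountDown(3)))        # [3, 2, 1]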
Iterator types should also define the :c:member:`~PyTypeObject.tp_iter` function, and that function should return the iterator instance itself (not a new iterator diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -408,10 +408,10 @@ iterator An object representing a stream of data. Repeated calls to the iterator's - :meth:`next` method return successive items in the stream. When no more + :meth:`~generator.next` method return successive items in the stream. When no more data are available a :exc:`StopIteration` exception is raised instead. At this point, the iterator object is exhausted and any further calls to its - :meth:`next` method just raise :exc:`StopIteration` again. Iterators are + :meth:`~generator.next` method just raise :exc:`StopIteration` again. Iterators are required to have an :meth:`__iter__` method that returns the iterator object itself so every iterator is also iterable and may be used in most places where other iterables are accepted. One notable exception is code diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -289,7 +289,7 @@ .. 2to3fixer:: next Converts the use of iterator's :meth:`~iterator.next` methods to the - :func:`next` function. It also renames :meth:`next` methods to + :func:`next` function. It also renames :meth:`~iterator.next` methods to :meth:`~iterator.__next__`. .. 2to3fixer:: nonzero diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -954,8 +954,8 @@ .. class:: Iterator - ABC for classes that provide the :meth:`__iter__` and :meth:`next` methods. - See also the definition of :term:`iterator`. + ABC for classes that provide the :meth:`~iterator.__iter__` and + :meth:`~iterator.next` methods. See also the definition of :term:`iterator`. .. class:: Sequence MutableSequence diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -652,7 +652,7 @@ specific types are not important beyond their implementation of the iterator protocol. -The intention of the protocol is that once an iterator's :meth:`next` method +The intention of the protocol is that once an iterator's :meth:`~iterator.next` method raises :exc:`StopIteration`, it will continue to do so on subsequent calls. Implementations that do not obey this property are deemed broken. (This constraint was added in Python 2.3; in Python 2.2, various iterators are broken @@ -667,9 +667,9 @@ Python's :term:`generator`\s provide a convenient way to implement the iterator protocol. If a container object's :meth:`__iter__` method is implemented as a generator, it will automatically return an iterator object (technically, a -generator object) supplying the :meth:`__iter__` and :meth:`next` methods. More -information about generators can be found in :ref:`the documentation for the -yield expression `. +generator object) supplying the :meth:`~iterator.__iter__` and +:meth:`~iterator.next` methods. More information about generators can be found +in :ref:`the documentation for the yield expression `. .. _typesseq: diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -431,22 +431,21 @@ is already executing raises a :exc:`ValueError` exception. .. index:: exception: StopIteration -.. class:: generator .. 
method:: generator.next() Starts the execution of a generator function or resumes it at the last executed :keyword:`yield` expression. When a generator function is resumed with a - :meth:`next` method, the current :keyword:`yield` expression always evaluates to + :meth:`~generator.next` method, the current :keyword:`yield` expression + always evaluates to :const:`None`. The execution then continues to the next :keyword:`yield` expression, where the generator is suspended again, and the value of the - :token:`expression_list` is returned to :meth:`next`'s caller. If the generator + :token:`expression_list` is returned to :meth:`~generator.next`'s caller. + If the generator exits without yielding another value, a :exc:`StopIteration` exception is raised. -.. class:: . - .. method:: generator.send(value) Resumes the execution and "sends" a value into the generator function. The diff --git a/Doc/reference/simple_stmts.rst b/Doc/reference/simple_stmts.rst --- a/Doc/reference/simple_stmts.rst +++ b/Doc/reference/simple_stmts.rst @@ -506,16 +506,16 @@ When a generator function is called, it returns an iterator known as a generator iterator, or more commonly, a generator. The body of the generator function is -executed by calling the generator's :meth:`next` method repeatedly until it -raises an exception. +executed by calling the generator's :meth:`~generator.next` method repeatedly +until it raises an exception. When a :keyword:`yield` statement is executed, the state of the generator is -frozen and the value of :token:`expression_list` is returned to :meth:`next`'s -caller. By "frozen" we mean that all local state is retained, including the -current bindings of local variables, the instruction pointer, and the internal -evaluation stack: enough information is saved so that the next time :meth:`next` -is invoked, the function can proceed exactly as if the :keyword:`yield` -statement were just another external call. +frozen and the value of :token:`expression_list` is returned to +:meth:`~generator.next`'s caller. By "frozen" we mean that all local state is +retained, including the current bindings of local variables, the instruction +pointer, and the internal evaluation stack: enough information is saved so that +the next time :meth:`~generator.next` is invoked, the function can proceed +exactly as if the :keyword:`yield` statement were just another external call. As of Python version 2.5, the :keyword:`yield` statement is now allowed in the :keyword:`try` clause of a :keyword:`try` ... :keyword:`finally` construct. If diff --git a/Doc/tutorial/classes.rst b/Doc/tutorial/classes.rst --- a/Doc/tutorial/classes.rst +++ b/Doc/tutorial/classes.rst @@ -788,8 +788,8 @@ Having seen the mechanics behind the iterator protocol, it is easy to add iterator behavior to your classes. Define an :meth:`__iter__` method which -returns an object with a :meth:`next` method. If the class defines -:meth:`next`, then :meth:`__iter__` can just return ``self``:: +returns an object with a :meth:`~iterator.next` method. If the class +defines :meth:`~iterator.next`, then :meth:`__iter__` can just return ``self``:: class Reverse: """Iterator for looping over a sequence backwards.""" @@ -825,7 +825,7 @@ :term:`Generator`\s are a simple and powerful tool for creating iterators. They are written like regular functions but use the :keyword:`yield` statement -whenever they want to return data. Each time :meth:`next` is called, the +whenever they want to return data. 
Each time :func:`next` is called on it, the generator resumes where it left-off (it remembers all the data values and which statement was last executed). An example shows that generators can be trivially easy to create:: @@ -846,8 +846,8 @@ Anything that can be done with generators can also be done with class based iterators as described in the previous section. What makes generators so -compact is that the :meth:`__iter__` and :meth:`next` methods are created -automatically. +compact is that the :meth:`__iter__` and :meth:`~generator.next` methods +are created automatically. Another key feature is that the local variables and execution state are automatically saved between calls. This made the function easier to write and -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 00:58:35 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 6 Sep 2014 00:58:35 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Update_OS_X_in?= =?utf-8?q?staller_build_script_for_changes_to_documentation_build=3A?= Message-ID: <3hqZ7H3Dswz7Ljy@mail.python.org> http://hg.python.org/cpython/rev/ec052852524d changeset: 92356:ec052852524d branch: 2.7 user: Ned Deily date: Fri Sep 05 15:51:54 2014 -0700 summary: Update OS X installer build script for changes to documentation build: as of 2.7.9, doc builds require an externally installed sphinx-build like 3.4+ builds do. files: Mac/BuildScript/build-installer.py | 16 +++++----------- 1 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -615,8 +615,7 @@ # Ensure ws have access to hg and to sphinx-build. # You may have to create links in /usr/bin for them. runCommand('hg --version') - if getVersionTuple() >= (3, 4): - runCommand('sphinx-build --version') + runCommand('sphinx-build --version') def parseOptions(args=None): """ @@ -929,15 +928,10 @@ docdir = os.path.join(rootDir, 'pydocs') curDir = os.getcwd() os.chdir(buildDir) - # The Doc build changed for 3.4 (technically, for 3.4.1) - if getVersionTuple() < (3, 4): - # This step does an svn checkout of sphinx and its dependencies - runCommand('make update') - runCommand("make html PYTHON='%s'" % os.path.abspath(sys.executable)) - else: - runCommand('make clean') - # Assume sphinx-build is on our PATH, checked in checkEnvironment - runCommand('make html') + # The Doc build changed for 3.4 (technically, for 3.4.1) and for 2.7.9 + runCommand('make clean') + # Assume sphinx-build is on our PATH, checked in checkEnvironment + runCommand('make html') os.chdir(curDir) if not os.path.exists(docdir): os.mkdir(docdir) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 00:58:36 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 6 Sep 2014 00:58:36 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogRG9jdW1lbnQgMi43?= =?utf-8?q?=2E9_changes_in_OS_X_installer_build_requirements=3A?= Message-ID: <3hqZ7J4vH7z7Ljy@mail.python.org> http://hg.python.org/cpython/rev/322f77ee6d5a changeset: 92357:322f77ee6d5a branch: 2.7 user: Ned Deily date: Fri Sep 05 15:52:45 2014 -0700 summary: Document 2.7.9 changes in OS X installer build requirements: because the Doc Makefile has been changed to no longer download sphinx and its dependencies, build-installer.py now requires that there be an externally-supplied sphinx-build available. 
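The practical requirement described above is simply that an externally installed ``sphinx-build`` be found on PATH before the documentation build starts. A minimal stand-alone sketch of such a check (assuming Python 3.3+ for ``shutil.which``; the real build-installer.py uses its own ``runCommand()`` helper instead, as in the checkEnvironment() change quoted above)::

    import shutil
    import subprocess
    import sys

    def check_doc_build_environment():
        # Fail early if no externally installed sphinx-build is available.
        if shutil.which('sphinx-build') is None:
            sys.exit('error: sphinx-build not found on PATH; install Sphinx '
                     'before building the documentation')
        # Report the version, as 'sphinx-build --version' does in the patch.
        subprocess.check_call(['sphinx-build', '--version'])

    if __name__ == '__main__':
        check_doc_build_environment()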
files: Mac/BuildScript/README.txt | 21 +++++++++++++++------ 1 files changed, 15 insertions(+), 6 deletions(-) diff --git a/Mac/BuildScript/README.txt b/Mac/BuildScript/README.txt --- a/Mac/BuildScript/README.txt +++ b/Mac/BuildScript/README.txt @@ -11,9 +11,9 @@ For Python 2.7.x and 3.x, PSF practice is to build two installer variants for each release. -Beginning with Python 2.7.8, we plan to drop binary installer support for +Beginning with Python 2.7.9, we plan to drop binary installer support for Mac OS X 10.3.9 and 10.4.x systems. To ease the transition, for Python 2.7.7 -only there will be three installers provided: +and 2.7.8 there were three installers provided: 1. DEPRECATED - 32-bit-only, i386 and PPC universal, capable on running on all machines supported by Mac OS X 10.3.9 through (at least) 10.9:: @@ -41,7 +41,8 @@ * ``MacOSX10.4u`` SDK (later SDKs do not support PPC G3 processors) * ``MACOSX_DEPLOYMENT_TARGET=10.3`` * Apple ``gcc-4.0`` - * system Python 2.5 for documentation build with Sphinx + * bootstrap non-framework Python 2.7 for documentation build with + Sphinx (as of 2.7.9) - alternate build environments: @@ -76,7 +77,8 @@ * ``MacOSX10.5`` SDK * ``MACOSX_DEPLOYMENT_TARGET=10.5`` * Apple ``gcc-4.2`` - * system Python 2.5+ for documentation build with Sphinx + * bootstrap non-framework Python 2.7 for documentation build with + Sphinx (as of 2.7.9) - alternate build environments: @@ -110,7 +112,8 @@ * ``MacOSX10.6`` SDK * ``MACOSX_DEPLOYMENT_TARGET=10.6`` * Apple ``gcc-4.2`` - * system Python 2.6 for documentation build with Sphinx + * bootstrap non-framework Python 2.7 for documentation build with + Sphinx (as of 2.7.9) - alternate build environments: @@ -134,7 +137,13 @@ interfere with the build. * The documentation for the release is built using Sphinx - because it is included in the installer. + because it is included in the installer. For 2.7.x up to and including + 2.7.8, the ``Doc/Makefile`` used ``svn`` to download repos of + ``Sphinx`` and its dependencies. Beginning with 2.7.9, the ``Doc/Makefile`` + assumes there is an externally-provided ``sphinx-build`` and requires at + least Python 2.6 to run. Because of this, it is no longer possible to + build a 2.7.9 or later installer on OS X 10.5 using the Apple-supplied + Python 2.5. * It is safest to start each variant build with an empty source directory populated with a fresh copy of the untarred source. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 00:58:37 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 6 Sep 2014 00:58:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Keep_Mac/build?= =?utf-8?q?-installer=2Epy_in_sync_across_branches_=28affects_2=2E7_only?= =?utf-8?b?KS4=?= Message-ID: <3hqZ7K6G0Xz7Lkw@mail.python.org> http://hg.python.org/cpython/rev/b424d9679308 changeset: 92358:b424d9679308 branch: 3.4 parent: 92353:bc4e26755a13 user: Ned Deily date: Fri Sep 05 15:57:05 2014 -0700 summary: Keep Mac/build-installer.py in sync across branches (affects 2.7 only). files: Mac/BuildScript/build-installer.py | 16 +++++----------- 1 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -615,8 +615,7 @@ # Ensure ws have access to hg and to sphinx-build. # You may have to create links in /usr/bin for them. 
runCommand('hg --version') - if getVersionTuple() >= (3, 4): - runCommand('sphinx-build --version') + runCommand('sphinx-build --version') def parseOptions(args=None): """ @@ -929,15 +928,10 @@ docdir = os.path.join(rootDir, 'pydocs') curDir = os.getcwd() os.chdir(buildDir) - # The Doc build changed for 3.4 (technically, for 3.4.1) - if getVersionTuple() < (3, 4): - # This step does an svn checkout of sphinx and its dependencies - runCommand('make update') - runCommand("make html PYTHON='%s'" % os.path.abspath(sys.executable)) - else: - runCommand('make clean') - # Assume sphinx-build is on our PATH, checked in checkEnvironment - runCommand('make html') + # The Doc build changed for 3.4 (technically, for 3.4.1) and for 2.7.9 + runCommand('make clean') + # Assume sphinx-build is on our PATH, checked in checkEnvironment + runCommand('make html') os.chdir(curDir) if not os.path.exists(docdir): os.mkdir(docdir) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 00:58:39 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 6 Sep 2014 00:58:39 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Keep_Mac/build-installer=2Epy_in_sync_across_branches_?= =?utf-8?q?=28affects_2=2E7_only=29=2E?= Message-ID: <3hqZ7M0sMTz7Lml@mail.python.org> http://hg.python.org/cpython/rev/7fe96ffb2dc6 changeset: 92359:7fe96ffb2dc6 parent: 92354:060e347c9a23 parent: 92358:b424d9679308 user: Ned Deily date: Fri Sep 05 15:57:54 2014 -0700 summary: Keep Mac/build-installer.py in sync across branches (affects 2.7 only). files: Mac/BuildScript/build-installer.py | 16 +++++----------- 1 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -615,8 +615,7 @@ # Ensure ws have access to hg and to sphinx-build. # You may have to create links in /usr/bin for them. runCommand('hg --version') - if getVersionTuple() >= (3, 4): - runCommand('sphinx-build --version') + runCommand('sphinx-build --version') def parseOptions(args=None): """ @@ -929,15 +928,10 @@ docdir = os.path.join(rootDir, 'pydocs') curDir = os.getcwd() os.chdir(buildDir) - # The Doc build changed for 3.4 (technically, for 3.4.1) - if getVersionTuple() < (3, 4): - # This step does an svn checkout of sphinx and its dependencies - runCommand('make update') - runCommand("make html PYTHON='%s'" % os.path.abspath(sys.executable)) - else: - runCommand('make clean') - # Assume sphinx-build is on our PATH, checked in checkEnvironment - runCommand('make html') + # The Doc build changed for 3.4 (technically, for 3.4.1) and for 2.7.9 + runCommand('make clean') + # Assume sphinx-build is on our PATH, checked in checkEnvironment + runCommand('make html') os.chdir(curDir) if not os.path.exists(docdir): os.mkdir(docdir) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 04:28:51 2014 From: python-checkins at python.org (guido.van.rossum) Date: Sat, 6 Sep 2014 04:28:51 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_PEP-411-compliant_note?= =?utf-8?q?_about_asyncio_being_provisional=2E_Fixes_issue_=2322346=2E?= Message-ID: <3hqfnv3cz2z7Lkw@mail.python.org> http://hg.python.org/cpython/rev/f8f3e83c9528 changeset: 92360:f8f3e83c9528 user: Guido van Rossum date: Fri Sep 05 19:28:40 2014 -0700 summary: Add PEP-411-compliant note about asyncio being provisional. 
Fixes issue #22346. files: Doc/library/asyncio.rst | 7 +++++++ 1 files changed, 7 insertions(+), 0 deletions(-) diff --git a/Doc/library/asyncio.rst b/Doc/library/asyncio.rst --- a/Doc/library/asyncio.rst +++ b/Doc/library/asyncio.rst @@ -4,6 +4,13 @@ .. module:: asyncio :synopsis: Asynchronous I/O, event loop, coroutines and tasks. +.. note:: + + The asyncio package has been included in the standard library on a + :term:`provisional basis `. Backwards incompatible + changes (up to and including removal of the module) may occur if deemed + necessary by the core developers. + .. versionadded:: 3.4 **Source code:** :source:`Lib/asyncio/` -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Sat Sep 6 10:38:25 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 06 Sep 2014 10:38:25 +0200 Subject: [Python-checkins] Daily reference leaks (7fe96ffb2dc6): sum=151930 Message-ID: results for 7fe96ffb2dc6 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [0, -2, 2] references, sum=0 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 0] references, sum=-2 test_site leaked [-2, 0, 0] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogOtKgWZ', '-x'] From python-checkins at python.org Sat Sep 6 11:47:21 2014 From: python-checkins at python.org (nick.coghlan) Date: Sat, 6 Sep 2014 11:47:21 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzQ2?= =?utf-8?q?=3A_also_include_asyncio_PEP_411_notice_in_3=2E4?= Message-ID: <3hqrWs1VRPz7MnQ@mail.python.org> http://hg.python.org/cpython/rev/fefe7822e6b8 changeset: 92361:fefe7822e6b8 branch: 3.4 parent: 92358:b424d9679308 user: Nick Coghlan date: Sat Sep 06 19:43:06 2014 +1000 summary: Issue #22346: also include asyncio PEP 411 notice in 3.4 files: Doc/library/asyncio.rst | 7 +++++++ 1 files changed, 7 insertions(+), 0 deletions(-) diff --git a/Doc/library/asyncio.rst b/Doc/library/asyncio.rst --- a/Doc/library/asyncio.rst +++ b/Doc/library/asyncio.rst @@ -4,6 +4,13 @@ .. module:: asyncio :synopsis: Asynchronous I/O, event loop, coroutines and tasks. +.. note:: + + The asyncio package has been included in the standard library on a + :term:`provisional basis `. Backwards incompatible + changes (up to and including removal of the module) may occur if deemed + necessary by the core developers. + .. 
versionadded:: 3.4 **Source code:** :source:`Lib/asyncio/` -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 11:47:22 2014 From: python-checkins at python.org (nick.coghlan) Date: Sat, 6 Sep 2014 11:47:22 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge_from_3=2E4_backport?= Message-ID: <3hqrWt36Dzz7MnQ@mail.python.org> http://hg.python.org/cpython/rev/258481642dd7 changeset: 92362:258481642dd7 parent: 92360:f8f3e83c9528 parent: 92361:fefe7822e6b8 user: Nick Coghlan date: Sat Sep 06 19:44:41 2014 +1000 summary: Null merge from 3.4 backport files: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 12:40:19 2014 From: python-checkins at python.org (nick.coghlan) Date: Sat, 6 Sep 2014 12:40:19 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjk1?= =?utf-8?q?=3A_Adopt_=27python_-m_pip=27_as_the_preferred_invocation?= Message-ID: <3hqshz5QhVz7MpF@mail.python.org> http://hg.python.org/cpython/rev/e8447da8791d changeset: 92363:e8447da8791d branch: 3.4 parent: 92361:fefe7822e6b8 user: Nick Coghlan date: Sat Sep 06 20:38:23 2014 +1000 summary: Issue #22295: Adopt 'python -m pip' as the preferred invocation files: Doc/distributing/index.rst | 13 +++++++++- Doc/glossary.rst | 8 ++++++ Doc/installing/index.rst | 30 +++++++++++++++++-------- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/Doc/distributing/index.rst b/Doc/distributing/index.rst --- a/Doc/distributing/index.rst +++ b/Doc/distributing/index.rst @@ -93,9 +93,18 @@ versions of Python. The currently recommended build and distribution tools can be installed -using ``pip``:: +by invoking the ``pip`` module at the command line:: - pip install setuptools wheel twine + python -m pip install setuptools wheel twine + +.. note:: + + For POSIX users (including Mac OS X and Linux users), these instructions + assume the use of a :term:`virtual environment`. + + For Windows users, these instructions assume that the option to + adjust the system PATH environment variable was selected when installing + Python. The Python Packaging User Guide includes more details on the `currently recommended tools`_. diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -857,6 +857,14 @@ dictionary view to become a full list use ``list(dictview)``. See :ref:`dict-views`. + virtual environment + A cooperatively isolated runtime environment that allows Python users + and applications to install and upgrade Python distribution packages + without interfering with the behaviour of other Python applications + running on the same system. + + See also :ref:`scripts-pyvenv` + virtual machine A computer defined entirely in software. Python's virtual machine executes the :term:`bytecode` emitted by the bytecode compiler. diff --git a/Doc/installing/index.rst b/Doc/installing/index.rst --- a/Doc/installing/index.rst +++ b/Doc/installing/index.rst @@ -40,6 +40,10 @@ * ``pyvenv`` is the standard tool for creating virtual environments, and has been part of Python since Python 3.3. Starting with Python 3.4, it defaults to installing ``pip`` into all created virtual environments +* ``virtualenv`` is a third party alternative (and predecessor) to + ``pyvenv``. 
It allows virtual environments to be used on versions of + Python prior to 3.4, which either don't provide ``pyvenv`` at all, or + aren't able to automatically install ``pip`` into created environments. * the `Python Package Index `__ is a public repository of open source licensed packages made available for use by other Python users @@ -63,27 +67,33 @@ =========== The standard packaging tools are all designed to be used from the command -line. For Windows users, the examples below assume that the option to -adjust the system PATH environment variable was selected when installing -Python. For Linux users, the command to install into the system version of -Python 3 is likely to be ``pip3`` rather than ``pip``. +line. The following command will install the latest version of a module and its dependencies from the Python Package Index:: - pip install SomePackage + python -m pip install SomePackage + +.. note:: + + For POSIX users (including Mac OS X and Linux users), the examples in + this guide assume the use of a :term:`virtual environment`. + + For Windows users, the examples in this guide assume that the option to + adjust the system PATH environment variable was selected when installing + Python. It's also possible to specify an exact or minimum version directly on the command line:: - pip install SomePackage==1.0.4 # specific version - pip install 'SomePackage>=1.0.4' # minimum version + python -m pip install SomePackage==1.0.4 # specific version + python -m pip install 'SomePackage>=1.0.4' # minimum version Normally, if a suitable module is already installed, attempting to install it again will have no effect. Upgrading existing modules must be requested explicitly:: - pip install --upgrade SomePackage + python -m pip install --upgrade SomePackage More information and resources regarding ``pip`` and its capabilities can be found in the `Python Packaging User Guide `__. @@ -120,8 +130,8 @@ ... install packages just for the current user? ----------------------------------------------- -Passing the ``--user`` option to ``pip install`` will install a package -just for the current user, rather than for all users of the system. +Passing the ``--user`` option to ``python -m pip install`` will install a +package just for the current user, rather than for all users of the system. ... install scientific Python packages? -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 12:40:21 2014 From: python-checkins at python.org (nick.coghlan) Date: Sat, 6 Sep 2014 12:40:21 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_issue_=2322295_fix_from_3=2E4?= Message-ID: <3hqsj1144jz7NCX@mail.python.org> http://hg.python.org/cpython/rev/a969b42e6e2b changeset: 92364:a969b42e6e2b parent: 92362:258481642dd7 parent: 92363:e8447da8791d user: Nick Coghlan date: Sat Sep 06 20:40:00 2014 +1000 summary: Merge issue #22295 fix from 3.4 files: Doc/distributing/index.rst | 13 +++++++++- Doc/glossary.rst | 8 ++++++ Doc/installing/index.rst | 30 +++++++++++++++++-------- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/Doc/distributing/index.rst b/Doc/distributing/index.rst --- a/Doc/distributing/index.rst +++ b/Doc/distributing/index.rst @@ -93,9 +93,18 @@ versions of Python. The currently recommended build and distribution tools can be installed -using ``pip``:: +by invoking the ``pip`` module at the command line:: - pip install setuptools wheel twine + python -m pip install setuptools wheel twine + +.. 
note:: + + For POSIX users (including Mac OS X and Linux users), these instructions + assume the use of a :term:`virtual environment`. + + For Windows users, these instructions assume that the option to + adjust the system PATH environment variable was selected when installing + Python. The Python Packaging User Guide includes more details on the `currently recommended tools`_. diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -858,6 +858,14 @@ dictionary view to become a full list use ``list(dictview)``. See :ref:`dict-views`. + virtual environment + A cooperatively isolated runtime environment that allows Python users + and applications to install and upgrade Python distribution packages + without interfering with the behaviour of other Python applications + running on the same system. + + See also :ref:`scripts-pyvenv` + virtual machine A computer defined entirely in software. Python's virtual machine executes the :term:`bytecode` emitted by the bytecode compiler. diff --git a/Doc/installing/index.rst b/Doc/installing/index.rst --- a/Doc/installing/index.rst +++ b/Doc/installing/index.rst @@ -40,6 +40,10 @@ * ``pyvenv`` is the standard tool for creating virtual environments, and has been part of Python since Python 3.3. Starting with Python 3.4, it defaults to installing ``pip`` into all created virtual environments +* ``virtualenv`` is a third party alternative (and predecessor) to + ``pyvenv``. It allows virtual environments to be used on versions of + Python prior to 3.4, which either don't provide ``pyvenv`` at all, or + aren't able to automatically install ``pip`` into created environments. * the `Python Package Index `__ is a public repository of open source licensed packages made available for use by other Python users @@ -63,27 +67,33 @@ =========== The standard packaging tools are all designed to be used from the command -line. For Windows users, the examples below assume that the option to -adjust the system PATH environment variable was selected when installing -Python. For Linux users, the command to install into the system version of -Python 3 is likely to be ``pip3`` rather than ``pip``. +line. The following command will install the latest version of a module and its dependencies from the Python Package Index:: - pip install SomePackage + python -m pip install SomePackage + +.. note:: + + For POSIX users (including Mac OS X and Linux users), the examples in + this guide assume the use of a :term:`virtual environment`. + + For Windows users, the examples in this guide assume that the option to + adjust the system PATH environment variable was selected when installing + Python. It's also possible to specify an exact or minimum version directly on the command line:: - pip install SomePackage==1.0.4 # specific version - pip install 'SomePackage>=1.0.4' # minimum version + python -m pip install SomePackage==1.0.4 # specific version + python -m pip install 'SomePackage>=1.0.4' # minimum version Normally, if a suitable module is already installed, attempting to install it again will have no effect. Upgrading existing modules must be requested explicitly:: - pip install --upgrade SomePackage + python -m pip install --upgrade SomePackage More information and resources regarding ``pip`` and its capabilities can be found in the `Python Packaging User Guide `__. @@ -120,8 +130,8 @@ ... install packages just for the current user? 
----------------------------------------------- -Passing the ``--user`` option to ``pip install`` will install a package -just for the current user, rather than for all users of the system. +Passing the ``--user`` option to ``python -m pip install`` will install a +package just for the current user, rather than for all users of the system. ... install scientific Python packages? -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 19:08:26 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 19:08:26 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322215=3A_Now_Valu?= =?utf-8?q?eError_is_raised_instead_of_TypeError_when_str_or_bytes?= Message-ID: <3hr2Jp0FrFz7LjV@mail.python.org> http://hg.python.org/cpython/rev/25032ec29315 changeset: 92365:25032ec29315 user: Serhiy Storchaka date: Sat Sep 06 20:07:17 2014 +0300 summary: Issue #22215: Now ValueError is raised instead of TypeError when str or bytes argument contains not permitted null character or byte. files: Lib/test/test_builtin.py | 4 ++-- Lib/test/test_fileio.py | 4 ++-- Lib/test/test_getargs2.py | 10 +++++----- Lib/test/test_io.py | 4 ++-- Lib/test/test_site.py | 2 +- Misc/NEWS | 3 +++ Modules/_io/fileio.c | 2 +- Modules/_tkinter.c | 4 ++-- Modules/posixmodule.c | 2 +- Modules/socketmodule.c | 2 +- Objects/bytesobject.c | 4 ++-- Objects/unicodeobject.c | 10 +++++----- Python/bltinmodule.c | 4 ++-- Python/getargs.c | 25 ++++++++++++------------- 14 files changed, 41 insertions(+), 39 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -312,11 +312,11 @@ self.assertRaises(TypeError, compile) self.assertRaises(ValueError, compile, 'print(42)\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print(42)\n', '', 'single', 0xff) - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(ValueError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') compile('print("\xe5")\n', '', 'exec') - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(ValueError, compile, chr(0), 'f', 'exec') self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad') # test the optimize argument diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py --- a/Lib/test/test_fileio.py +++ b/Lib/test/test_fileio.py @@ -361,8 +361,8 @@ def testConstructorHandlesNULChars(self): fn_with_NUL = 'foo\0bar' - self.assertRaises(TypeError, _FileIO, fn_with_NUL, 'w') - self.assertRaises(TypeError, _FileIO, bytes(fn_with_NUL, 'ascii'), 'w') + self.assertRaises(ValueError, _FileIO, fn_with_NUL, 'w') + self.assertRaises(ValueError, _FileIO, bytes(fn_with_NUL, 'ascii'), 'w') def testInvalidFd(self): self.assertRaises(ValueError, _FileIO, -10) diff --git a/Lib/test/test_getargs2.py b/Lib/test/test_getargs2.py --- a/Lib/test/test_getargs2.py +++ b/Lib/test/test_getargs2.py @@ -482,7 +482,7 @@ def test_s(self): from _testcapi import getargs_s self.assertEqual(getargs_s('abc\xe9'), b'abc\xc3\xa9') - self.assertRaises(TypeError, getargs_s, 'nul:\0') + self.assertRaises(ValueError, getargs_s, 'nul:\0') self.assertRaises(TypeError, getargs_s, b'bytes') self.assertRaises(TypeError, getargs_s, bytearray(b'bytearray')) self.assertRaises(TypeError, getargs_s, memoryview(b'memoryview')) @@ -509,7 +509,7 @@ def test_z(self): from _testcapi import getargs_z 
self.assertEqual(getargs_z('abc\xe9'), b'abc\xc3\xa9') - self.assertRaises(TypeError, getargs_z, 'nul:\0') + self.assertRaises(ValueError, getargs_z, 'nul:\0') self.assertRaises(TypeError, getargs_z, b'bytes') self.assertRaises(TypeError, getargs_z, bytearray(b'bytearray')) self.assertRaises(TypeError, getargs_z, memoryview(b'memoryview')) @@ -537,7 +537,7 @@ from _testcapi import getargs_y self.assertRaises(TypeError, getargs_y, 'abc\xe9') self.assertEqual(getargs_y(b'bytes'), b'bytes') - self.assertRaises(TypeError, getargs_y, b'nul:\0') + self.assertRaises(ValueError, getargs_y, b'nul:\0') self.assertRaises(TypeError, getargs_y, bytearray(b'bytearray')) self.assertRaises(TypeError, getargs_y, memoryview(b'memoryview')) self.assertRaises(TypeError, getargs_y, None) @@ -577,7 +577,7 @@ def test_u(self): from _testcapi import getargs_u self.assertEqual(getargs_u('abc\xe9'), 'abc\xe9') - self.assertRaises(TypeError, getargs_u, 'nul:\0') + self.assertRaises(ValueError, getargs_u, 'nul:\0') self.assertRaises(TypeError, getargs_u, b'bytes') self.assertRaises(TypeError, getargs_u, bytearray(b'bytearray')) self.assertRaises(TypeError, getargs_u, memoryview(b'memoryview')) @@ -595,7 +595,7 @@ def test_Z(self): from _testcapi import getargs_Z self.assertEqual(getargs_Z('abc\xe9'), 'abc\xe9') - self.assertRaises(TypeError, getargs_Z, 'nul:\0') + self.assertRaises(ValueError, getargs_Z, 'nul:\0') self.assertRaises(TypeError, getargs_Z, b'bytes') self.assertRaises(TypeError, getargs_Z, bytearray(b'bytearray')) self.assertRaises(TypeError, getargs_Z, memoryview(b'memoryview')) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -363,8 +363,8 @@ def test_open_handles_NUL_chars(self): fn_with_NUL = 'foo\0bar' - self.assertRaises(TypeError, self.open, fn_with_NUL, 'w') - self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w') + self.assertRaises(ValueError, self.open, fn_with_NUL, 'w') + self.assertRaises(ValueError, self.open, bytes(fn_with_NUL, 'ascii'), 'w') def test_raw_file_io(self): with self.open(support.TESTFN, "wb", buffering=0) as f: diff --git a/Lib/test/test_site.py b/Lib/test/test_site.py --- a/Lib/test/test_site.py +++ b/Lib/test/test_site.py @@ -147,7 +147,7 @@ re.escape(os.path.join(pth_dir, pth_fn))) # XXX: ditto previous XXX comment. self.assertRegex(err_out.getvalue(), 'Traceback') - self.assertRegex(err_out.getvalue(), 'TypeError') + self.assertRegex(err_out.getvalue(), 'ValueError') def test_addsitedir(self): # Same tests for test_addpackage since addsitedir() essentially just diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #22215: Now ValueError is raised instead of TypeError when str or bytes + argument contains not permitted null character or byte. + - Issue #22258: Fix the internal function set_inheritable() on Illumos. This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". 
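The user-visible effect of the Issue #22215 change is easiest to see with the ``open()`` builtin, since its path argument goes through the same argument converters. A small demonstration, assuming a Python build that includes this change (earlier releases raise TypeError instead)::

    try:
        open('foo\0bar')
    except ValueError as exc:
        print('ValueError:', exc)    # e.g. "embedded null byte"
    except TypeError as exc:
        print('TypeError:', exc)     # behaviour of earlier releases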
set_inheritable() diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -256,7 +256,7 @@ int rv = _PyUnicode_HasNULChars(nameobj); if (rv) { if (rv != -1) - PyErr_SetString(PyExc_TypeError, "embedded NUL character"); + PyErr_SetString(PyExc_ValueError, "embedded null character"); return -1; } widename = PyUnicode_AsUnicode(nameobj); diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -1417,7 +1417,7 @@ } s = PyBytes_AsString(in); if (strlen(s) != (size_t)PyBytes_Size(in)) { - PyErr_SetString(PyExc_ValueError, "null byte in bytes object"); + PyErr_SetString(PyExc_ValueError, "embedded null byte"); return 0; } *out = s; @@ -1434,7 +1434,7 @@ return 0; } if (strlen(s) != (size_t)size) { - PyErr_SetString(PyExc_ValueError, "null character in string"); + PyErr_SetString(PyExc_ValueError, "embedded null character"); return 0; } *out = s; diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -903,7 +903,7 @@ narrow = PyBytes_AS_STRING(bytes); if ((size_t)length != strlen(narrow)) { - FORMAT_EXCEPTION(PyExc_ValueError, "embedded NUL character in %s"); + FORMAT_EXCEPTION(PyExc_ValueError, "embedded null character in %s"); Py_DECREF(bytes); return 0; } diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -1273,7 +1273,7 @@ } if (strlen(data->buf) != len) { Py_CLEAR(data->obj); - PyErr_SetString(PyExc_TypeError, "host name must not contain NUL character"); + PyErr_SetString(PyExc_TypeError, "host name must not contain null character"); return 0; } return Py_CLEANUP_SUPPORTED; diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -593,8 +593,8 @@ if (len != NULL) *len = PyBytes_GET_SIZE(obj); else if (strlen(*s) != (size_t)PyBytes_GET_SIZE(obj)) { - PyErr_SetString(PyExc_TypeError, - "expected bytes with no null"); + PyErr_SetString(PyExc_ValueError, + "embedded null byte"); return -1; } return 0; diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -3247,7 +3247,7 @@ wlen2 = wcslen(wstr); if (wlen2 != wlen) { PyMem_Free(wstr); - PyErr_SetString(PyExc_TypeError, "embedded null character"); + PyErr_SetString(PyExc_ValueError, "embedded null character"); return NULL; } @@ -3519,8 +3519,8 @@ if (locale_error_handler(errors, &surrogateescape) < 0) return NULL; - if (str[len] != '\0' || (size_t)len != strlen(str)) { - PyErr_SetString(PyExc_TypeError, "embedded null character"); + if (str[len] != '\0' || (size_t)len != strlen(str)) { + PyErr_SetString(PyExc_ValueError, "embedded null byte"); return NULL; } @@ -3697,7 +3697,7 @@ size = PyBytes_GET_SIZE(output); data = PyBytes_AS_STRING(output); if ((size_t)size != strlen(data)) { - PyErr_SetString(PyExc_TypeError, "embedded NUL character"); + PyErr_SetString(PyExc_ValueError, "embedded null byte"); Py_DECREF(output); return 0; } @@ -3741,7 +3741,7 @@ } if (findchar(PyUnicode_DATA(output), PyUnicode_KIND(output), PyUnicode_GET_LENGTH(output), 0, 1) >= 0) { - PyErr_SetString(PyExc_TypeError, "embedded NUL character"); + PyErr_SetString(PyExc_ValueError, "embedded null character"); Py_DECREF(output); return 0; } diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -745,8 +745,8 @@ return NULL; } - if (strlen(str) != 
(size_t)size) { - PyErr_SetString(PyExc_TypeError, + if (strlen(str) != (size_t)size) { + PyErr_SetString(PyExc_ValueError, "source code string cannot contain null bytes"); return NULL; } diff --git a/Python/getargs.c b/Python/getargs.c --- a/Python/getargs.c +++ b/Python/getargs.c @@ -872,10 +872,10 @@ STORE_SIZE(count); format++; } else { - if (strlen(*p) != (size_t)count) - return converterr( - "bytes without null bytes", - arg, msgbuf, bufsize); + if (strlen(*p) != (size_t)count) { + PyErr_SetString(PyExc_ValueError, "embedded null byte"); + RETURN_ERR_OCCURRED; + } } break; } @@ -948,16 +948,15 @@ if (sarg == NULL) return converterr(CONV_UNICODE, arg, msgbuf, bufsize); + if (strlen(sarg) != (size_t)len) { + PyErr_SetString(PyExc_ValueError, "embedded null character"); + RETURN_ERR_OCCURRED; + } *p = sarg; } else return converterr(c == 'z' ? "str or None" : "str", arg, msgbuf, bufsize); - if (*p != NULL && sarg != NULL && (Py_ssize_t) strlen(*p) != len) - return converterr( - c == 'z' ? "str without null characters or None" - : "str without null characters", - arg, msgbuf, bufsize); } break; } @@ -994,10 +993,10 @@ *p = PyUnicode_AsUnicodeAndSize(arg, &len); if (*p == NULL) RETURN_ERR_OCCURRED; - if (Py_UNICODE_strlen(*p) != (size_t)len) - return converterr( - "str without null characters or None", - arg, msgbuf, bufsize); + if (Py_UNICODE_strlen(*p) != (size_t)len) { + PyErr_SetString(PyExc_ValueError, "embedded null character"); + RETURN_ERR_OCCURRED; + } } else return converterr(c == 'Z' ? "str or None" : "str", arg, msgbuf, bufsize); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 20:46:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 20:46:06 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE5NTI0?= =?utf-8?q?=3A_Fixed_resource_leak_in_the_HTTP_connection_when_an_invalid?= Message-ID: <3hr4TV6nbgz7Ljp@mail.python.org> http://hg.python.org/cpython/rev/c1fb19907cc4 changeset: 92366:c1fb19907cc4 branch: 3.4 parent: 92363:e8447da8791d user: Serhiy Storchaka date: Sat Sep 06 21:41:39 2014 +0300 summary: Issue #19524: Fixed resource leak in the HTTP connection when an invalid response is received. Patch by Martin Panter. files: Lib/test/test_urllib.py | 73 ++++++++++++++------------- Lib/test/test_urllib2.py | 29 +++++++++++ Lib/urllib/request.py | 25 +++++---- Misc/ACKS | 1 + Misc/NEWS | 3 + 5 files changed, 86 insertions(+), 45 deletions(-) diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py --- a/Lib/test/test_urllib.py +++ b/Lib/test/test_urllib.py @@ -48,43 +48,48 @@ return opener.open(url, data) +def fakehttp(fakedata): + class FakeSocket(io.BytesIO): + io_refs = 1 + + def sendall(self, data): + FakeHTTPConnection.buf = data + + def makefile(self, *args, **kwds): + self.io_refs += 1 + return self + + def read(self, amt=None): + if self.closed: + return b"" + return io.BytesIO.read(self, amt) + + def readline(self, length=None): + if self.closed: + return b"" + return io.BytesIO.readline(self, length) + + def close(self): + self.io_refs -= 1 + if self.io_refs == 0: + io.BytesIO.close(self) + + class FakeHTTPConnection(http.client.HTTPConnection): + + # buffer to store data for verification in urlopen tests. 
+ buf = None + fakesock = FakeSocket(fakedata) + + def connect(self): + self.sock = self.fakesock + + return FakeHTTPConnection + + class FakeHTTPMixin(object): def fakehttp(self, fakedata): - class FakeSocket(io.BytesIO): - io_refs = 1 - - def sendall(self, data): - FakeHTTPConnection.buf = data - - def makefile(self, *args, **kwds): - self.io_refs += 1 - return self - - def read(self, amt=None): - if self.closed: - return b"" - return io.BytesIO.read(self, amt) - - def readline(self, length=None): - if self.closed: - return b"" - return io.BytesIO.readline(self, length) - - def close(self): - self.io_refs -= 1 - if self.io_refs == 0: - io.BytesIO.close(self) - - class FakeHTTPConnection(http.client.HTTPConnection): - - # buffer to store data for verification in urlopen tests. - buf = None - - def connect(self): - self.sock = FakeSocket(fakedata) - self._connection_class = http.client.HTTPConnection - http.client.HTTPConnection = FakeHTTPConnection + http.client.HTTPConnection = fakehttp(fakedata) def unfakehttp(self): http.client.HTTPConnection = self._connection_class diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py --- a/Lib/test/test_urllib2.py +++ b/Lib/test/test_urllib2.py @@ -1,5 +1,6 @@ import unittest from test import support +from test import test_urllib import os import io @@ -13,6 +14,7 @@ from urllib.request import Request, OpenerDirector, _parse_proxy, _proxy_bypass_macosx_sysconf from urllib.parse import urlparse import urllib.error +import http.client # XXX # Request @@ -1393,6 +1395,33 @@ self.assertEqual(len(http_handler.requests), 1) self.assertFalse(http_handler.requests[0].has_header(auth_header)) + def test_http_closed(self): + """Test the connection is cleaned up when the response is closed""" + for (transfer, data) in ( + ("Connection: close", b"data"), + ("Transfer-Encoding: chunked", b"4\r\ndata\r\n0\r\n\r\n"), + ("Content-Length: 4", b"data"), + ): + header = "HTTP/1.1 200 OK\r\n{}\r\n\r\n".format(transfer) + conn = test_urllib.fakehttp(header.encode() + data) + handler = urllib.request.AbstractHTTPHandler() + req = Request("http://dummy/") + req.timeout = None + with handler.do_open(conn, req) as resp: + resp.read() + self.assertTrue(conn.fakesock.closed, + "Connection not closed with {!r}".format(transfer)) + + def test_invalid_closed(self): + """Test the connection is cleaned up after an invalid response""" + conn = test_urllib.fakehttp(b"") + handler = urllib.request.AbstractHTTPHandler() + req = Request("http://dummy/") + req.timeout = None + with self.assertRaises(http.client.BadStatusLine): + handler.do_open(conn, req) + self.assertTrue(conn.fakesock.closed, "Connection not closed") + class MiscTests(unittest.TestCase): diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py --- a/Lib/urllib/request.py +++ b/Lib/urllib/request.py @@ -1170,18 +1170,21 @@ h.set_tunnel(req._tunnel_host, headers=tunnel_headers) try: - h.request(req.get_method(), req.selector, req.data, headers) - except OSError as err: # timeout error + try: + h.request(req.get_method(), req.selector, req.data, headers) + except OSError as err: # timeout error + raise URLError(err) + r = h.getresponse() + except: h.close() - raise URLError(err) - else: - r = h.getresponse() - # If the server does not send us a 'Connection: close' header, - # HTTPConnection assumes the socket should be left open. Manually - # mark the socket to be closed when this response object goes away. 
- if h.sock: - h.sock.close() - h.sock = None + raise + + # If the server does not send us a 'Connection: close' header, + # HTTPConnection assumes the socket should be left open. Manually + # mark the socket to be closed when this response object goes away. + if h.sock: + h.sock.close() + h.sock = None r.url = req.get_full_url() # This line replaces the .msg attribute of the HTTPResponse diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1003,6 +1003,7 @@ Todd R. Palmer Juan David Ib??ez Palomar Jan Palus +Martin Panter Mathias Panzenb?ck M. Papillon Peter Parente diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #19524: Fixed resource leak in the HTTP connection when an invalid + response is received. Patch by Martin Panter. + - Issue #22051: turtledemo no longer reloads examples to re-run them. Initialization of variables and gui setup should be done in main(), which is called each time a demo is run, but not on import. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 20:46:08 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 20:46:08 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2319524=3A_Fixed_resource_leak_in_the_HTTP_connec?= =?utf-8?q?tion_when_an_invalid?= Message-ID: <3hr4TX2hCgz7LkN@mail.python.org> http://hg.python.org/cpython/rev/43bf95480c3c changeset: 92367:43bf95480c3c parent: 92365:25032ec29315 parent: 92366:c1fb19907cc4 user: Serhiy Storchaka date: Sat Sep 06 21:43:49 2014 +0300 summary: Issue #19524: Fixed resource leak in the HTTP connection when an invalid response is received. Patch by Martin Panter. files: Lib/test/test_urllib.py | 73 ++++++++++++++------------- Lib/test/test_urllib2.py | 29 +++++++++++ Lib/urllib/request.py | 25 +++++---- Misc/ACKS | 1 + Misc/NEWS | 3 + 5 files changed, 86 insertions(+), 45 deletions(-) diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py --- a/Lib/test/test_urllib.py +++ b/Lib/test/test_urllib.py @@ -48,43 +48,48 @@ return opener.open(url, data) +def fakehttp(fakedata): + class FakeSocket(io.BytesIO): + io_refs = 1 + + def sendall(self, data): + FakeHTTPConnection.buf = data + + def makefile(self, *args, **kwds): + self.io_refs += 1 + return self + + def read(self, amt=None): + if self.closed: + return b"" + return io.BytesIO.read(self, amt) + + def readline(self, length=None): + if self.closed: + return b"" + return io.BytesIO.readline(self, length) + + def close(self): + self.io_refs -= 1 + if self.io_refs == 0: + io.BytesIO.close(self) + + class FakeHTTPConnection(http.client.HTTPConnection): + + # buffer to store data for verification in urlopen tests. 
+ buf = None + fakesock = FakeSocket(fakedata) + + def connect(self): + self.sock = self.fakesock + + return FakeHTTPConnection + + class FakeHTTPMixin(object): def fakehttp(self, fakedata): - class FakeSocket(io.BytesIO): - io_refs = 1 - - def sendall(self, data): - FakeHTTPConnection.buf = data - - def makefile(self, *args, **kwds): - self.io_refs += 1 - return self - - def read(self, amt=None): - if self.closed: - return b"" - return io.BytesIO.read(self, amt) - - def readline(self, length=None): - if self.closed: - return b"" - return io.BytesIO.readline(self, length) - - def close(self): - self.io_refs -= 1 - if self.io_refs == 0: - io.BytesIO.close(self) - - class FakeHTTPConnection(http.client.HTTPConnection): - - # buffer to store data for verification in urlopen tests. - buf = None - - def connect(self): - self.sock = FakeSocket(fakedata) - self._connection_class = http.client.HTTPConnection - http.client.HTTPConnection = FakeHTTPConnection + http.client.HTTPConnection = fakehttp(fakedata) def unfakehttp(self): http.client.HTTPConnection = self._connection_class diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py --- a/Lib/test/test_urllib2.py +++ b/Lib/test/test_urllib2.py @@ -1,5 +1,6 @@ import unittest from test import support +from test import test_urllib import os import io @@ -13,6 +14,7 @@ from urllib.request import Request, OpenerDirector, _parse_proxy, _proxy_bypass_macosx_sysconf from urllib.parse import urlparse import urllib.error +import http.client # XXX # Request @@ -1393,6 +1395,33 @@ self.assertEqual(len(http_handler.requests), 1) self.assertFalse(http_handler.requests[0].has_header(auth_header)) + def test_http_closed(self): + """Test the connection is cleaned up when the response is closed""" + for (transfer, data) in ( + ("Connection: close", b"data"), + ("Transfer-Encoding: chunked", b"4\r\ndata\r\n0\r\n\r\n"), + ("Content-Length: 4", b"data"), + ): + header = "HTTP/1.1 200 OK\r\n{}\r\n\r\n".format(transfer) + conn = test_urllib.fakehttp(header.encode() + data) + handler = urllib.request.AbstractHTTPHandler() + req = Request("http://dummy/") + req.timeout = None + with handler.do_open(conn, req) as resp: + resp.read() + self.assertTrue(conn.fakesock.closed, + "Connection not closed with {!r}".format(transfer)) + + def test_invalid_closed(self): + """Test the connection is cleaned up after an invalid response""" + conn = test_urllib.fakehttp(b"") + handler = urllib.request.AbstractHTTPHandler() + req = Request("http://dummy/") + req.timeout = None + with self.assertRaises(http.client.BadStatusLine): + handler.do_open(conn, req) + self.assertTrue(conn.fakesock.closed, "Connection not closed") + class MiscTests(unittest.TestCase): diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py --- a/Lib/urllib/request.py +++ b/Lib/urllib/request.py @@ -1170,18 +1170,21 @@ h.set_tunnel(req._tunnel_host, headers=tunnel_headers) try: - h.request(req.get_method(), req.selector, req.data, headers) - except OSError as err: # timeout error + try: + h.request(req.get_method(), req.selector, req.data, headers) + except OSError as err: # timeout error + raise URLError(err) + r = h.getresponse() + except: h.close() - raise URLError(err) - else: - r = h.getresponse() - # If the server does not send us a 'Connection: close' header, - # HTTPConnection assumes the socket should be left open. Manually - # mark the socket to be closed when this response object goes away. 
- if h.sock: - h.sock.close() - h.sock = None + raise + + # If the server does not send us a 'Connection: close' header, + # HTTPConnection assumes the socket should be left open. Manually + # mark the socket to be closed when this response object goes away. + if h.sock: + h.sock.close() + h.sock = None r.url = req.get_full_url() # This line replaces the .msg attribute of the HTTPResponse diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1012,6 +1012,7 @@ Todd R. Palmer Juan David Ib??ez Palomar Jan Palus +Martin Panter Mathias Panzenb?ck M. Papillon Peter Parente diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #19524: Fixed resource leak in the HTTP connection when an invalid + response is received. Patch by Martin Panter. + - Issue #20421: Add a .version() method to SSL sockets exposing the actual protocol version in use. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:21:26 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:21:26 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322131=3A_Moderniz?= =?utf-8?q?ed_the_code_of_the_uuid_module=2E?= Message-ID: <3hr5GG3zyJz7Ljj@mail.python.org> http://hg.python.org/cpython/rev/f7b5038d3102 changeset: 92368:f7b5038d3102 user: Serhiy Storchaka date: Sat Sep 06 22:14:04 2014 +0300 summary: Issue #22131: Modernized the code of the uuid module. Optimized bytes and bytes_le properties of UUID and UUID constructor with bytes_le argument. Fixed a bug in handling an error occured during reading from a pipe in _ipconfig_getnode(). files: Lib/uuid.py | 33 ++++++++++++--------------------- 1 files changed, 12 insertions(+), 21 deletions(-) diff --git a/Lib/uuid.py b/Lib/uuid.py --- a/Lib/uuid.py +++ b/Lib/uuid.py @@ -139,10 +139,8 @@ if bytes_le is not None: if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') - bytes = (bytes_(reversed(bytes_le[0:4])) + - bytes_(reversed(bytes_le[4:6])) + - bytes_(reversed(bytes_le[6:8])) + - bytes_le[8:]) + bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + + bytes_le[8-1:6-1:-1] + bytes_le[8:]) if bytes is not None: if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') @@ -234,17 +232,12 @@ @property def bytes(self): - bytes = bytearray() - for shift in range(0, 128, 8): - bytes.insert(0, (self.int >> shift) & 0xff) - return bytes_(bytes) + return self.int.to_bytes(16, 'big') @property def bytes_le(self): bytes = self.bytes - return (bytes_(reversed(bytes[0:4])) + - bytes_(reversed(bytes[4:6])) + - bytes_(reversed(bytes[6:8])) + + return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + bytes[8:]) @property @@ -383,13 +376,11 @@ pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all') except OSError: continue - else: + with pipe: for line in pipe: value = line.split(':')[-1].strip().lower() if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value): return int(value.replace('-', ''), 16) - finally: - pipe.close() def _netbios_getnode(): """Get the hardware address on Windows using NetBIOS calls. 
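What the modernized conversions above compute can be checked against the public ``uuid`` API: ``bytes`` is the 128-bit integer rendered big-endian, and ``bytes_le`` byte-swaps the first three fields (4, 2 and 2 bytes). A short sketch, assuming Python 3 for ``int.to_bytes()``::

    import uuid

    u = uuid.UUID('12345678-1234-5678-1234-567812345678')

    # .bytes is the 128-bit integer in big-endian order, which is exactly
    # what int.to_bytes(16, 'big') produces in the patched property.
    assert u.bytes == u.int.to_bytes(16, 'big')

    # .bytes_le reverses the first three fields and leaves the last
    # 8 bytes alone -- the same reversed slicing used in the patch.
    b = u.bytes
    assert u.bytes_le == b[3::-1] + b[5:3:-1] + b[7:5:-1] + b[8:]

    # Round-tripping through either representation gives the same UUID.
    assert uuid.UUID(bytes=u.bytes) == uuid.UUID(bytes_le=u.bytes_le) == u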
@@ -416,9 +407,10 @@ if win32wnet.Netbios(ncb) != 0: continue status._unpack() - bytes = status.adapter_address - return ((bytes[0]<<40) + (bytes[1]<<32) + (bytes[2]<<24) + - (bytes[3]<<16) + (bytes[4]<<8) + bytes[5]) + bytes = status.adapter_address[:6] + if len(bytes) != 6: + continue + return int.from_bytes(bytes, 'big') # Thanks to Thomas Heller for ctypes and for his help with its use here. @@ -487,7 +479,7 @@ def _random_getnode(): """Get a random node ID, with eighth bit set as suggested by RFC 4122.""" import random - return random.randrange(0, 1<<48) | 0x010000000000 + return random.getrandbits(48) | 0x010000000000 _node = None @@ -544,7 +536,7 @@ _last_timestamp = timestamp if clock_seq is None: import random - clock_seq = random.randrange(1<<14) # instead of stable storage + clock_seq = random.getrandbits(14) # instead of stable storage time_low = timestamp & 0xffffffff time_mid = (timestamp >> 32) & 0xffff time_hi_version = (timestamp >> 48) & 0x0fff @@ -576,8 +568,7 @@ return UUID(bytes=os.urandom(16), version=4) except: import random - bytes = bytes_(random.randrange(256) for i in range(16)) - return UUID(bytes=bytes, version=4) + return UUID(int=random.getrandbits(128), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:21:27 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:21:27 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMTMx?= =?utf-8?q?=3A_Fixed_a_bug_in_handling_an_error_occured_during_reading_fro?= =?utf-8?q?m?= Message-ID: <3hr5GH5WMCz7Lk8@mail.python.org> http://hg.python.org/cpython/rev/d8c6b15a2ae3 changeset: 92369:d8c6b15a2ae3 branch: 2.7 parent: 92357:322f77ee6d5a user: Serhiy Storchaka date: Sat Sep 06 22:17:06 2014 +0300 summary: Issue #22131: Fixed a bug in handling an error occured during reading from a pipe in _ipconfig_getnode(). files: Lib/uuid.py | 4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diff --git a/Lib/uuid.py b/Lib/uuid.py --- a/Lib/uuid.py +++ b/Lib/uuid.py @@ -366,13 +366,11 @@ pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all') except IOError: continue - else: + with pipe: for line in pipe: value = line.split(':')[-1].strip().lower() if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value): return int(value.replace('-', ''), 16) - finally: - pipe.close() def _netbios_getnode(): """Get the hardware address on Windows using NetBIOS calls. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:21:28 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:21:28 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMTMx?= =?utf-8?q?=3A_Fixed_a_bug_in_handling_an_error_occured_during_reading_fro?= =?utf-8?q?m?= Message-ID: <3hr5GJ6w5Zz7LkT@mail.python.org> http://hg.python.org/cpython/rev/8a61a287776d changeset: 92370:8a61a287776d branch: 3.4 parent: 92366:c1fb19907cc4 user: Serhiy Storchaka date: Sat Sep 06 22:17:24 2014 +0300 summary: Issue #22131: Fixed a bug in handling an error occured during reading from a pipe in _ipconfig_getnode(). 
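The fix in both branches replaces a try/finally around ``pipe.close()`` with a ``with`` statement, relying on the fact that the file-like object returned by ``os.popen()`` is a context manager, so the pipe is closed even if reading from it raises. A generic sketch of the pattern, using a made-up command rather than the uuid helper itself::

    import os

    def first_line_of(command):
        # The wrapper returned by os.popen() closes the underlying pipe
        # when the with block exits, even if iteration raises.
        with os.popen(command) as pipe:
            for line in pipe:
                return line.strip()
        return None

    print(first_line_of('echo hello'))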
files: Lib/uuid.py | 4 +--- 1 files changed, 1 insertions(+), 3 deletions(-) diff --git a/Lib/uuid.py b/Lib/uuid.py --- a/Lib/uuid.py +++ b/Lib/uuid.py @@ -383,13 +383,11 @@ pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all') except OSError: continue - else: + with pipe: for line in pipe: value = line.split(':')[-1].strip().lower() if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value): return int(value.replace('-', ''), 16) - finally: - pipe.close() def _netbios_getnode(): """Get the hardware address on Windows using NetBIOS calls. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:21:30 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:21:30 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <3hr5GL1ffjz7LkR@mail.python.org> http://hg.python.org/cpython/rev/1f365cea77b8 changeset: 92371:1f365cea77b8 parent: 92368:f7b5038d3102 parent: 92370:8a61a287776d user: Serhiy Storchaka date: Sat Sep 06 22:18:35 2014 +0300 summary: Null merge files: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:52:09 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:52:09 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMjI2?= =?utf-8?q?=3A_Added_private_function_=5Fsplitdict=28=29_in_the_Tkinter_mo?= =?utf-8?q?dule=2E?= Message-ID: <3hr5xj6BzVz7LjY@mail.python.org> http://hg.python.org/cpython/rev/7b0fdc1e917a changeset: 92372:7b0fdc1e917a branch: 2.7 parent: 92369:d8c6b15a2ae3 user: Serhiy Storchaka date: Sat Sep 06 22:47:02 2014 +0300 summary: Issue #22226: Added private function _splitdict() in the Tkinter module. First letter no longer is stripped from the "status" key in the result of Treeview.heading(). files: Lib/lib-tk/Tkinter.py | 78 +++++----- Lib/lib-tk/test/test_ttk/test_functions.py | 27 +-- Lib/lib-tk/ttk.py | 62 +++---- Lib/test/test_tcl.py | 39 +++++ Misc/NEWS | 3 + 5 files changed, 116 insertions(+), 93 deletions(-) diff --git a/Lib/lib-tk/Tkinter.py b/Lib/lib-tk/Tkinter.py --- a/Lib/lib-tk/Tkinter.py +++ b/Lib/lib-tk/Tkinter.py @@ -123,6 +123,29 @@ try: _cnfmerge = _tkinter._cnfmerge except AttributeError: pass +def _splitdict(tk, v, cut_minus=True, conv=None): + """Return a properly formatted dict built from Tcl list pairs. + + If cut_minus is True, the supposed '-' prefix will be removed from + keys. If conv is specified, it is used to convert values. + + Tcl list is expected to contain an even number of elements. + """ + t = tk.splitlist(v) + if len(t) % 2: + raise RuntimeError('Tcl list representing a dict is expected ' + 'to contain an even number of elements') + it = iter(t) + dict = {} + for key, value in zip(it, it): + key = str(key) + if cut_minus and key[0] == '-': + key = key[1:] + if conv: + value = conv(value) + dict[key] = value + return dict + class Event: """Container for the properties of an event. 
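The core idiom in the ``_splitdict()`` helper quoted above is pairing up a flat key/value sequence by drawing two items at a time from a single iterator (``zip(it, it)``). A stand-alone sketch of that idiom, with made-up data instead of a real Tcl interpreter::

    def pairs_to_dict(flat, cut_minus=True):
        # ('-sticky', 'nsew', '-padx', '2') -> {'sticky': 'nsew', 'padx': '2'}
        if len(flat) % 2:
            raise ValueError('expected an even number of elements')
        it = iter(flat)
        result = {}
        for key, value in zip(it, it):
            if cut_minus and key.startswith('-'):
                key = key[1:]
            result[key] = value
        return result

    print(pairs_to_dict(('-sticky', 'nsew', '-padx', '2')))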
@@ -1390,15 +1413,10 @@ else: options = self._options(cnf, kw) if not options: - res = self.tk.call('grid', - command, self._w, index) - words = self.tk.splitlist(res) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - dict[key] = self._gridconvvalue(value) - return dict + return _splitdict( + self.tk, + self.tk.call('grid', command, self._w, index), + conv=self._gridconvvalue) res = self.tk.call( ('grid', command, self._w, index) + options) @@ -1921,16 +1939,10 @@ def pack_info(self): """Return information about the packing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('pack', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('pack', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = pack_info propagate = pack_propagate = Misc.pack_propagate slaves = pack_slaves = Misc.pack_slaves @@ -1972,16 +1984,10 @@ def place_info(self): """Return information about the placing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('place', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('place', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = place_info slaves = place_slaves = Misc.place_slaves @@ -2021,16 +2027,10 @@ def grid_info(self): """Return information about the options for positioning this widget in a grid.""" - words = self.tk.splitlist( - self.tk.call('grid', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('grid', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = grid_info location = grid_location = Misc.grid_location propagate = grid_propagate = Misc.grid_propagate diff --git a/Lib/lib-tk/test/test_ttk/test_functions.py b/Lib/lib-tk/test/test_ttk/test_functions.py --- a/Lib/lib-tk/test/test_ttk/test_functions.py +++ b/Lib/lib-tk/test/test_ttk/test_functions.py @@ -324,26 +324,13 @@ "-opt {3 2m}") - def test_dict_from_tcltuple(self): - fakettuple = ('-a', '{1 2 3}', '-something', 'foo') - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple, False), - {'-a': '{1 2 3}', '-something': 'foo'}) - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple), - {'a': '{1 2 3}', 'something': 'foo'}) - - # passing a tuple with a single item should return an empty dict, - # since it tries to break the tuple by pairs. 
- self.assertFalse(ttk._dict_from_tcltuple(('single', ))) - - sspec = MockStateSpec('a', 'b') - self.assertEqual(ttk._dict_from_tcltuple(('-a', (sspec, 'val'))), - {'a': [('a', 'b', 'val')]}) - - self.assertEqual(ttk._dict_from_tcltuple((MockTclObj('-padding'), - [MockTclObj('1'), 2, MockTclObj('3m')])), - {'padding': [1, 2, '3m']}) + def test_tclobj_to_py(self): + self.assertEqual( + ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')), + [('a', 'b', 'val')]) + self.assertEqual( + ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]), + [1, 2, '3m']) def test_list_from_statespec(self): diff --git a/Lib/lib-tk/ttk.py b/Lib/lib-tk/ttk.py --- a/Lib/lib-tk/ttk.py +++ b/Lib/lib-tk/ttk.py @@ -26,7 +26,7 @@ "tclobjs_to_py", "setup_master"] import Tkinter -from Tkinter import _flatten, _join, _stringify +from Tkinter import _flatten, _join, _stringify, _splitdict # Verify if Tk is new enough to not need the Tile package _REQUIRE_TILE = True if Tkinter.TkVersion < 8.5 else False @@ -242,21 +242,6 @@ return '\n'.join(script) -def _dict_from_tcltuple(ttuple, cut_minus=True): - """Break tuple in pairs, format it properly, then build the return - dict. If cut_minus is True, the supposed '-' prefixing options will - be removed. - - ttuple is expected to contain an even number of elements.""" - opt_start = 1 if cut_minus else 0 - - retdict = {} - it = iter(ttuple) - for opt, val in zip(it, it): - retdict[str(opt)[opt_start:]] = val - - return tclobjs_to_py(retdict) - def _list_from_statespec(stuple): """Construct a list from the given statespec tuple according to the accepted statespec accepted by _format_mapdict.""" @@ -316,7 +301,7 @@ if len(options) % 2: # option specified without a value, return its value return res - return _dict_from_tcltuple(tk.splitlist(res)) + return _splitdict(tk, res, conv=_tclobj_to_py) def _convert_stringval(value): """Converts a value to, hopefully, a more appropriate Python object.""" @@ -336,20 +321,24 @@ x = int(x) return x +def _tclobj_to_py(val): + """Return value converted from Tcl object to Python object.""" + if val and hasattr(val, '__len__') and not isinstance(val, basestring): + if getattr(val[0], 'typename', None) == 'StateSpec': + val = _list_from_statespec(val) + else: + val = map(_convert_stringval, val) + + elif hasattr(val, 'typename'): # some other (single) Tcl object + val = _convert_stringval(val) + + return val + def tclobjs_to_py(adict): """Returns adict with its values converted from Tcl objects to Python objects.""" - for opt, val in adict.iteritems(): - if val and hasattr(val, '__len__') and not isinstance(val, basestring): - if getattr(val[0], 'typename', None) == 'StateSpec': - val = _list_from_statespec(val) - else: - val = map(_convert_stringval, val) - - elif hasattr(val, 'typename'): # some other (single) Tcl object - val = _convert_stringval(val) - - adict[opt] = val + for opt, val in adict.items(): + adict[opt] = _tclobj_to_py(val) return adict @@ -409,8 +398,10 @@ return _list_from_statespec(self.tk.splitlist( self.tk.call(self._name, "map", style, '-%s' % query_opt))) - return _dict_from_tcltuple(self.tk.splitlist( - self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))) + return _splitdict( + self.tk, + self.tk.call(self._name, "map", style, *_format_mapdict(kw)), + conv=_tclobj_to_py) def lookup(self, style, option, state=None, default=None): @@ -1427,13 +1418,16 @@ def set(self, item, column=None, value=None): - """With one argument, returns a dictionary of column/value pairs - for the specified item. 
With two arguments, returns the current - value of the specified column. With three arguments, sets the + """Query or set the value of given item. + + With one argument, return a dictionary of column/value pairs + for the specified item. With two arguments, return the current + value of the specified column. With three arguments, set the value of given column in given item to the specified value.""" res = self.tk.call(self._w, "set", item, column, value) if column is None and value is None: - return _dict_from_tcltuple(self.tk.splitlist(res), False) + return _splitdict(self.tk, res, + cut_minus=False, conv=_tclobj_to_py) else: return res diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -7,6 +7,9 @@ # Skip this test if the _tkinter module wasn't built. _tkinter = test_support.import_module('_tkinter') +# Make sure tkinter._fix runs to set up the environment +tkinter = test_support.import_fresh_module('Tkinter') + from Tkinter import Tcl from _tkinter import TclError @@ -565,6 +568,42 @@ for arg, res in testcases: self.assertEqual(split(arg), res) + def test_splitdict(self): + splitdict = tkinter._splitdict + tcl = self.interp.tk + + arg = '-a {1 2 3} -something foo status {}' + self.assertEqual(splitdict(tcl, arg, False), + {'-a': '1 2 3', '-something': 'foo', 'status': ''}) + self.assertEqual(splitdict(tcl, arg), + {'a': '1 2 3', 'something': 'foo', 'status': ''}) + + arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}') + self.assertEqual(splitdict(tcl, arg, False), + {'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'}) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3), 'something': 'foo', 'status': '{}'}) + + self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ') + self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c')) + + arg = tcl.call('list', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3) if self.wantobjects else '1 2 3', + 'something': 'foo', 'status': ''}) + + if tcl_version >= (8, 5): + arg = tcl.call('dict', 'create', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5): + # Before 8.5.5 dicts were converted to lists through string + expected = {'a': '1 2 3', 'something': 'foo', 'status': ''} + else: + expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''} + self.assertEqual(splitdict(tcl, arg), expected) + + character_size = 4 if sys.maxunicode > 0xFFFF else 2 class BigmemTclTest(unittest.TestCase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #22226: First letter no longer is stripped from the "status" key in + the result of Treeview.heading(). + - Issue #22051: turtledemo no longer reloads examples to re-run them. Initialization of variables and gui setup should be done in main(), which is called each time a demo is run, but not on import. 
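The heart of the new Tkinter._splitdict() helper is the "it = iter(t); zip(it, it)" idiom, which walks a flat ('-key', value, '-key', value, ...) sequence two items at a time. Below is a self-contained restatement of that pairing logic on a plain tuple; the real helper first normalises its argument with tk.splitlist(), which needs a live Tcl interpreter, and the function name and sample data here are illustrative only:

    def splitdict_demo(pairs, cut_minus=True, conv=None):
        # Pure-Python restatement of _splitdict()'s pairing logic, no Tcl.
        if len(pairs) % 2:
            raise RuntimeError('Tcl list representing a dict is expected '
                               'to contain an even number of elements')
        it = iter(pairs)
        result = {}
        for key, value in zip(it, it):
            key = str(key)
            if cut_minus and key.startswith('-'):
                key = key[1:]
            result[key] = conv(value) if conv else value
        return result

    # Keys lose their '-' prefix; bare keys such as 'status' stay intact.
    assert splitdict_demo(('-a', '1 2 3', '-something', 'foo', 'status', '')) == \
        {'a': '1 2 3', 'something': 'foo', 'status': ''}
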
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:52:11 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:52:11 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjI2?= =?utf-8?q?=3A_Added_private_function_=5Fsplitdict=28=29_in_the_Tkinter_mo?= =?utf-8?q?dule=2E?= Message-ID: <3hr5xl2DQqz7Lk0@mail.python.org> http://hg.python.org/cpython/rev/f89995a4ec11 changeset: 92373:f89995a4ec11 branch: 3.4 parent: 92370:8a61a287776d user: Serhiy Storchaka date: Sat Sep 06 22:47:58 2014 +0300 summary: Issue #22226: Added private function _splitdict() in the Tkinter module. First letter no longer is stripped from the "status" key in the result of Treeview.heading(). files: Lib/test/test_tcl.py | 37 ++++- Lib/tkinter/__init__.py | 78 +++++----- Lib/tkinter/test/test_ttk/test_functions.py | 27 +-- Lib/tkinter/ttk.py | 60 +++---- Misc/NEWS | 3 + 5 files changed, 112 insertions(+), 93 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -7,7 +7,7 @@ _tkinter = support.import_module('_tkinter') # Make sure tkinter._fix runs to set up the environment -support.import_fresh_module('tkinter') +tkinter = support.import_fresh_module('tkinter') from tkinter import Tcl from _tkinter import TclError @@ -554,6 +554,41 @@ for arg, res in testcases: self.assertEqual(split(arg), res, msg=arg) + def test_splitdict(self): + splitdict = tkinter._splitdict + tcl = self.interp.tk + + arg = '-a {1 2 3} -something foo status {}' + self.assertEqual(splitdict(tcl, arg, False), + {'-a': '1 2 3', '-something': 'foo', 'status': ''}) + self.assertEqual(splitdict(tcl, arg), + {'a': '1 2 3', 'something': 'foo', 'status': ''}) + + arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}') + self.assertEqual(splitdict(tcl, arg, False), + {'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'}) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3), 'something': 'foo', 'status': '{}'}) + + self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ') + self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c')) + + arg = tcl.call('list', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3) if self.wantobjects else '1 2 3', + 'something': 'foo', 'status': ''}) + + if tcl_version >= (8, 5): + arg = tcl.call('dict', 'create', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5): + # Before 8.5.5 dicts were converted to lists through string + expected = {'a': '1 2 3', 'something': 'foo', 'status': ''} + else: + expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''} + self.assertEqual(splitdict(tcl, arg), expected) + class BigmemTclTest(unittest.TestCase): diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -112,6 +112,29 @@ try: _cnfmerge = _tkinter._cnfmerge except AttributeError: pass +def _splitdict(tk, v, cut_minus=True, conv=None): + """Return a properly formatted dict built from Tcl list pairs. + + If cut_minus is True, the supposed '-' prefix will be removed from + keys. If conv is specified, it is used to convert values. + + Tcl list is expected to contain an even number of elements. 
+ """ + t = tk.splitlist(v) + if len(t) % 2: + raise RuntimeError('Tcl list representing a dict is expected ' + 'to contain an even number of elements') + it = iter(t) + dict = {} + for key, value in zip(it, it): + key = str(key) + if cut_minus and key[0] == '-': + key = key[1:] + if conv: + value = conv(value) + dict[key] = value + return dict + class Event: """Container for the properties of an event. @@ -1391,15 +1414,10 @@ else: options = self._options(cnf, kw) if not options: - res = self.tk.call('grid', - command, self._w, index) - words = self.tk.splitlist(res) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - dict[key] = self._gridconvvalue(value) - return dict + return _splitdict( + self.tk, + self.tk.call('grid', command, self._w, index), + conv=self._gridconvvalue) res = self.tk.call( ('grid', command, self._w, index) + options) @@ -1959,16 +1977,10 @@ def pack_info(self): """Return information about the packing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('pack', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('pack', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = pack_info propagate = pack_propagate = Misc.pack_propagate slaves = pack_slaves = Misc.pack_slaves @@ -2010,16 +2022,10 @@ def place_info(self): """Return information about the placing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('place', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('place', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = place_info slaves = place_slaves = Misc.place_slaves @@ -2059,16 +2065,10 @@ def grid_info(self): """Return information about the options for positioning this widget in a grid.""" - words = self.tk.splitlist( - self.tk.call('grid', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('grid', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = grid_info location = grid_location = Misc.grid_location propagate = grid_propagate = Misc.grid_propagate diff --git a/Lib/tkinter/test/test_ttk/test_functions.py b/Lib/tkinter/test/test_ttk/test_functions.py --- a/Lib/tkinter/test/test_ttk/test_functions.py +++ b/Lib/tkinter/test/test_ttk/test_functions.py @@ -324,26 +324,13 @@ "-opt {3 2m}") - def test_dict_from_tcltuple(self): - fakettuple = ('-a', '{1 2 3}', '-something', 'foo') - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple, False), - {'-a': '{1 2 3}', '-something': 'foo'}) - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple), - {'a': '{1 2 3}', 'something': 'foo'}) - - # passing a tuple with a single item should return an empty dict, - # since it tries to break the tuple by pairs. 
- self.assertFalse(ttk._dict_from_tcltuple(('single', ))) - - sspec = MockStateSpec('a', 'b') - self.assertEqual(ttk._dict_from_tcltuple(('-a', (sspec, 'val'))), - {'a': [('a', 'b', 'val')]}) - - self.assertEqual(ttk._dict_from_tcltuple((MockTclObj('-padding'), - [MockTclObj('1'), 2, MockTclObj('3m')])), - {'padding': [1, 2, '3m']}) + def test_tclobj_to_py(self): + self.assertEqual( + ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')), + [('a', 'b', 'val')]) + self.assertEqual( + ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]), + [1, 2, '3m']) def test_list_from_statespec(self): diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -26,7 +26,7 @@ "tclobjs_to_py", "setup_master"] import tkinter -from tkinter import _flatten, _join, _stringify +from tkinter import _flatten, _join, _stringify, _splitdict # Verify if Tk is new enough to not need the Tile package _REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False @@ -240,21 +240,6 @@ return '\n'.join(script) -def _dict_from_tcltuple(ttuple, cut_minus=True): - """Break tuple in pairs, format it properly, then build the return - dict. If cut_minus is True, the supposed '-' prefixing options will - be removed. - - ttuple is expected to contain an even number of elements.""" - opt_start = 1 if cut_minus else 0 - - retdict = {} - it = iter(ttuple) - for opt, val in zip(it, it): - retdict[str(opt)[opt_start:]] = val - - return tclobjs_to_py(retdict) - def _list_from_statespec(stuple): """Construct a list from the given statespec tuple according to the accepted statespec accepted by _format_mapdict.""" @@ -314,7 +299,7 @@ if len(options) % 2: # option specified without a value, return its value return res - return _dict_from_tcltuple(tk.splitlist(res)) + return _splitdict(tk, res, conv=_tclobj_to_py) def _convert_stringval(value): """Converts a value to, hopefully, a more appropriate Python object.""" @@ -334,20 +319,24 @@ x = int(x) return x +def _tclobj_to_py(val): + """Return value converted from Tcl object to Python object.""" + if val and hasattr(val, '__len__') and not isinstance(val, str): + if getattr(val[0], 'typename', None) == 'StateSpec': + val = _list_from_statespec(val) + else: + val = list(map(_convert_stringval, val)) + + elif hasattr(val, 'typename'): # some other (single) Tcl object + val = _convert_stringval(val) + + return val + def tclobjs_to_py(adict): """Returns adict with its values converted from Tcl objects to Python objects.""" for opt, val in adict.items(): - if val and hasattr(val, '__len__') and not isinstance(val, str): - if getattr(val[0], 'typename', None) == 'StateSpec': - val = _list_from_statespec(val) - else: - val = list(map(_convert_stringval, val)) - - elif hasattr(val, 'typename'): # some other (single) Tcl object - val = _convert_stringval(val) - - adict[opt] = val + adict[opt] = _tclobj_to_py(val) return adict @@ -407,8 +396,10 @@ return _list_from_statespec(self.tk.splitlist( self.tk.call(self._name, "map", style, '-%s' % query_opt))) - return _dict_from_tcltuple(self.tk.splitlist( - self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))) + return _splitdict( + self.tk, + self.tk.call(self._name, "map", style, *_format_mapdict(kw)), + conv=_tclobj_to_py) def lookup(self, style, option, state=None, default=None): @@ -1425,13 +1416,16 @@ def set(self, item, column=None, value=None): - """With one argument, returns a dictionary of column/value pairs - for the specified item. 
With two arguments, returns the current - value of the specified column. With three arguments, sets the + """Query or set the value of given item. + + With one argument, return a dictionary of column/value pairs + for the specified item. With two arguments, return the current + value of the specified column. With three arguments, set the value of given column in given item to the specified value.""" res = self.tk.call(self._w, "set", item, column, value) if column is None and value is None: - return _dict_from_tcltuple(self.tk.splitlist(res), False) + return _splitdict(self.tk, res, + cut_minus=False, conv=_tclobj_to_py) else: return res diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #22226: First letter no longer is stripped from the "status" key in + the result of Treeview.heading(). + - Issue #19524: Fixed resource leak in the HTTP connection when an invalid response is received. Patch by Martin Panter. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 21:52:12 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 6 Sep 2014 21:52:12 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322226=3A_Added_private_function_=5Fsplitdict=28?= =?utf-8?q?=29_in_the_Tkinter_module=2E?= Message-ID: <3hr5xm5MG6z7LkY@mail.python.org> http://hg.python.org/cpython/rev/11cf18ec1900 changeset: 92374:11cf18ec1900 parent: 92371:1f365cea77b8 parent: 92373:f89995a4ec11 user: Serhiy Storchaka date: Sat Sep 06 22:49:07 2014 +0300 summary: Issue #22226: Added private function _splitdict() in the Tkinter module. First letter no longer is stripped from the "status" key in the result of Treeview.heading(). 
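The "status" regression named in the summary falls straight out of the replaced helper: ttk._dict_from_tcltuple() sliced the first character off every key whenever cut_minus was true, while the new _splitdict() only strips a key that actually begins with '-'. A two-line illustration with hypothetical keys (no Tk required):

    keys = ('-anchor', 'status')

    old_style = [k[1:] for k in keys]                        # unconditional k[1:]
    new_style = [k[1:] if k.startswith('-') else k for k in keys]

    assert old_style == ['anchor', 'tatus']    # first letter of 'status' lost
    assert new_style == ['anchor', 'status']   # behaviour after #22226
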
files: Lib/test/test_tcl.py | 37 ++++- Lib/tkinter/__init__.py | 78 +++++----- Lib/tkinter/test/test_ttk/test_functions.py | 27 +-- Lib/tkinter/ttk.py | 60 +++---- Misc/NEWS | 3 + 5 files changed, 112 insertions(+), 93 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -7,7 +7,7 @@ _tkinter = support.import_module('_tkinter') # Make sure tkinter._fix runs to set up the environment -support.import_fresh_module('tkinter') +tkinter = support.import_fresh_module('tkinter') from tkinter import Tcl from _tkinter import TclError @@ -566,6 +566,41 @@ for arg, res in testcases: self.assertEqual(split(arg), res, msg=arg) + def test_splitdict(self): + splitdict = tkinter._splitdict + tcl = self.interp.tk + + arg = '-a {1 2 3} -something foo status {}' + self.assertEqual(splitdict(tcl, arg, False), + {'-a': '1 2 3', '-something': 'foo', 'status': ''}) + self.assertEqual(splitdict(tcl, arg), + {'a': '1 2 3', 'something': 'foo', 'status': ''}) + + arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}') + self.assertEqual(splitdict(tcl, arg, False), + {'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'}) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3), 'something': 'foo', 'status': '{}'}) + + self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ') + self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c')) + + arg = tcl.call('list', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + self.assertEqual(splitdict(tcl, arg), + {'a': (1, 2, 3) if self.wantobjects else '1 2 3', + 'something': 'foo', 'status': ''}) + + if tcl_version >= (8, 5): + arg = tcl.call('dict', 'create', + '-a', (1, 2, 3), '-something', 'foo', 'status', ()) + if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5): + # Before 8.5.5 dicts were converted to lists through string + expected = {'a': '1 2 3', 'something': 'foo', 'status': ''} + else: + expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''} + self.assertEqual(splitdict(tcl, arg), expected) + class BigmemTclTest(unittest.TestCase): diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -112,6 +112,29 @@ try: _cnfmerge = _tkinter._cnfmerge except AttributeError: pass +def _splitdict(tk, v, cut_minus=True, conv=None): + """Return a properly formatted dict built from Tcl list pairs. + + If cut_minus is True, the supposed '-' prefix will be removed from + keys. If conv is specified, it is used to convert values. + + Tcl list is expected to contain an even number of elements. + """ + t = tk.splitlist(v) + if len(t) % 2: + raise RuntimeError('Tcl list representing a dict is expected ' + 'to contain an even number of elements') + it = iter(t) + dict = {} + for key, value in zip(it, it): + key = str(key) + if cut_minus and key[0] == '-': + key = key[1:] + if conv: + value = conv(value) + dict[key] = value + return dict + class Event: """Container for the properties of an event. 
@@ -1393,15 +1416,10 @@ else: options = self._options(cnf, kw) if not options: - res = self.tk.call('grid', - command, self._w, index) - words = self.tk.splitlist(res) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - dict[key] = self._gridconvvalue(value) - return dict + return _splitdict( + self.tk, + self.tk.call('grid', command, self._w, index), + conv=self._gridconvvalue) res = self.tk.call( ('grid', command, self._w, index) + options) @@ -1961,16 +1979,10 @@ def pack_info(self): """Return information about the packing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('pack', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('pack', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = pack_info propagate = pack_propagate = Misc.pack_propagate slaves = pack_slaves = Misc.pack_slaves @@ -2012,16 +2024,10 @@ def place_info(self): """Return information about the placing options for this widget.""" - words = self.tk.splitlist( - self.tk.call('place', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('place', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = place_info slaves = place_slaves = Misc.place_slaves @@ -2061,16 +2067,10 @@ def grid_info(self): """Return information about the options for positioning this widget in a grid.""" - words = self.tk.splitlist( - self.tk.call('grid', 'info', self._w)) - dict = {} - for i in range(0, len(words), 2): - key = words[i][1:] - value = words[i+1] - if str(value)[:1] == '.': - value = self._nametowidget(value) - dict[key] = value - return dict + d = _splitdict(self.tk, self.tk.call('grid', 'info', self._w)) + if 'in' in d: + d['in'] = self.nametowidget(d['in']) + return d info = grid_info location = grid_location = Misc.grid_location propagate = grid_propagate = Misc.grid_propagate diff --git a/Lib/tkinter/test/test_ttk/test_functions.py b/Lib/tkinter/test/test_ttk/test_functions.py --- a/Lib/tkinter/test/test_ttk/test_functions.py +++ b/Lib/tkinter/test/test_ttk/test_functions.py @@ -324,26 +324,13 @@ "-opt {3 2m}") - def test_dict_from_tcltuple(self): - fakettuple = ('-a', '{1 2 3}', '-something', 'foo') - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple, False), - {'-a': '{1 2 3}', '-something': 'foo'}) - - self.assertEqual(ttk._dict_from_tcltuple(fakettuple), - {'a': '{1 2 3}', 'something': 'foo'}) - - # passing a tuple with a single item should return an empty dict, - # since it tries to break the tuple by pairs. 
- self.assertFalse(ttk._dict_from_tcltuple(('single', ))) - - sspec = MockStateSpec('a', 'b') - self.assertEqual(ttk._dict_from_tcltuple(('-a', (sspec, 'val'))), - {'a': [('a', 'b', 'val')]}) - - self.assertEqual(ttk._dict_from_tcltuple((MockTclObj('-padding'), - [MockTclObj('1'), 2, MockTclObj('3m')])), - {'padding': [1, 2, '3m']}) + def test_tclobj_to_py(self): + self.assertEqual( + ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')), + [('a', 'b', 'val')]) + self.assertEqual( + ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]), + [1, 2, '3m']) def test_list_from_statespec(self): diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -26,7 +26,7 @@ "tclobjs_to_py", "setup_master"] import tkinter -from tkinter import _flatten, _join, _stringify +from tkinter import _flatten, _join, _stringify, _splitdict # Verify if Tk is new enough to not need the Tile package _REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False @@ -240,21 +240,6 @@ return '\n'.join(script) -def _dict_from_tcltuple(ttuple, cut_minus=True): - """Break tuple in pairs, format it properly, then build the return - dict. If cut_minus is True, the supposed '-' prefixing options will - be removed. - - ttuple is expected to contain an even number of elements.""" - opt_start = 1 if cut_minus else 0 - - retdict = {} - it = iter(ttuple) - for opt, val in zip(it, it): - retdict[str(opt)[opt_start:]] = val - - return tclobjs_to_py(retdict) - def _list_from_statespec(stuple): """Construct a list from the given statespec tuple according to the accepted statespec accepted by _format_mapdict.""" @@ -314,7 +299,7 @@ if len(options) % 2: # option specified without a value, return its value return res - return _dict_from_tcltuple(tk.splitlist(res)) + return _splitdict(tk, res, conv=_tclobj_to_py) def _convert_stringval(value): """Converts a value to, hopefully, a more appropriate Python object.""" @@ -334,20 +319,24 @@ x = int(x) return x +def _tclobj_to_py(val): + """Return value converted from Tcl object to Python object.""" + if val and hasattr(val, '__len__') and not isinstance(val, str): + if getattr(val[0], 'typename', None) == 'StateSpec': + val = _list_from_statespec(val) + else: + val = list(map(_convert_stringval, val)) + + elif hasattr(val, 'typename'): # some other (single) Tcl object + val = _convert_stringval(val) + + return val + def tclobjs_to_py(adict): """Returns adict with its values converted from Tcl objects to Python objects.""" for opt, val in adict.items(): - if val and hasattr(val, '__len__') and not isinstance(val, str): - if getattr(val[0], 'typename', None) == 'StateSpec': - val = _list_from_statespec(val) - else: - val = list(map(_convert_stringval, val)) - - elif hasattr(val, 'typename'): # some other (single) Tcl object - val = _convert_stringval(val) - - adict[opt] = val + adict[opt] = _tclobj_to_py(val) return adict @@ -407,8 +396,10 @@ return _list_from_statespec(self.tk.splitlist( self.tk.call(self._name, "map", style, '-%s' % query_opt))) - return _dict_from_tcltuple(self.tk.splitlist( - self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))) + return _splitdict( + self.tk, + self.tk.call(self._name, "map", style, *_format_mapdict(kw)), + conv=_tclobj_to_py) def lookup(self, style, option, state=None, default=None): @@ -1425,13 +1416,16 @@ def set(self, item, column=None, value=None): - """With one argument, returns a dictionary of column/value pairs - for the specified item. 
With two arguments, returns the current - value of the specified column. With three arguments, sets the + """Query or set the value of given item. + + With one argument, return a dictionary of column/value pairs + for the specified item. With two arguments, return the current + value of the specified column. With three arguments, set the value of given column in given item to the specified value.""" res = self.tk.call(self._w, "set", item, column, value) if column is None and value is None: - return _dict_from_tcltuple(self.tk.splitlist(res), False) + return _splitdict(self.tk, res, + cut_minus=False, conv=_tclobj_to_py) else: return res diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #22226: First letter no longer is stripped from the "status" key in + the result of Treeview.heading(). + - Issue #19524: Fixed resource leak in the HTTP connection when an invalid response is received. Patch by Martin Panter. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 23:24:43 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 6 Sep 2014 23:24:43 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_remove_various?= =?utf-8?q?_dead_version_checks_=28closes_=2322349=29?= Message-ID: <3hr80W3W09z7LlB@mail.python.org> http://hg.python.org/cpython/rev/7fece97e605d changeset: 92375:7fece97e605d branch: 3.4 parent: 92373:f89995a4ec11 user: Benjamin Peterson date: Sat Sep 06 17:24:12 2014 -0400 summary: remove various dead version checks (closes #22349) Patch from Thomas Kluyver. files: Lib/distutils/command/build_ext.py | 17 ++-------- Lib/distutils/sysconfig.py | 24 +------------- Lib/distutils/tests/test_build_ext.py | 20 +++++------ 3 files changed, 16 insertions(+), 45 deletions(-) diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py --- a/Lib/distutils/command/build_ext.py +++ b/Lib/distutils/command/build_ext.py @@ -14,13 +14,7 @@ from distutils.util import get_platform from distutils import log -# this keeps compatibility from 2.3 to 2.5 -if sys.version < "2.6": - USER_BASE = None - HAS_USER_SITE = False -else: - from site import USER_BASE - HAS_USER_SITE = True +from site import USER_BASE if os.name == 'nt': from distutils.msvccompiler import get_build_version @@ -97,14 +91,11 @@ "list of SWIG command line options"), ('swig=', None, "path to the SWIG executable"), + ('user', None, + "add user include, library and rpath") ] - boolean_options = ['inplace', 'debug', 'force', 'swig-cpp'] - - if HAS_USER_SITE: - user_options.append(('user', None, - "add user include, library and rpath")) - boolean_options.append('user') + boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] help_options = [ ('help-compiler', None, diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py --- a/Lib/distutils/sysconfig.py +++ b/Lib/distutils/sysconfig.py @@ -151,10 +151,7 @@ if standard_lib: return os.path.join(prefix, "Lib") else: - if get_python_version() < "2.2": - return prefix - else: - return os.path.join(prefix, "Lib", "site-packages") + return os.path.join(prefix, "Lib", "site-packages") else: raise DistutilsPlatformError( "I don't know where Python installs its library " @@ -244,12 +241,8 @@ inc_dir = _sys_home or project_base else: inc_dir = get_python_inc(plat_specific=1) - if get_python_version() < '2.2': - config_h = 'config.h' - else: - # The name of the config.h file changed in 2.2 - config_h = 'pyconfig.h' 
- return os.path.join(inc_dir, config_h) + + return os.path.join(inc_dir, 'pyconfig.h') def get_makefile_filename(): @@ -461,17 +454,6 @@ if python_build: g['LDSHARED'] = g['BLDSHARED'] - elif get_python_version() < '2.1': - # The following two branches are for 1.5.2 compatibility. - if sys.platform == 'aix4': # what about AIX 3.x ? - # Linker script is in the config directory, not in Modules as the - # Makefile says. - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - - g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) - global _config_vars _config_vars = g diff --git a/Lib/distutils/tests/test_build_ext.py b/Lib/distutils/tests/test_build_ext.py --- a/Lib/distutils/tests/test_build_ext.py +++ b/Lib/distutils/tests/test_build_ext.py @@ -31,12 +31,11 @@ self.tmp_dir = self.mkdtemp() self.sys_path = sys.path, sys.path[:] sys.path.append(self.tmp_dir) - if sys.version > "2.6": - import site - self.old_user_base = site.USER_BASE - site.USER_BASE = self.mkdtemp() - from distutils.command import build_ext - build_ext.USER_BASE = site.USER_BASE + import site + self.old_user_base = site.USER_BASE + site.USER_BASE = self.mkdtemp() + from distutils.command import build_ext + build_ext.USER_BASE = site.USER_BASE def test_build_ext(self): global ALREADY_TESTED @@ -84,11 +83,10 @@ support.unload('xx') sys.path = self.sys_path[0] sys.path[:] = self.sys_path[1] - if sys.version > "2.6": - import site - site.USER_BASE = self.old_user_base - from distutils.command import build_ext - build_ext.USER_BASE = self.old_user_base + import site + site.USER_BASE = self.old_user_base + from distutils.command import build_ext + build_ext.USER_BASE = self.old_user_base super(BuildExtTestCase, self).tearDown() def test_solaris_enable_shared(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 6 23:24:44 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 6 Sep 2014 23:24:44 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjIzNDkp?= Message-ID: <3hr80X5vP9z7Lkl@mail.python.org> http://hg.python.org/cpython/rev/688701337b1a changeset: 92376:688701337b1a parent: 92374:11cf18ec1900 parent: 92375:7fece97e605d user: Benjamin Peterson date: Sat Sep 06 17:24:35 2014 -0400 summary: merge 3.4 (#22349) files: Lib/distutils/command/build_ext.py | 17 ++-------- Lib/distutils/sysconfig.py | 24 +------------- Lib/distutils/tests/test_build_ext.py | 20 +++++------ 3 files changed, 16 insertions(+), 45 deletions(-) diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py --- a/Lib/distutils/command/build_ext.py +++ b/Lib/distutils/command/build_ext.py @@ -14,13 +14,7 @@ from distutils.util import get_platform from distutils import log -# this keeps compatibility from 2.3 to 2.5 -if sys.version < "2.6": - USER_BASE = None - HAS_USER_SITE = False -else: - from site import USER_BASE - HAS_USER_SITE = True +from site import USER_BASE if os.name == 'nt': from distutils.msvccompiler import get_build_version @@ -97,14 +91,11 @@ "list of SWIG command line options"), ('swig=', None, "path to the SWIG executable"), + ('user', None, + "add user include, library and rpath") ] - boolean_options = ['inplace', 'debug', 'force', 'swig-cpp'] - - if HAS_USER_SITE: - user_options.append(('user', None, - "add user include, library and rpath")) - 
boolean_options.append('user') + boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user'] help_options = [ ('help-compiler', None, diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py --- a/Lib/distutils/sysconfig.py +++ b/Lib/distutils/sysconfig.py @@ -151,10 +151,7 @@ if standard_lib: return os.path.join(prefix, "Lib") else: - if get_python_version() < "2.2": - return prefix - else: - return os.path.join(prefix, "Lib", "site-packages") + return os.path.join(prefix, "Lib", "site-packages") else: raise DistutilsPlatformError( "I don't know where Python installs its library " @@ -244,12 +241,8 @@ inc_dir = _sys_home or project_base else: inc_dir = get_python_inc(plat_specific=1) - if get_python_version() < '2.2': - config_h = 'config.h' - else: - # The name of the config.h file changed in 2.2 - config_h = 'pyconfig.h' - return os.path.join(inc_dir, config_h) + + return os.path.join(inc_dir, 'pyconfig.h') def get_makefile_filename(): @@ -461,17 +454,6 @@ if python_build: g['LDSHARED'] = g['BLDSHARED'] - elif get_python_version() < '2.1': - # The following two branches are for 1.5.2 compatibility. - if sys.platform == 'aix4': # what about AIX 3.x ? - # Linker script is in the config directory, not in Modules as the - # Makefile says. - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - - g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) - global _config_vars _config_vars = g diff --git a/Lib/distutils/tests/test_build_ext.py b/Lib/distutils/tests/test_build_ext.py --- a/Lib/distutils/tests/test_build_ext.py +++ b/Lib/distutils/tests/test_build_ext.py @@ -31,12 +31,11 @@ self.tmp_dir = self.mkdtemp() self.sys_path = sys.path, sys.path[:] sys.path.append(self.tmp_dir) - if sys.version > "2.6": - import site - self.old_user_base = site.USER_BASE - site.USER_BASE = self.mkdtemp() - from distutils.command import build_ext - build_ext.USER_BASE = site.USER_BASE + import site + self.old_user_base = site.USER_BASE + site.USER_BASE = self.mkdtemp() + from distutils.command import build_ext + build_ext.USER_BASE = site.USER_BASE def test_build_ext(self): global ALREADY_TESTED @@ -84,11 +83,10 @@ support.unload('xx') sys.path = self.sys_path[0] sys.path[:] = self.sys_path[1] - if sys.version > "2.6": - import site - site.USER_BASE = self.old_user_base - from distutils.command import build_ext - build_ext.USER_BASE = self.old_user_base + import site + site.USER_BASE = self.old_user_base + from distutils.command import build_ext + build_ext.USER_BASE = self.old_user_base super(BuildExtTestCase, self).tearDown() def test_solaris_enable_shared(self): -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Sun Sep 7 10:40:30 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 07 Sep 2014 10:40:30 +0200 Subject: [Python-checkins] Daily reference leaks (688701337b1a): sum=151928 Message-ID: results for 688701337b1a on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [0, -2, 0] references, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, -2, 0] references, sum=-2 test_site leaked [0, -2, 0] 
memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogummiCi', '-x'] From python-checkins at python.org Sun Sep 7 14:40:49 2014 From: python-checkins at python.org (larry.hastings) Date: Sun, 7 Sep 2014 14:40:49 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_Updated_3=2E4_release_schedul?= =?utf-8?q?e_for_all_extant_and_planned_releases=2E?= Message-ID: <3hrXKY0lM3z7LlJ@mail.python.org> http://hg.python.org/peps/rev/a20a9f37945f changeset: 5548:a20a9f37945f user: Larry Hastings date: Sun Sep 07 05:41:05 2014 -0700 summary: Updated 3.4 release schedule for all extant and planned releases. files: pep-0429.txt | 12 ++++++++++++ 1 files changed, 12 insertions(+), 0 deletions(-) diff --git a/pep-0429.txt b/pep-0429.txt --- a/pep-0429.txt +++ b/pep-0429.txt @@ -50,6 +50,18 @@ (Beta 1 was also "feature freeze"--no new features beyond this point.) +3.4.1 schedule +-------------- + +- 3.4.1 candidate 1: May 5, 2013 +- 3.4.1 final: May 18, 2013 + +3.4.2 schedule +-------------- + +- 3.4.2 candidate 1: May 5, 2013 +- 3.4.2 final: May 18, 2013 + Features for 3.4 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Mon Sep 8 03:35:51 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 8 Sep 2014 03:35:51 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_Was_there_a_single_thing_in_m?= =?utf-8?q?y_3=2E4=2E1_/_3=2E4=2E2_schedules_I_didn=27t_screw_up=3F__Nope!?= Message-ID: <3hrsWq6mtWz7Ljn@mail.python.org> http://hg.python.org/peps/rev/a49c7ee793fa changeset: 5549:a49c7ee793fa user: Larry Hastings date: Sun Sep 07 18:36:08 2014 -0700 summary: Was there a single thing in my 3.4.1 / 3.4.2 schedules I didn't screw up? Nope! files: pep-0429.txt | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pep-0429.txt b/pep-0429.txt --- a/pep-0429.txt +++ b/pep-0429.txt @@ -53,14 +53,14 @@ 3.4.1 schedule -------------- -- 3.4.1 candidate 1: May 5, 2013 -- 3.4.1 final: May 18, 2013 +- 3.4.1 candidate 1: May 5, 2014 +- 3.4.1 final: May 18, 2014 3.4.2 schedule -------------- -- 3.4.2 candidate 1: May 5, 2013 -- 3.4.2 final: May 18, 2013 +- 3.4.2 candidate 1: September 15, 2014 +- 3.4.2 final: September 28, 2014 -- Repository URL: http://hg.python.org/peps From solipsis at pitrou.net Mon Sep 8 10:40:41 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 08 Sep 2014 10:40:41 +0200 Subject: [Python-checkins] Daily reference leaks (688701337b1a): sum=151976 Message-ID: results for 688701337b1a on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [-2, -4, 0] references, sum=-6 test_collections leaked [-1, -2, 0] memory blocks, sum=-3 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_multiprocessing_spawn leaked [0, 0, 38] references, sum=38 test_multiprocessing_spawn leaked [0, 0, 17] memory blocks, sum=17 test_site leaked [-2, 2, -2] references, sum=-2 test_site leaked [-2, 2, -2] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog1YOlaw', '-x'] From python-checkins at python.org Mon Sep 8 20:24:28 2014 From: python-checkins at python.org (barry.warsaw) Date: Mon, 8 Sep 
2014 20:24:28 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_-_Issue_=2316662=3A_load?= =?utf-8?q?=5Ftests=28=29_is_now_unconditionally_run_when_it_is_present_in?= Message-ID: <3hsHvc0BmGz7Ljb@mail.python.org> http://hg.python.org/cpython/rev/d0ff527c53da changeset: 92377:d0ff527c53da user: Barry Warsaw date: Mon Sep 08 14:21:37 2014 -0400 summary: - Issue #16662: load_tests() is now unconditionally run when it is present in a package's __init__.py. TestLoader.loadTestsFromModule() still accepts use_load_tests, but it is deprecated and ignored. A new keyword-only attribute `pattern` is added and documented. Patch given by Robert Collins, tweaked by Barry Warsaw. files: Doc/library/unittest.rst | 71 ++- Lib/unittest/loader.py | 53 ++- Lib/unittest/test/test_discovery.py | 254 +++++++++++++++- Lib/unittest/test/test_loader.py | 153 +++++++++- Misc/NEWS | 6 + 5 files changed, 474 insertions(+), 63 deletions(-) diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -1561,7 +1561,7 @@ :class:`testCaseClass`. - .. method:: loadTestsFromModule(module) + .. method:: loadTestsFromModule(module, pattern=None) Return a suite of all tests cases contained in the given module. This method searches *module* for classes derived from :class:`TestCase` and @@ -1578,11 +1578,18 @@ If a module provides a ``load_tests`` function it will be called to load the tests. This allows modules to customize test loading. - This is the `load_tests protocol`_. + This is the `load_tests protocol`_. The *pattern* argument is passed as + the third argument to ``load_tests``. .. versionchanged:: 3.2 Support for ``load_tests`` added. + .. versionchanged:: 3.5 + The undocumented and unofficial *use_load_tests* default argument is + deprecated and ignored, although it is still accepted for backward + compatibility. The method also now accepts a keyword-only argument + *pattern* which is passed to ``load_tests`` as the third argument. + .. method:: loadTestsFromName(name, module=None) @@ -1634,18 +1641,18 @@ the start directory is not the top level directory then the top level directory must be specified separately. - If importing a module fails, for example due to a syntax error, then this - will be recorded as a single error and discovery will continue. If the - import failure is due to :exc:`SkipTest` being raised, it will be recorded - as a skip instead of an error. - - If a test package name (directory with :file:`__init__.py`) matches the - pattern then the package will be checked for a ``load_tests`` - function. If this exists then it will be called with *loader*, *tests*, - *pattern*. - - If load_tests exists then discovery does *not* recurse into the package, - ``load_tests`` is responsible for loading all tests in the package. + If importing a module fails, for example due to a syntax error, then + this will be recorded as a single error and discovery will continue. If + the import failure is due to :exc:`SkipTest` being raised, it will be + recorded as a skip instead of an error. + + If a package (a directory containing a file named :file:`__init__.py`) is + found, the package will be checked for a ``load_tests`` function. If this + exists then it will be called with *loader*, *tests*, *pattern*. + + If ``load_tests`` exists then discovery does *not* recurse into the + package, ``load_tests`` is responsible for loading all tests in the + package. 
The pattern is deliberately not stored as a loader attribute so that packages can continue discovery themselves. *top_level_dir* is stored so @@ -1664,6 +1671,11 @@ the same even if the underlying file system's ordering is not dependent on file name. + .. versionchanged:: 3.5 + Found packages are now checked for ``load_tests`` regardless of + whether their path matches *pattern*, because it is impossible for + a package name to match the default pattern. + The following attributes of a :class:`TestLoader` can be configured either by subclassing or assignment on an instance: @@ -2032,7 +2044,10 @@ If a test module defines ``load_tests`` it will be called by :meth:`TestLoader.loadTestsFromModule` with the following arguments:: - load_tests(loader, standard_tests, None) + load_tests(loader, standard_tests, pattern) + +where *pattern* is passed straight through from ``loadTestsFromModule``. It +defaults to ``None``. It should return a :class:`TestSuite`. @@ -2054,21 +2069,12 @@ suite.addTests(tests) return suite -If discovery is started, either from the command line or by calling -:meth:`TestLoader.discover`, with a pattern that matches a package -name then the package :file:`__init__.py` will be checked for ``load_tests``. - -.. note:: - - The default pattern is ``'test*.py'``. This matches all Python files - that start with ``'test'`` but *won't* match any test directories. - - A pattern like ``'test*'`` will match test packages as well as - modules. - -If the package :file:`__init__.py` defines ``load_tests`` then it will be -called and discovery not continued into the package. ``load_tests`` -is called with the following arguments:: +If discovery is started in a directory containing a package, either from the +command line or by calling :meth:`TestLoader.discover`, then the package +:file:`__init__.py` will be checked for ``load_tests``. If that function does +not exist, discovery will recurse into the package as though it were just +another directory. Otherwise, discovery of the package's tests will be left up +to ``load_tests`` which is called with the following arguments:: load_tests(loader, standard_tests, pattern) @@ -2087,6 +2093,11 @@ standard_tests.addTests(package_tests) return standard_tests +.. versionchanged:: 3.5 + Discovery no longer checks package names for matching *pattern* due to the + impossibility of package names matching the default pattern. + + Class and Module Fixtures ------------------------- diff --git a/Lib/unittest/loader.py b/Lib/unittest/loader.py --- a/Lib/unittest/loader.py +++ b/Lib/unittest/loader.py @@ -6,6 +6,7 @@ import traceback import types import functools +import warnings from fnmatch import fnmatch @@ -70,8 +71,27 @@ loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) return loaded_suite - def loadTestsFromModule(self, module, use_load_tests=True): + # XXX After Python 3.5, remove backward compatibility hacks for + # use_load_tests deprecation via *args and **kws. See issue 16662. + def loadTestsFromModule(self, module, *args, pattern=None, **kws): """Return a suite of all tests cases contained in the given module""" + # This method used to take an undocumented and unofficial + # use_load_tests argument. For backward compatibility, we still + # accept the argument (which can also be the first position) but we + # ignore it and issue a deprecation warning if it's present. 
+ if len(args) == 1 or 'use_load_tests' in kws: + warnings.warn('use_load_tests is deprecated and ignored', + DeprecationWarning) + kws.pop('use_load_tests', None) + if len(args) > 1: + raise TypeError('loadTestsFromModule() takes 1 positional argument but {} were given'.format(len(args))) + if len(kws) != 0: + # Since the keyword arguments are unsorted (see PEP 468), just + # pick the alphabetically sorted first argument to complain about, + # if multiple were given. At least the error message will be + # predictable. + complaint = sorted(kws)[0] + raise TypeError("loadTestsFromModule() got an unexpected keyword argument '{}'".format(complaint)) tests = [] for name in dir(module): obj = getattr(module, name) @@ -80,9 +100,9 @@ load_tests = getattr(module, 'load_tests', None) tests = self.suiteClass(tests) - if use_load_tests and load_tests is not None: + if load_tests is not None: try: - return load_tests(self, tests, None) + return load_tests(self, tests, pattern) except Exception as e: return _make_failed_load_tests(module.__name__, e, self.suiteClass) @@ -325,7 +345,7 @@ msg = ("%r module incorrectly imported from %r. Expected %r. " "Is this module globally installed?") raise ImportError(msg % (mod_name, module_dir, expected_dir)) - yield self.loadTestsFromModule(module) + yield self.loadTestsFromModule(module, pattern=pattern) elif os.path.isdir(full_path): if (not namespace and not os.path.isfile(os.path.join(full_path, '__init__.py'))): @@ -333,26 +353,27 @@ load_tests = None tests = None - if fnmatch(path, pattern): - # only check load_tests if the package directory itself matches the filter - name = self._get_name_from_path(full_path) + name = self._get_name_from_path(full_path) + try: package = self._get_module_from_name(name) + except case.SkipTest as e: + yield _make_skipped_test(name, e, self.suiteClass) + except: + yield _make_failed_import_test(name, self.suiteClass) + else: load_tests = getattr(package, 'load_tests', None) - tests = self.loadTestsFromModule(package, use_load_tests=False) - - if load_tests is None: + tests = self.loadTestsFromModule(package, pattern=pattern) if tests is not None: # tests loaded from package file yield tests + + if load_tests is not None: + # loadTestsFromModule(package) has load_tests for us. + continue # recurse into the package yield from self._find_tests(full_path, pattern, namespace=namespace) - else: - try: - yield load_tests(self, tests, pattern) - except Exception as e: - yield _make_failed_load_tests(package.__name__, e, - self.suiteClass) + defaultTestLoader = TestLoader() diff --git a/Lib/unittest/test/test_discovery.py b/Lib/unittest/test/test_discovery.py --- a/Lib/unittest/test/test_discovery.py +++ b/Lib/unittest/test/test_discovery.py @@ -68,7 +68,13 @@ self.addCleanup(restore_isfile) loader._get_module_from_name = lambda path: path + ' module' - loader.loadTestsFromModule = lambda module: module + ' tests' + orig_load_tests = loader.loadTestsFromModule + def loadTestsFromModule(module, pattern=None): + # This is where load_tests is called. + base = orig_load_tests(module, pattern=pattern) + return base + [module + ' tests'] + loader.loadTestsFromModule = loadTestsFromModule + loader.suiteClass = lambda thing: thing top_level = os.path.abspath('/foo') loader._top_level_dir = top_level @@ -76,9 +82,9 @@ # The test suites found should be sorted alphabetically for reliable # execution order. 
- expected = [name + ' module tests' for name in - ('test1', 'test2')] - expected.extend([('test_dir.%s' % name) + ' module tests' for name in + expected = [[name + ' module tests'] for name in + ('test1', 'test2', 'test_dir')] + expected.extend([[('test_dir.%s' % name) + ' module tests'] for name in ('test3', 'test4')]) self.assertEqual(suite, expected) @@ -116,34 +122,204 @@ if os.path.basename(path) == 'test_directory': def load_tests(loader, tests, pattern): self.load_tests_args.append((loader, tests, pattern)) - return 'load_tests' + return [self.path + ' load_tests'] self.load_tests = load_tests def __eq__(self, other): return self.path == other.path loader._get_module_from_name = lambda name: Module(name) - def loadTestsFromModule(module, use_load_tests): - if use_load_tests: - raise self.failureException('use_load_tests should be False for packages') - return module.path + ' module tests' + orig_load_tests = loader.loadTestsFromModule + def loadTestsFromModule(module, pattern=None): + # This is where load_tests is called. + base = orig_load_tests(module, pattern=pattern) + return base + [module.path + ' module tests'] loader.loadTestsFromModule = loadTestsFromModule + loader.suiteClass = lambda thing: thing loader._top_level_dir = '/foo' # this time no '.py' on the pattern so that it can match # a test package suite = list(loader._find_tests('/foo', 'test*')) - # We should have loaded tests from the test_directory package by calling load_tests - # and directly from the test_directory2 package + # We should have loaded tests from the a_directory and test_directory2 + # directly and via load_tests for the test_directory package, which + # still calls the baseline module loader. self.assertEqual(suite, - ['load_tests', 'test_directory2' + ' module tests']) + [['a_directory module tests'], + ['test_directory load_tests', + 'test_directory module tests'], + ['test_directory2 module tests']]) + + # The test module paths should be sorted for reliable execution order - self.assertEqual(Module.paths, ['test_directory', 'test_directory2']) + self.assertEqual(Module.paths, + ['a_directory', 'test_directory', 'test_directory2']) + + # load_tests should have been called once with loader, tests and pattern + # (but there are no tests in our stub module itself, so thats [] at the + # time of call. 
+ self.assertEqual(Module.load_tests_args, + [(loader, [], 'test*')]) + + def test_find_tests_default_calls_package_load_tests(self): + loader = unittest.TestLoader() + + original_listdir = os.listdir + def restore_listdir(): + os.listdir = original_listdir + original_isfile = os.path.isfile + def restore_isfile(): + os.path.isfile = original_isfile + original_isdir = os.path.isdir + def restore_isdir(): + os.path.isdir = original_isdir + + directories = ['a_directory', 'test_directory', 'test_directory2'] + path_lists = [directories, [], [], []] + os.listdir = lambda path: path_lists.pop(0) + self.addCleanup(restore_listdir) + + os.path.isdir = lambda path: True + self.addCleanup(restore_isdir) + + os.path.isfile = lambda path: os.path.basename(path) not in directories + self.addCleanup(restore_isfile) + + class Module(object): + paths = [] + load_tests_args = [] + + def __init__(self, path): + self.path = path + self.paths.append(path) + if os.path.basename(path) == 'test_directory': + def load_tests(loader, tests, pattern): + self.load_tests_args.append((loader, tests, pattern)) + return [self.path + ' load_tests'] + self.load_tests = load_tests + + def __eq__(self, other): + return self.path == other.path + + loader._get_module_from_name = lambda name: Module(name) + orig_load_tests = loader.loadTestsFromModule + def loadTestsFromModule(module, pattern=None): + # This is where load_tests is called. + base = orig_load_tests(module, pattern=pattern) + return base + [module.path + ' module tests'] + loader.loadTestsFromModule = loadTestsFromModule + loader.suiteClass = lambda thing: thing + + loader._top_level_dir = '/foo' + # this time no '.py' on the pattern so that it can match + # a test package + suite = list(loader._find_tests('/foo', 'test*.py')) + + # We should have loaded tests from the a_directory and test_directory2 + # directly and via load_tests for the test_directory package, which + # still calls the baseline module loader. + self.assertEqual(suite, + [['a_directory module tests'], + ['test_directory load_tests', + 'test_directory module tests'], + ['test_directory2 module tests']]) + # The test module paths should be sorted for reliable execution order + self.assertEqual(Module.paths, + ['a_directory', 'test_directory', 'test_directory2']) + # load_tests should have been called once with loader, tests and pattern self.assertEqual(Module.load_tests_args, - [(loader, 'test_directory' + ' module tests', 'test*')]) + [(loader, [], 'test*.py')]) + + def test_find_tests_customise_via_package_pattern(self): + # This test uses the example 'do-nothing' load_tests from + # https://docs.python.org/3/library/unittest.html#load-tests-protocol + # to make sure that that actually works. + # Housekeeping + original_listdir = os.listdir + def restore_listdir(): + os.listdir = original_listdir + self.addCleanup(restore_listdir) + original_isfile = os.path.isfile + def restore_isfile(): + os.path.isfile = original_isfile + self.addCleanup(restore_isfile) + original_isdir = os.path.isdir + def restore_isdir(): + os.path.isdir = original_isdir + self.addCleanup(restore_isdir) + self.addCleanup(sys.path.remove, '/foo') + + # Test data: we expect the following: + # a listdir to find our package, and a isfile and isdir check on it. + # a module-from-name call to turn that into a module + # followed by load_tests. + # then our load_tests will call discover() which is messy + # but that finally chains into find_tests again for the child dir - + # which is why we don't have a infinite loop. 
+ # We expect to see: + # the module load tests for both package and plain module called, + # and the plain module result nested by the package module load_tests + # indicating that it was processed and could have been mutated. + vfs = {'/foo': ['my_package'], + '/foo/my_package': ['__init__.py', 'test_module.py']} + def list_dir(path): + return list(vfs[path]) + os.listdir = list_dir + os.path.isdir = lambda path: not path.endswith('.py') + os.path.isfile = lambda path: path.endswith('.py') + + class Module(object): + paths = [] + load_tests_args = [] + + def __init__(self, path): + self.path = path + self.paths.append(path) + if path.endswith('test_module'): + def load_tests(loader, tests, pattern): + self.load_tests_args.append((loader, tests, pattern)) + return [self.path + ' load_tests'] + else: + def load_tests(loader, tests, pattern): + self.load_tests_args.append((loader, tests, pattern)) + # top level directory cached on loader instance + __file__ = '/foo/my_package/__init__.py' + this_dir = os.path.dirname(__file__) + pkg_tests = loader.discover( + start_dir=this_dir, pattern=pattern) + return [self.path + ' load_tests', tests + ] + pkg_tests + self.load_tests = load_tests + + def __eq__(self, other): + return self.path == other.path + + loader = unittest.TestLoader() + loader._get_module_from_name = lambda name: Module(name) + loader.suiteClass = lambda thing: thing + + loader._top_level_dir = '/foo' + # this time no '.py' on the pattern so that it can match + # a test package + suite = list(loader._find_tests('/foo', 'test*.py')) + + # We should have loaded tests from both my_package and + # my_pacakge.test_module, and also run the load_tests hook in both. + # (normally this would be nested TestSuites.) + self.assertEqual(suite, + [['my_package load_tests', [], + ['my_package.test_module load_tests']]]) + # Parents before children. 
+ self.assertEqual(Module.paths, + ['my_package', 'my_package.test_module']) + + # load_tests should have been called twice with loader, tests and pattern + self.assertEqual(Module.load_tests_args, + [(loader, [], 'test*.py'), + (loader, [], 'test*.py')]) def test_discover(self): loader = unittest.TestLoader() @@ -203,6 +379,17 @@ sys.path[:] = orig_sys_path self.addCleanup(restore) + def setup_import_issue_package_tests(self, vfs): + self.addCleanup(setattr, os, 'listdir', os.listdir) + self.addCleanup(setattr, os.path, 'isfile', os.path.isfile) + self.addCleanup(setattr, os.path, 'isdir', os.path.isdir) + self.addCleanup(sys.path.__setitem__, slice(None), list(sys.path)) + def list_dir(path): + return list(vfs[path]) + os.listdir = list_dir + os.path.isdir = lambda path: not path.endswith('.py') + os.path.isfile = lambda path: path.endswith('.py') + def test_discover_with_modules_that_fail_to_import(self): loader = unittest.TestLoader() @@ -216,6 +403,25 @@ with self.assertRaises(ImportError): test.test_this_does_not_exist() + def test_discover_with_init_modules_that_fail_to_import(self): + vfs = {'/foo': ['my_package'], + '/foo/my_package': ['__init__.py', 'test_module.py']} + self.setup_import_issue_package_tests(vfs) + import_calls = [] + def _get_module_from_name(name): + import_calls.append(name) + raise ImportError("Cannot import Name") + loader = unittest.TestLoader() + loader._get_module_from_name = _get_module_from_name + suite = loader.discover('/foo') + + self.assertIn('/foo', sys.path) + self.assertEqual(suite.countTestCases(), 1) + test = list(list(suite)[0])[0] # extract test from suite + with self.assertRaises(ImportError): + test.my_package() + self.assertEqual(import_calls, ['my_package']) + def test_discover_with_module_that_raises_SkipTest_on_import(self): loader = unittest.TestLoader() @@ -232,6 +438,26 @@ suite.run(result) self.assertEqual(len(result.skipped), 1) + def test_discover_with_init_module_that_raises_SkipTest_on_import(self): + vfs = {'/foo': ['my_package'], + '/foo/my_package': ['__init__.py', 'test_module.py']} + self.setup_import_issue_package_tests(vfs) + import_calls = [] + def _get_module_from_name(name): + import_calls.append(name) + raise unittest.SkipTest('skipperoo') + loader = unittest.TestLoader() + loader._get_module_from_name = _get_module_from_name + suite = loader.discover('/foo') + + self.assertIn('/foo', sys.path) + self.assertEqual(suite.countTestCases(), 1) + result = unittest.TestResult() + suite.run(result) + self.assertEqual(len(result.skipped), 1) + self.assertEqual(result.testsRun, 1) + self.assertEqual(import_calls, ['my_package']) + def test_command_line_handling_parseArgs(self): program = TestableTestProgram() diff --git a/Lib/unittest/test/test_loader.py b/Lib/unittest/test/test_loader.py --- a/Lib/unittest/test/test_loader.py +++ b/Lib/unittest/test/test_loader.py @@ -1,9 +1,26 @@ import sys import types - +import warnings import unittest +# Decorator used in the deprecation tests to reset the warning registry for +# test isolation and reproducibility. 
+def warningregistry(func): + def wrapper(*args, **kws): + missing = object() + saved = getattr(warnings, '__warningregistry__', missing).copy() + try: + return func(*args, **kws) + finally: + if saved is missing: + try: + del warnings.__warningregistry__ + except AttributeError: + pass + else: + warnings.__warningregistry__ = saved + class Test_TestLoader(unittest.TestCase): @@ -150,6 +167,7 @@ # Check that loadTestsFromModule honors (or not) a module # with a load_tests function. + @warningregistry def test_loadTestsFromModule__load_tests(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): @@ -168,10 +186,139 @@ suite = loader.loadTestsFromModule(m) self.assertIsInstance(suite, unittest.TestSuite) self.assertEqual(load_tests_args, [loader, suite, None]) + # With Python 3.5, the undocumented and unofficial use_load_tests is + # ignored (and deprecated). + load_tests_args = [] + with warnings.catch_warnings(record=False): + warnings.simplefilter('never') + suite = loader.loadTestsFromModule(m, use_load_tests=False) + self.assertEqual(load_tests_args, [loader, suite, None]) + + @warningregistry + def test_loadTestsFromModule__use_load_tests_deprecated_positional(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + # The method still works. + loader = unittest.TestLoader() + # use_load_tests=True as a positional argument. + suite = loader.loadTestsFromModule(m, False) + self.assertIsInstance(suite, unittest.TestSuite) + # load_tests was still called because use_load_tests is deprecated + # and ignored. + self.assertEqual(load_tests_args, [loader, suite, None]) + # We got a warning. + self.assertIs(w[-1].category, DeprecationWarning) + self.assertEqual(str(w[-1].message), + 'use_load_tests is deprecated and ignored') + + @warningregistry + def test_loadTestsFromModule__use_load_tests_deprecated_keyword(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + # The method still works. + loader = unittest.TestLoader() + suite = loader.loadTestsFromModule(m, use_load_tests=False) + self.assertIsInstance(suite, unittest.TestSuite) + # load_tests was still called because use_load_tests is deprecated + # and ignored. + self.assertEqual(load_tests_args, [loader, suite, None]) + # We got a warning. 
+ self.assertIs(w[-1].category, DeprecationWarning) + self.assertEqual(str(w[-1].message), + 'use_load_tests is deprecated and ignored') + + def test_loadTestsFromModule__too_many_positional_args(self): + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase load_tests_args = [] - suite = loader.loadTestsFromModule(m, use_load_tests=False) - self.assertEqual(load_tests_args, []) + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + loader = unittest.TestLoader() + with self.assertRaises(TypeError) as cm: + loader.loadTestsFromModule(m, False, 'testme.*') + self.assertEqual(type(cm.exception), TypeError) + # The error message names the first bad argument alphabetically, + # however use_load_tests (which sorts first) is ignored. + self.assertEqual( + str(cm.exception), + 'loadTestsFromModule() takes 1 positional argument but 2 were given') + + @warningregistry + def test_loadTestsFromModule__use_load_tests_other_bad_keyword(self): + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + loader = unittest.TestLoader() + with warnings.catch_warnings(): + warnings.simplefilter('never') + with self.assertRaises(TypeError) as cm: + loader.loadTestsFromModule( + m, use_load_tests=False, very_bad=True, worse=False) + self.assertEqual(type(cm.exception), TypeError) + # The error message names the first bad argument alphabetically, + # however use_load_tests (which sorts first) is ignored. + self.assertEqual( + str(cm.exception), + "loadTestsFromModule() got an unexpected keyword argument 'very_bad'") + + def test_loadTestsFromModule__pattern(self): + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + + loader = unittest.TestLoader() + suite = loader.loadTestsFromModule(m, pattern='testme.*') + self.assertIsInstance(suite, unittest.TestSuite) + self.assertEqual(load_tests_args, [loader, suite, 'testme.*']) def test_loadTestsFromModule__faulty_load_tests(self): m = types.ModuleType('m') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,12 @@ Library ------- +- Issue #16662: load_tests() is now unconditionally run when it is present in + a package's __init__.py. TestLoader.loadTestsFromModule() still accepts + use_load_tests, but it is deprecated and ignored. A new keyword-only + attribute `pattern` is added and documented. Patch given by Robert Collins, + tweaked by Barry Warsaw. + - Issue #22226: First letter no longer is stripped from the "status" key in the result of Treeview.heading(). 
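For reference, a minimal sketch of the package-level load_tests hook that this change now runs unconditionally during discovery. The package name and layout below are hypothetical; the body follows the load_tests protocol example that the new tests above exercise:

    # my_package/__init__.py  (hypothetical layout, for illustration only)
    import os

    def load_tests(loader, standard_tests, pattern):
        # Discovery now calls this hook even for packages and passes the
        # active pattern through, so the hook can re-discover tests inside
        # the package directory using that same pattern.
        this_dir = os.path.dirname(__file__)
        package_tests = loader.discover(start_dir=this_dir, pattern=pattern)
        standard_tests.addTests(package_tests)
        return standard_tests

The same pattern can also be supplied directly through the new keyword-only argument, e.g. unittest.TestLoader().loadTestsFromModule(module, pattern='test*.py'); use_load_tests is still accepted but now only triggers a DeprecationWarning and is otherwise ignored.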
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 8 23:29:10 2014 From: python-checkins at python.org (barry.warsaw) Date: Mon, 8 Sep 2014 23:29:10 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_A_few_tweaks_for_issue1666?= =?utf-8?q?2_based_on_feedback_from_Robert_Collins=2E?= Message-ID: <3hsN0k6yJPz7LkJ@mail.python.org> http://hg.python.org/cpython/rev/92b292d68104 changeset: 92378:92b292d68104 user: Barry Warsaw date: Mon Sep 08 17:29:02 2014 -0400 summary: A few tweaks for issue16662 based on feedback from Robert Collins. files: Lib/unittest/loader.py | 7 +- Lib/unittest/test/test_loader.py | 77 ++++++++++--------- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/Lib/unittest/loader.py b/Lib/unittest/loader.py --- a/Lib/unittest/loader.py +++ b/Lib/unittest/loader.py @@ -79,12 +79,15 @@ # use_load_tests argument. For backward compatibility, we still # accept the argument (which can also be the first position) but we # ignore it and issue a deprecation warning if it's present. - if len(args) == 1 or 'use_load_tests' in kws: + if len(args) > 0 or 'use_load_tests' in kws: warnings.warn('use_load_tests is deprecated and ignored', DeprecationWarning) kws.pop('use_load_tests', None) if len(args) > 1: - raise TypeError('loadTestsFromModule() takes 1 positional argument but {} were given'.format(len(args))) + # Complain about the number of arguments, but don't forget the + # required `module` argument. + complaint = len(args) + 1 + raise TypeError('loadTestsFromModule() takes 1 positional argument but {} were given'.format(complaint)) if len(kws) != 0: # Since the keyword arguments are unsorted (see PEP 468), just # pick the alphabetically sorted first argument to complain about, diff --git a/Lib/unittest/test/test_loader.py b/Lib/unittest/test/test_loader.py --- a/Lib/unittest/test/test_loader.py +++ b/Lib/unittest/test/test_loader.py @@ -196,23 +196,23 @@ @warningregistry def test_loadTestsFromModule__use_load_tests_deprecated_positional(self): + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + # The method still works. + loader = unittest.TestLoader() + # use_load_tests=True as a positional argument. with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - m = types.ModuleType('m') - class MyTestCase(unittest.TestCase): - def test(self): - pass - m.testcase_1 = MyTestCase - - load_tests_args = [] - def load_tests(loader, tests, pattern): - self.assertIsInstance(tests, unittest.TestSuite) - load_tests_args.extend((loader, tests, pattern)) - return tests - m.load_tests = load_tests - # The method still works. - loader = unittest.TestLoader() - # use_load_tests=True as a positional argument. 
suite = loader.loadTestsFromModule(m, False) self.assertIsInstance(suite, unittest.TestSuite) # load_tests was still called because use_load_tests is deprecated @@ -225,22 +225,22 @@ @warningregistry def test_loadTestsFromModule__use_load_tests_deprecated_keyword(self): + m = types.ModuleType('m') + class MyTestCase(unittest.TestCase): + def test(self): + pass + m.testcase_1 = MyTestCase + + load_tests_args = [] + def load_tests(loader, tests, pattern): + self.assertIsInstance(tests, unittest.TestSuite) + load_tests_args.extend((loader, tests, pattern)) + return tests + m.load_tests = load_tests + # The method still works. + loader = unittest.TestLoader() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') - m = types.ModuleType('m') - class MyTestCase(unittest.TestCase): - def test(self): - pass - m.testcase_1 = MyTestCase - - load_tests_args = [] - def load_tests(loader, tests, pattern): - self.assertIsInstance(tests, unittest.TestSuite) - load_tests_args.extend((loader, tests, pattern)) - return tests - m.load_tests = load_tests - # The method still works. - loader = unittest.TestLoader() suite = loader.loadTestsFromModule(m, use_load_tests=False) self.assertIsInstance(suite, unittest.TestSuite) # load_tests was still called because use_load_tests is deprecated @@ -251,6 +251,7 @@ self.assertEqual(str(w[-1].message), 'use_load_tests is deprecated and ignored') + @warningregistry def test_loadTestsFromModule__too_many_positional_args(self): m = types.ModuleType('m') class MyTestCase(unittest.TestCase): @@ -265,14 +266,18 @@ return tests m.load_tests = load_tests loader = unittest.TestLoader() - with self.assertRaises(TypeError) as cm: + with self.assertRaises(TypeError) as cm, \ + warnings.catch_warning(record=True) as w: loader.loadTestsFromModule(m, False, 'testme.*') - self.assertEqual(type(cm.exception), TypeError) - # The error message names the first bad argument alphabetically, - # however use_load_tests (which sorts first) is ignored. - self.assertEqual( - str(cm.exception), - 'loadTestsFromModule() takes 1 positional argument but 2 were given') + # We still got the deprecation warning. + self.assertIs(w[-1].category, DeprecationWarning) + self.assertEqual(str(w[-1].message), + 'use_load_tests is deprecated and ignored') + # We also got a TypeError for too many positional arguments. 
+ self.assertEqual(type(cm.exception), TypeError) + self.assertEqual( + str(cm.exception), + 'loadTestsFromModule() takes 1 positional argument but 3 were given') @warningregistry def test_loadTestsFromModule__use_load_tests_other_bad_keyword(self): -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Tue Sep 9 09:38:27 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 09 Sep 2014 09:38:27 +0200 Subject: [Python-checkins] Daily reference leaks (92b292d68104): sum=151934 Message-ID: results for 92b292d68104 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogIMQ681', '-x'] From python-checkins at python.org Tue Sep 9 17:47:15 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 9 Sep 2014 17:47:15 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_show_how_to_purge_CDN?= Message-ID: <3hsrMl2ht0z7Ljc@mail.python.org> http://hg.python.org/peps/rev/b8b1cad3aacd changeset: 5550:b8b1cad3aacd user: Benjamin Peterson date: Tue Sep 09 11:47:10 2014 -0400 summary: show how to purge CDN files: pep-0101.txt | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -439,6 +439,12 @@ checkout. The Doc's version_switcher.js script also needs to be updated. + ___ Note both the documentation and downloads are behind a caching CDN. If + you change archives after downloading them through the website, you'll + need to purge the stale data in the CDN like this: + + $ curl -X PURGE https://www.python.org/ftp/python/2.7.5/Python-2.7.5.tar.xz + ___ For the extra paranoid, do a completely clean test of the release. This includes downloading the tarball from www.python.org. -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Sep 9 18:09:20 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 9 Sep 2014 18:09:20 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_imaplib=2EIMAP4_now_suppor?= =?utf-8?q?ts_the_context_manager_protocol=2E?= Message-ID: <3hsrsD12PNz7LjR@mail.python.org> http://hg.python.org/cpython/rev/e1b1be597736 changeset: 92379:e1b1be597736 user: Serhiy Storchaka date: Tue Sep 09 19:07:49 2014 +0300 summary: imaplib.IMAP4 now supports the context manager protocol. Original patch by Tarek Ziad?. files: Doc/library/imaplib.rst | 13 ++++++++++ Doc/whatsnew/3.5.rst | 8 ++++++ Lib/imaplib.py | 8 ++++++ Lib/test/test_imaplib.py | 35 ++++++++++++++++++++++++++++ Misc/NEWS | 3 ++ 5 files changed, 67 insertions(+), 0 deletions(-) diff --git a/Doc/library/imaplib.rst b/Doc/library/imaplib.rst --- a/Doc/library/imaplib.rst +++ b/Doc/library/imaplib.rst @@ -37,6 +37,19 @@ initialized. If *host* is not specified, ``''`` (the local host) is used. If *port* is omitted, the standard IMAP4 port (143) is used. + The :class:`IMAP4` class supports the :keyword:`with` statement. When used + like this, the IMAP4 ``LOGOUT`` command is issued automatically when the + :keyword:`with` statement exits. E.g.:: + + >>> from imaplib import IMAP4 + >>> with IMAP4("domain.org") as M: + ... 
M.noop() + ... + ('OK', [b'Nothing Accomplished. d25if65hy903weo.87']) + + .. versionchanged:: 3.5 + Support for the :keyword:`with` statement was added. + Three exceptions are defined as attributes of the :class:`IMAP4` class: diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -141,6 +141,14 @@ *module* contains no docstrings instead of raising :exc:`ValueError` (contributed by Glenn Jones in :issue:`15916`). +imaplib +------- + +* :class:`IMAP4` now supports the context management protocol. When used in a + :keyword:`with` statement, the IMAP4 ``LOGOUT`` command will be called + automatically at the end of the block. (Contributed by Tarek Ziad? and + Serhiy Storchaka in :issue:`4972`). + imghdr ------ diff --git a/Lib/imaplib.py b/Lib/imaplib.py --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -238,6 +238,14 @@ return getattr(self, attr.lower()) raise AttributeError("Unknown IMAP4 command: '%s'" % attr) + def __enter__(self): + return self + + def __exit__(self, *args): + try: + self.logout() + except OSError: + pass # Overridable methods diff --git a/Lib/test/test_imaplib.py b/Lib/test/test_imaplib.py --- a/Lib/test/test_imaplib.py +++ b/Lib/test/test_imaplib.py @@ -98,6 +98,10 @@ continuation = None capabilities = '' + def setup(self): + super().setup() + self.server.logged = None + def _send(self, message): if verbose: print("SENT: %r" % message.strip()) @@ -162,9 +166,14 @@ self._send_tagged(tag, 'OK', 'CAPABILITY completed') def cmd_LOGOUT(self, tag, args): + self.server.logged = None self._send_textline('* BYE IMAP4ref1 Server logging out') self._send_tagged(tag, 'OK', 'LOGOUT completed') + def cmd_LOGIN(self, tag, args): + self.server.logged = args[0] + self._send_tagged(tag, 'OK', 'LOGIN completed') + class ThreadedNetworkedTests(unittest.TestCase): server_class = socketserver.TCPServer @@ -345,6 +354,32 @@ self.assertRaises(imaplib.IMAP4.error, self.imap_class, *server.server_address) + @reap_threads + def test_simple_with_statement(self): + # simplest call + with self.reaped_server(SimpleIMAPHandler) as server: + with self.imap_class(*server.server_address): + pass + + @reap_threads + def test_with_statement(self): + with self.reaped_server(SimpleIMAPHandler) as server: + with self.imap_class(*server.server_address) as imap: + imap.login('user', 'pass') + self.assertEqual(server.logged, 'user') + self.assertIsNone(server.logged) + + @reap_threads + def test_with_statement_logout(self): + # what happens if already logout in the block? + with self.reaped_server(SimpleIMAPHandler) as server: + with self.imap_class(*server.server_address) as imap: + imap.login('user', 'pass') + self.assertEqual(server.logged, 'user') + imap.logout() + self.assertIsNone(server.logged) + self.assertIsNone(server.logged) + @unittest.skipUnless(ssl, "SSL not available") class ThreadedNetworkedTestsSSL(ThreadedNetworkedTests): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #12410: imaplib.IMAP4 now supports the context manager protocol. + Original patch by Tarek Ziad?. + - Issue #16662: load_tests() is now unconditionally run when it is present in a package's __init__.py. TestLoader.loadTestsFromModule() still accepts use_load_tests, but it is deprecated and ignored. 
A new keyword-only -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 9 19:59:10 2014 From: python-checkins at python.org (stefan.krah) Date: Tue, 9 Sep 2014 19:59:10 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjg0?= =?utf-8?b?OiBVcGRhdGUgZGVjaW1hbC5fX2FsbF9f?= Message-ID: <3hsvHy1VFNz7Ljc@mail.python.org> http://hg.python.org/cpython/rev/2b3dbbd2bd92 changeset: 92380:2b3dbbd2bd92 branch: 3.4 parent: 92375:7fece97e605d user: Stefan Krah date: Tue Sep 09 19:56:56 2014 +0200 summary: Issue #22284: Update decimal.__all__ files: Lib/decimal.py | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/Lib/decimal.py b/Lib/decimal.py --- a/Lib/decimal.py +++ b/Lib/decimal.py @@ -116,6 +116,9 @@ # Two major classes 'Decimal', 'Context', + # Named tuple representation + 'DecimalTuple', + # Contexts 'DefaultContext', 'BasicContext', 'ExtendedContext', @@ -124,6 +127,9 @@ 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', 'FloatOperation', + # Exceptional conditions that trigger InvalidOperation + 'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined', + # Constants for use in setting up contexts 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Sep 9 19:59:11 2014 From: python-checkins at python.org (stefan.krah) Date: Tue, 9 Sep 2014 19:59:11 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322284=3A_Merge_3=2E4?= Message-ID: <3hsvHz49Lmz7Lks@mail.python.org> http://hg.python.org/cpython/rev/5bc23c111de1 changeset: 92381:5bc23c111de1 parent: 92379:e1b1be597736 parent: 92380:2b3dbbd2bd92 user: Stefan Krah date: Tue Sep 09 19:57:59 2014 +0200 summary: Issue #22284: Merge 3.4 files: Lib/decimal.py | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/Lib/decimal.py b/Lib/decimal.py --- a/Lib/decimal.py +++ b/Lib/decimal.py @@ -116,6 +116,9 @@ # Two major classes 'Decimal', 'Context', + # Named tuple representation + 'DecimalTuple', + # Contexts 'DefaultContext', 'BasicContext', 'ExtendedContext', @@ -124,6 +127,9 @@ 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', 'FloatOperation', + # Exceptional conditions that trigger InvalidOperation + 'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined', + # Constants for use in setting up contexts 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Wed Sep 10 10:40:12 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 10 Sep 2014 10:40:12 +0200 Subject: [Python-checkins] Daily reference leaks (5bc23c111de1): sum=151934 Message-ID: results for 5bc23c111de1 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, 0, -2] references, sum=0 test_site leaked [2, 0, -2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', 
'3:3:/home/antoine/cpython/refleaks/reflogw4ILWE', '-x'] From python-checkins at python.org Wed Sep 10 18:01:22 2014 From: python-checkins at python.org (stefan.krah) Date: Wed, 10 Sep 2014 18:01:22 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2319232=3A_Speed_up?= =?utf-8?q?_decimal_import=2E__Additionally=2C_since_=5Fdecimal_is?= Message-ID: <3htSdZ6Z4vz7Lk6@mail.python.org> http://hg.python.org/cpython/rev/8bf51cf94405 changeset: 92382:8bf51cf94405 user: Stefan Krah date: Wed Sep 10 17:58:15 2014 +0200 summary: Issue #19232: Speed up decimal import. Additionally, since _decimal is self-contained, this change facilitates maintenance and the Python version can be easily imported for experimentation. files: Lib/decimal.py | 17 +- Lib/decimal.py | 6406 +--------------- Lib/test/test_decimal.py | 4 +- Modules/_decimal/tests/deccheck.py | 7 +- 4 files changed, 14 insertions(+), 6420 deletions(-) diff --git a/Lib/decimal.py b/Lib/_pydecimal.py copy from Lib/decimal.py copy to Lib/_pydecimal.py --- a/Lib/decimal.py +++ b/Lib/_pydecimal.py @@ -144,6 +144,7 @@ 'HAVE_THREADS' ] +__name__ = 'decimal' # For pickling __version__ = '1.70' # Highest version of the spec this complies with # See http://speleotrove.com/decimal/ __libmpdec_version__ = "2.4.1" # compatible libmpdec version @@ -6386,19 +6387,3 @@ # _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS _PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) del sys - -try: - import _decimal -except ImportError: - pass -else: - s1 = set(dir()) - s2 = set(dir(_decimal)) - for name in s1 - s2: - del globals()[name] - del s1, s2, name - from _decimal import * - -if __name__ == '__main__': - import doctest, decimal - doctest.testmod(decimal) diff --git a/Lib/decimal.py b/Lib/decimal.py --- a/Lib/decimal.py +++ b/Lib/decimal.py @@ -1,6404 +1,14 @@ -# Copyright (c) 2004 Python Software Foundation. -# All rights reserved. - -# Written by Eric Price -# and Facundo Batista -# and Raymond Hettinger -# and Aahz -# and Tim Peters - -# This module should be kept in sync with the latest updates of the -# IBM specification as it evolves. Those updates will be treated -# as bug fixes (deviation from the spec is a compatibility, usability -# bug) and will be backported. At this point the spec is stabilizing -# and the updates are becoming fewer, smaller, and less significant. - -""" -This is an implementation of decimal floating point arithmetic based on -the General Decimal Arithmetic Specification: - - http://speleotrove.com/decimal/decarith.html - -and IEEE standard 854-1987: - - http://en.wikipedia.org/wiki/IEEE_854-1987 - -Decimal floating point has finite precision with arbitrarily large bounds. - -The purpose of this module is to support arithmetic using familiar -"schoolhouse" rules and to avoid some of the tricky representation -issues associated with binary floating point. The package is especially -useful for financial applications or for contexts where users have -expectations that are at odds with binary floating point (for instance, -in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead -of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected -Decimal('0.00')). 
- -Here are some examples of using the decimal module: - ->>> from decimal import * ->>> setcontext(ExtendedContext) ->>> Decimal(0) -Decimal('0') ->>> Decimal('1') -Decimal('1') ->>> Decimal('-.0123') -Decimal('-0.0123') ->>> Decimal(123456) -Decimal('123456') ->>> Decimal('123.45e12345678') -Decimal('1.2345E+12345680') ->>> Decimal('1.33') + Decimal('1.27') -Decimal('2.60') ->>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') -Decimal('-2.20') ->>> dig = Decimal(1) ->>> print(dig / Decimal(3)) -0.333333333 ->>> getcontext().prec = 18 ->>> print(dig / Decimal(3)) -0.333333333333333333 ->>> print(dig.sqrt()) -1 ->>> print(Decimal(3).sqrt()) -1.73205080756887729 ->>> print(Decimal(3) ** 123) -4.85192780976896427E+58 ->>> inf = Decimal(1) / Decimal(0) ->>> print(inf) -Infinity ->>> neginf = Decimal(-1) / Decimal(0) ->>> print(neginf) --Infinity ->>> print(neginf + inf) -NaN ->>> print(neginf * inf) --Infinity ->>> print(dig / 0) -Infinity ->>> getcontext().traps[DivisionByZero] = 1 ->>> print(dig / 0) -Traceback (most recent call last): - ... - ... - ... -decimal.DivisionByZero: x / 0 ->>> c = Context() ->>> c.traps[InvalidOperation] = 0 ->>> print(c.flags[InvalidOperation]) -0 ->>> c.divide(Decimal(0), Decimal(0)) -Decimal('NaN') ->>> c.traps[InvalidOperation] = 1 ->>> print(c.flags[InvalidOperation]) -1 ->>> c.flags[InvalidOperation] = 0 ->>> print(c.flags[InvalidOperation]) -0 ->>> print(c.divide(Decimal(0), Decimal(0))) -Traceback (most recent call last): - ... - ... - ... -decimal.InvalidOperation: 0 / 0 ->>> print(c.flags[InvalidOperation]) -1 ->>> c.flags[InvalidOperation] = 0 ->>> c.traps[InvalidOperation] = 0 ->>> print(c.divide(Decimal(0), Decimal(0))) -NaN ->>> print(c.flags[InvalidOperation]) -1 ->>> -""" - -__all__ = [ - # Two major classes - 'Decimal', 'Context', - - # Named tuple representation - 'DecimalTuple', - - # Contexts - 'DefaultContext', 'BasicContext', 'ExtendedContext', - - # Exceptions - 'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero', - 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', - 'FloatOperation', - - # Exceptional conditions that trigger InvalidOperation - 'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined', - - # Constants for use in setting up contexts - 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', - 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', - - # Functions for manipulating contexts - 'setcontext', 'getcontext', 'localcontext', - - # Limits for the C version for compatibility - 'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY', - - # C version: compile time choice that enables the thread local context - 'HAVE_THREADS' -] - -__version__ = '1.70' # Highest version of the spec this complies with - # See http://speleotrove.com/decimal/ -__libmpdec_version__ = "2.4.1" # compatible libmpdec version - -import math as _math -import numbers as _numbers -import sys try: - from collections import namedtuple as _namedtuple - DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent') + from _decimal import * + from _decimal import __doc__ + from _decimal import __version__ + from _decimal import __libmpdec_version__ except ImportError: - DecimalTuple = lambda *args: args + from _pydecimal import * + from _pydecimal import __doc__ + from _pydecimal import __version__ + from _pydecimal import __libmpdec_version__ -# Rounding -ROUND_DOWN = 'ROUND_DOWN' -ROUND_HALF_UP = 'ROUND_HALF_UP' -ROUND_HALF_EVEN = 'ROUND_HALF_EVEN' -ROUND_CEILING = 'ROUND_CEILING' 
-ROUND_FLOOR = 'ROUND_FLOOR' -ROUND_UP = 'ROUND_UP' -ROUND_HALF_DOWN = 'ROUND_HALF_DOWN' -ROUND_05UP = 'ROUND_05UP' -# Compatibility with the C version -HAVE_THREADS = True -if sys.maxsize == 2**63-1: - MAX_PREC = 999999999999999999 - MAX_EMAX = 999999999999999999 - MIN_EMIN = -999999999999999999 -else: - MAX_PREC = 425000000 - MAX_EMAX = 425000000 - MIN_EMIN = -425000000 -MIN_ETINY = MIN_EMIN - (MAX_PREC-1) - -# Errors - -class DecimalException(ArithmeticError): - """Base exception class. - - Used exceptions derive from this. - If an exception derives from another exception besides this (such as - Underflow (Inexact, Rounded, Subnormal) that indicates that it is only - called if the others are present. This isn't actually used for - anything, though. - - handle -- Called when context._raise_error is called and the - trap_enabler is not set. First argument is self, second is the - context. More arguments can be given, those being after - the explanation in _raise_error (For example, - context._raise_error(NewError, '(-x)!', self._sign) would - call NewError().handle(context, self._sign).) - - To define a new exception, it should be sufficient to have it derive - from DecimalException. - """ - def handle(self, context, *args): - pass - - -class Clamped(DecimalException): - """Exponent of a 0 changed to fit bounds. - - This occurs and signals clamped if the exponent of a result has been - altered in order to fit the constraints of a specific concrete - representation. This may occur when the exponent of a zero result would - be outside the bounds of a representation, or when a large normal - number would have an encoded exponent that cannot be represented. In - this latter case, the exponent is reduced to fit and the corresponding - number of zero digits are appended to the coefficient ("fold-down"). - """ - -class InvalidOperation(DecimalException): - """An invalid operation was performed. - - Various bad things cause this: - - Something creates a signaling NaN - -INF + INF - 0 * (+-)INF - (+-)INF / (+-)INF - x % 0 - (+-)INF % x - x._rescale( non-integer ) - sqrt(-x) , x > 0 - 0 ** 0 - x ** (non-integer) - x ** (+-)INF - An operand is invalid - - The result of the operation after these is a quiet positive NaN, - except when the cause is a signaling NaN, in which case the result is - also a quiet NaN, but with the original sign, and an optional - diagnostic information. - """ - def handle(self, context, *args): - if args: - ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) - return ans._fix_nan(context) - return _NaN - -class ConversionSyntax(InvalidOperation): - """Trying to convert badly formed string. - - This occurs and signals invalid-operation if an string is being - converted to a number and it does not conform to the numeric string - syntax. The result is [0,qNaN]. - """ - def handle(self, context, *args): - return _NaN - -class DivisionByZero(DecimalException, ZeroDivisionError): - """Division by 0. - - This occurs and signals division-by-zero if division of a finite number - by zero was attempted (during a divide-integer or divide operation, or a - power operation with negative right-hand operand), and the dividend was - not zero. - - The result of the operation is [sign,inf], where sign is the exclusive - or of the signs of the operands for divide, or is 1 for an odd power of - -0, for power. - """ - - def handle(self, context, sign, *args): - return _SignedInfinity[sign] - -class DivisionImpossible(InvalidOperation): - """Cannot perform the division adequately. 
- - This occurs and signals invalid-operation if the integer result of a - divide-integer or remainder operation had too many digits (would be - longer than precision). The result is [0,qNaN]. - """ - - def handle(self, context, *args): - return _NaN - -class DivisionUndefined(InvalidOperation, ZeroDivisionError): - """Undefined result of division. - - This occurs and signals invalid-operation if division by zero was - attempted (during a divide-integer, divide, or remainder operation), and - the dividend is also zero. The result is [0,qNaN]. - """ - - def handle(self, context, *args): - return _NaN - -class Inexact(DecimalException): - """Had to round, losing information. - - This occurs and signals inexact whenever the result of an operation is - not exact (that is, it needed to be rounded and any discarded digits - were non-zero), or if an overflow or underflow condition occurs. The - result in all cases is unchanged. - - The inexact signal may be tested (or trapped) to determine if a given - operation (or sequence of operations) was inexact. - """ - -class InvalidContext(InvalidOperation): - """Invalid context. Unknown rounding, for example. - - This occurs and signals invalid-operation if an invalid context was - detected during an operation. This can occur if contexts are not checked - on creation and either the precision exceeds the capability of the - underlying concrete representation or an unknown or unsupported rounding - was specified. These aspects of the context need only be checked when - the values are required to be used. The result is [0,qNaN]. - """ - - def handle(self, context, *args): - return _NaN - -class Rounded(DecimalException): - """Number got rounded (not necessarily changed during rounding). - - This occurs and signals rounded whenever the result of an operation is - rounded (that is, some zero or non-zero digits were discarded from the - coefficient), or if an overflow or underflow condition occurs. The - result in all cases is unchanged. - - The rounded signal may be tested (or trapped) to determine if a given - operation (or sequence of operations) caused a loss of precision. - """ - -class Subnormal(DecimalException): - """Exponent < Emin before rounding. - - This occurs and signals subnormal whenever the result of a conversion or - operation is subnormal (that is, its adjusted exponent is less than - Emin, before any rounding). The result in all cases is unchanged. - - The subnormal signal may be tested (or trapped) to determine if a given - or operation (or sequence of operations) yielded a subnormal result. - """ - -class Overflow(Inexact, Rounded): - """Numerical overflow. - - This occurs and signals overflow if the adjusted exponent of a result - (from a conversion or from an operation that is not an attempt to divide - by zero), after rounding, would be greater than the largest value that - can be handled by the implementation (the value Emax). - - The result depends on the rounding mode: - - For round-half-up and round-half-even (and for round-half-down and - round-up, if implemented), the result of the operation is [sign,inf], - where sign is the sign of the intermediate result. For round-down, the - result is the largest finite number that can be represented in the - current precision, with the sign of the intermediate result. For - round-ceiling, the result is the same as for round-down if the sign of - the intermediate result is 1, or is [0,inf] otherwise. 
For round-floor, - the result is the same as for round-down if the sign of the intermediate - result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded - will also be raised. - """ - - def handle(self, context, sign, *args): - if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN, - ROUND_HALF_DOWN, ROUND_UP): - return _SignedInfinity[sign] - if sign == 0: - if context.rounding == ROUND_CEILING: - return _SignedInfinity[sign] - return _dec_from_triple(sign, '9'*context.prec, - context.Emax-context.prec+1) - if sign == 1: - if context.rounding == ROUND_FLOOR: - return _SignedInfinity[sign] - return _dec_from_triple(sign, '9'*context.prec, - context.Emax-context.prec+1) - - -class Underflow(Inexact, Rounded, Subnormal): - """Numerical underflow with result rounded to 0. - - This occurs and signals underflow if a result is inexact and the - adjusted exponent of the result would be smaller (more negative) than - the smallest value that can be handled by the implementation (the value - Emin). That is, the result is both inexact and subnormal. - - The result after an underflow will be a subnormal number rounded, if - necessary, so that its exponent is not less than Etiny. This may result - in 0 with the sign of the intermediate result and an exponent of Etiny. - - In all cases, Inexact, Rounded, and Subnormal will also be raised. - """ - -class FloatOperation(DecimalException, TypeError): - """Enable stricter semantics for mixing floats and Decimals. - - If the signal is not trapped (default), mixing floats and Decimals is - permitted in the Decimal() constructor, context.create_decimal() and - all comparison operators. Both conversion and comparisons are exact. - Any occurrence of a mixed operation is silently recorded by setting - FloatOperation in the context flags. Explicit conversions with - Decimal.from_float() or context.create_decimal_from_float() do not - set the flag. - - Otherwise (the signal is trapped), only equality comparisons and explicit - conversions are silent. All other mixed operations raise FloatOperation. - """ - -# List of public traps and flags -_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded, - Underflow, InvalidOperation, Subnormal, FloatOperation] - -# Map conditions (per the spec) to signals -_condition_map = {ConversionSyntax:InvalidOperation, - DivisionImpossible:InvalidOperation, - DivisionUndefined:InvalidOperation, - InvalidContext:InvalidOperation} - -# Valid rounding modes -_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING, - ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP) - -##### Context Functions ################################################## - -# The getcontext() and setcontext() function manage access to a thread-local -# current context. Py2.4 offers direct support for thread locals. If that -# is not available, use threading.current_thread() which is slower but will -# work for older Pythons. If threads are not part of the build, create a -# mock threading object with threading.local() returning the module namespace. - -try: - import threading -except ImportError: - # Python was compiled without threads; create a mock object instead - class MockThreading(object): - def local(self, sys=sys): - return sys.modules[__name__] - threading = MockThreading() - del MockThreading - -try: - threading.local - -except AttributeError: - - # To fix reloading, force it to create a new context - # Old contexts have different exceptions in their dicts, making problems. 
- if hasattr(threading.current_thread(), '__decimal_context__'): - del threading.current_thread().__decimal_context__ - - def setcontext(context): - """Set this thread's context to context.""" - if context in (DefaultContext, BasicContext, ExtendedContext): - context = context.copy() - context.clear_flags() - threading.current_thread().__decimal_context__ = context - - def getcontext(): - """Returns this thread's context. - - If this thread does not yet have a context, returns - a new context and sets this thread's context. - New contexts are copies of DefaultContext. - """ - try: - return threading.current_thread().__decimal_context__ - except AttributeError: - context = Context() - threading.current_thread().__decimal_context__ = context - return context - -else: - - local = threading.local() - if hasattr(local, '__decimal_context__'): - del local.__decimal_context__ - - def getcontext(_local=local): - """Returns this thread's context. - - If this thread does not yet have a context, returns - a new context and sets this thread's context. - New contexts are copies of DefaultContext. - """ - try: - return _local.__decimal_context__ - except AttributeError: - context = Context() - _local.__decimal_context__ = context - return context - - def setcontext(context, _local=local): - """Set this thread's context to context.""" - if context in (DefaultContext, BasicContext, ExtendedContext): - context = context.copy() - context.clear_flags() - _local.__decimal_context__ = context - - del threading, local # Don't contaminate the namespace - -def localcontext(ctx=None): - """Return a context manager for a copy of the supplied context - - Uses a copy of the current context if no context is specified - The returned context manager creates a local decimal context - in a with statement: - def sin(x): - with localcontext() as ctx: - ctx.prec += 2 - # Rest of sin calculation algorithm - # uses a precision 2 greater than normal - return +s # Convert result to normal precision - - def sin(x): - with localcontext(ExtendedContext): - # Rest of sin calculation algorithm - # uses the Extended Context from the - # General Decimal Arithmetic Specification - return +s # Convert result to normal context - - >>> setcontext(DefaultContext) - >>> print(getcontext().prec) - 28 - >>> with localcontext(): - ... ctx = getcontext() - ... ctx.prec += 2 - ... print(ctx.prec) - ... - 30 - >>> with localcontext(ExtendedContext): - ... print(getcontext().prec) - ... - 9 - >>> print(getcontext().prec) - 28 - """ - if ctx is None: ctx = getcontext() - return _ContextManager(ctx) - - -##### Decimal class ####################################################### - -# Do not subclass Decimal from numbers.Real and do not register it as such -# (because Decimals are not interoperable with floats). See the notes in -# numbers.py for more detail. - -class Decimal(object): - """Floating point class for decimal arithmetic.""" - - __slots__ = ('_exp','_int','_sign', '_is_special') - # Generally, the value of the Decimal instance is given by - # (-1)**_sign * _int * 10**_exp - # Special values are signified by _is_special == True - - # We're immutable, so use __new__ not __init__ - def __new__(cls, value="0", context=None): - """Create a decimal point instance. 
- - >>> Decimal('3.14') # string input - Decimal('3.14') - >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent) - Decimal('3.14') - >>> Decimal(314) # int - Decimal('314') - >>> Decimal(Decimal(314)) # another decimal instance - Decimal('314') - >>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay - Decimal('3.14') - """ - - # Note that the coefficient, self._int, is actually stored as - # a string rather than as a tuple of digits. This speeds up - # the "digits to integer" and "integer to digits" conversions - # that are used in almost every arithmetic operation on - # Decimals. This is an internal detail: the as_tuple function - # and the Decimal constructor still deal with tuples of - # digits. - - self = object.__new__(cls) - - # From a string - # REs insist on real strings, so we can too. - if isinstance(value, str): - m = _parser(value.strip()) - if m is None: - if context is None: - context = getcontext() - return context._raise_error(ConversionSyntax, - "Invalid literal for Decimal: %r" % value) - - if m.group('sign') == "-": - self._sign = 1 - else: - self._sign = 0 - intpart = m.group('int') - if intpart is not None: - # finite number - fracpart = m.group('frac') or '' - exp = int(m.group('exp') or '0') - self._int = str(int(intpart+fracpart)) - self._exp = exp - len(fracpart) - self._is_special = False - else: - diag = m.group('diag') - if diag is not None: - # NaN - self._int = str(int(diag or '0')).lstrip('0') - if m.group('signal'): - self._exp = 'N' - else: - self._exp = 'n' - else: - # infinity - self._int = '0' - self._exp = 'F' - self._is_special = True - return self - - # From an integer - if isinstance(value, int): - if value >= 0: - self._sign = 0 - else: - self._sign = 1 - self._exp = 0 - self._int = str(abs(value)) - self._is_special = False - return self - - # From another decimal - if isinstance(value, Decimal): - self._exp = value._exp - self._sign = value._sign - self._int = value._int - self._is_special = value._is_special - return self - - # From an internal working value - if isinstance(value, _WorkRep): - self._sign = value.sign - self._int = str(value.int) - self._exp = int(value.exp) - self._is_special = False - return self - - # tuple/list conversion (possibly from as_tuple()) - if isinstance(value, (list,tuple)): - if len(value) != 3: - raise ValueError('Invalid tuple size in creation of Decimal ' - 'from list or tuple. The list or tuple ' - 'should have exactly three elements.') - # process sign. The isinstance test rejects floats - if not (isinstance(value[0], int) and value[0] in (0,1)): - raise ValueError("Invalid sign. 
The first value in the tuple " - "should be an integer; either 0 for a " - "positive number or 1 for a negative number.") - self._sign = value[0] - if value[2] == 'F': - # infinity: value[1] is ignored - self._int = '0' - self._exp = value[2] - self._is_special = True - else: - # process and validate the digits in value[1] - digits = [] - for digit in value[1]: - if isinstance(digit, int) and 0 <= digit <= 9: - # skip leading zeros - if digits or digit != 0: - digits.append(digit) - else: - raise ValueError("The second value in the tuple must " - "be composed of integers in the range " - "0 through 9.") - if value[2] in ('n', 'N'): - # NaN: digits form the diagnostic - self._int = ''.join(map(str, digits)) - self._exp = value[2] - self._is_special = True - elif isinstance(value[2], int): - # finite number: digits give the coefficient - self._int = ''.join(map(str, digits or [0])) - self._exp = value[2] - self._is_special = False - else: - raise ValueError("The third value in the tuple must " - "be an integer, or one of the " - "strings 'F', 'n', 'N'.") - return self - - if isinstance(value, float): - if context is None: - context = getcontext() - context._raise_error(FloatOperation, - "strict semantics for mixing floats and Decimals are " - "enabled") - value = Decimal.from_float(value) - self._exp = value._exp - self._sign = value._sign - self._int = value._int - self._is_special = value._is_special - return self - - raise TypeError("Cannot convert %r to Decimal" % value) - - @classmethod - def from_float(cls, f): - """Converts a float to a decimal number, exactly. - - Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). - Since 0.1 is not exactly representable in binary floating point, the - value is stored as the nearest representable value which is - 0x1.999999999999ap-4. The exact equivalent of the value in decimal - is 0.1000000000000000055511151231257827021181583404541015625. - - >>> Decimal.from_float(0.1) - Decimal('0.1000000000000000055511151231257827021181583404541015625') - >>> Decimal.from_float(float('nan')) - Decimal('NaN') - >>> Decimal.from_float(float('inf')) - Decimal('Infinity') - >>> Decimal.from_float(-float('inf')) - Decimal('-Infinity') - >>> Decimal.from_float(-0.0) - Decimal('-0') - - """ - if isinstance(f, int): # handle integer inputs - return cls(f) - if not isinstance(f, float): - raise TypeError("argument must be int or float.") - if _math.isinf(f) or _math.isnan(f): - return cls(repr(f)) - if _math.copysign(1.0, f) == 1.0: - sign = 0 - else: - sign = 1 - n, d = abs(f).as_integer_ratio() - k = d.bit_length() - 1 - result = _dec_from_triple(sign, str(n*5**k), -k) - if cls is Decimal: - return result - else: - return cls(result) - - def _isnan(self): - """Returns whether the number is not actually one. - - 0 if a number - 1 if NaN - 2 if sNaN - """ - if self._is_special: - exp = self._exp - if exp == 'n': - return 1 - elif exp == 'N': - return 2 - return 0 - - def _isinfinity(self): - """Returns whether the number is infinite - - 0 if finite or not a number - 1 if +INF - -1 if -INF - """ - if self._exp == 'F': - if self._sign: - return -1 - return 1 - return 0 - - def _check_nans(self, other=None, context=None): - """Returns whether the number is not actually one. - - if self, other are sNaN, signal - if self, other are NaN return nan - return 0 - - Done before operations. 
- """ - - self_is_nan = self._isnan() - if other is None: - other_is_nan = False - else: - other_is_nan = other._isnan() - - if self_is_nan or other_is_nan: - if context is None: - context = getcontext() - - if self_is_nan == 2: - return context._raise_error(InvalidOperation, 'sNaN', - self) - if other_is_nan == 2: - return context._raise_error(InvalidOperation, 'sNaN', - other) - if self_is_nan: - return self._fix_nan(context) - - return other._fix_nan(context) - return 0 - - def _compare_check_nans(self, other, context): - """Version of _check_nans used for the signaling comparisons - compare_signal, __le__, __lt__, __ge__, __gt__. - - Signal InvalidOperation if either self or other is a (quiet - or signaling) NaN. Signaling NaNs take precedence over quiet - NaNs. - - Return 0 if neither operand is a NaN. - - """ - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - if self.is_snan(): - return context._raise_error(InvalidOperation, - 'comparison involving sNaN', - self) - elif other.is_snan(): - return context._raise_error(InvalidOperation, - 'comparison involving sNaN', - other) - elif self.is_qnan(): - return context._raise_error(InvalidOperation, - 'comparison involving NaN', - self) - elif other.is_qnan(): - return context._raise_error(InvalidOperation, - 'comparison involving NaN', - other) - return 0 - - def __bool__(self): - """Return True if self is nonzero; otherwise return False. - - NaNs and infinities are considered nonzero. - """ - return self._is_special or self._int != '0' - - def _cmp(self, other): - """Compare the two non-NaN decimal instances self and other. - - Returns -1 if self < other, 0 if self == other and 1 - if self > other. This routine is for internal use only.""" - - if self._is_special or other._is_special: - self_inf = self._isinfinity() - other_inf = other._isinfinity() - if self_inf == other_inf: - return 0 - elif self_inf < other_inf: - return -1 - else: - return 1 - - # check for zeros; Decimal('0') == Decimal('-0') - if not self: - if not other: - return 0 - else: - return -((-1)**other._sign) - if not other: - return (-1)**self._sign - - # If different signs, neg one is less - if other._sign < self._sign: - return -1 - if self._sign < other._sign: - return 1 - - self_adjusted = self.adjusted() - other_adjusted = other.adjusted() - if self_adjusted == other_adjusted: - self_padded = self._int + '0'*(self._exp - other._exp) - other_padded = other._int + '0'*(other._exp - self._exp) - if self_padded == other_padded: - return 0 - elif self_padded < other_padded: - return -(-1)**self._sign - else: - return (-1)**self._sign - elif self_adjusted > other_adjusted: - return (-1)**self._sign - else: # self_adjusted < other_adjusted - return -((-1)**self._sign) - - # Note: The Decimal standard doesn't cover rich comparisons for - # Decimals. In particular, the specification is silent on the - # subject of what should happen for a comparison involving a NaN. - # We take the following approach: - # - # == comparisons involving a quiet NaN always return False - # != comparisons involving a quiet NaN always return True - # == or != comparisons involving a signaling NaN signal - # InvalidOperation, and return False or True as above if the - # InvalidOperation is not trapped. - # <, >, <= and >= comparisons involving a (quiet or signaling) - # NaN signal InvalidOperation, and return False if the - # InvalidOperation is not trapped. 
- # - # This behavior is designed to conform as closely as possible to - # that specified by IEEE 754. - - def __eq__(self, other, context=None): - self, other = _convert_for_comparison(self, other, equality_op=True) - if other is NotImplemented: - return other - if self._check_nans(other, context): - return False - return self._cmp(other) == 0 - - def __ne__(self, other, context=None): - self, other = _convert_for_comparison(self, other, equality_op=True) - if other is NotImplemented: - return other - if self._check_nans(other, context): - return True - return self._cmp(other) != 0 - - - def __lt__(self, other, context=None): - self, other = _convert_for_comparison(self, other) - if other is NotImplemented: - return other - ans = self._compare_check_nans(other, context) - if ans: - return False - return self._cmp(other) < 0 - - def __le__(self, other, context=None): - self, other = _convert_for_comparison(self, other) - if other is NotImplemented: - return other - ans = self._compare_check_nans(other, context) - if ans: - return False - return self._cmp(other) <= 0 - - def __gt__(self, other, context=None): - self, other = _convert_for_comparison(self, other) - if other is NotImplemented: - return other - ans = self._compare_check_nans(other, context) - if ans: - return False - return self._cmp(other) > 0 - - def __ge__(self, other, context=None): - self, other = _convert_for_comparison(self, other) - if other is NotImplemented: - return other - ans = self._compare_check_nans(other, context) - if ans: - return False - return self._cmp(other) >= 0 - - def compare(self, other, context=None): - """Compares one to another. - - -1 => a < b - 0 => a = b - 1 => a > b - NaN => one is NaN - Like __cmp__, but returns Decimal instances. - """ - other = _convert_other(other, raiseit=True) - - # Compare(NaN, NaN) = NaN - if (self._is_special or other and other._is_special): - ans = self._check_nans(other, context) - if ans: - return ans - - return Decimal(self._cmp(other)) - - def __hash__(self): - """x.__hash__() <==> hash(x)""" - - # In order to make sure that the hash of a Decimal instance - # agrees with the hash of a numerically equal integer, float - # or Fraction, we follow the rules for numeric hashes outlined - # in the documentation. (See library docs, 'Built-in Types'). - if self._is_special: - if self.is_snan(): - raise TypeError('Cannot hash a signaling NaN value.') - elif self.is_nan(): - return _PyHASH_NAN - else: - if self._sign: - return -_PyHASH_INF - else: - return _PyHASH_INF - - if self._exp >= 0: - exp_hash = pow(10, self._exp, _PyHASH_MODULUS) - else: - exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS) - hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS - ans = hash_ if self >= 0 else -hash_ - return -2 if ans == -1 else ans - - def as_tuple(self): - """Represents the number as a triple tuple. - - To show the internals exactly as they are. - """ - return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp) - - def __repr__(self): - """Represents the number as an instance of Decimal.""" - # Invariant: eval(repr(d)) == d - return "Decimal('%s')" % str(self) - - def __str__(self, eng=False, context=None): - """Return string representation of the number in scientific notation. - - Captures all of the information in the underlying representation. 
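# Illustrative check (separate from the patch text) of the hashing and as_tuple()
# behaviour documented above: numerically equal values hash alike across numeric types.
from decimal import Decimal
from fractions import Fraction

assert hash(Decimal('2.5')) == hash(2.5) == hash(Fraction(5, 2))
assert hash(Decimal(7)) == hash(7)
print(Decimal('-3.14').as_tuple())
# DecimalTuple(sign=1, digits=(3, 1, 4), exponent=-2)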
- """ - - sign = ['', '-'][self._sign] - if self._is_special: - if self._exp == 'F': - return sign + 'Infinity' - elif self._exp == 'n': - return sign + 'NaN' + self._int - else: # self._exp == 'N' - return sign + 'sNaN' + self._int - - # number of digits of self._int to left of decimal point - leftdigits = self._exp + len(self._int) - - # dotplace is number of digits of self._int to the left of the - # decimal point in the mantissa of the output string (that is, - # after adjusting the exponent) - if self._exp <= 0 and leftdigits > -6: - # no exponent required - dotplace = leftdigits - elif not eng: - # usual scientific notation: 1 digit on left of the point - dotplace = 1 - elif self._int == '0': - # engineering notation, zero - dotplace = (leftdigits + 1) % 3 - 1 - else: - # engineering notation, nonzero - dotplace = (leftdigits - 1) % 3 + 1 - - if dotplace <= 0: - intpart = '0' - fracpart = '.' + '0'*(-dotplace) + self._int - elif dotplace >= len(self._int): - intpart = self._int+'0'*(dotplace-len(self._int)) - fracpart = '' - else: - intpart = self._int[:dotplace] - fracpart = '.' + self._int[dotplace:] - if leftdigits == dotplace: - exp = '' - else: - if context is None: - context = getcontext() - exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace) - - return sign + intpart + fracpart + exp - - def to_eng_string(self, context=None): - """Convert to engineering-type string. - - Engineering notation has an exponent which is a multiple of 3, so there - are up to 3 digits left of the decimal place. - - Same rules for when in exponential and when as a value as in __str__. - """ - return self.__str__(eng=True, context=context) - - def __neg__(self, context=None): - """Returns a copy with the sign switched. - - Rounds, if it has reason. - """ - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - - if context is None: - context = getcontext() - - if not self and context.rounding != ROUND_FLOOR: - # -Decimal('0') is Decimal('0'), not Decimal('-0'), except - # in ROUND_FLOOR rounding mode. - ans = self.copy_abs() - else: - ans = self.copy_negate() - - return ans._fix(context) - - def __pos__(self, context=None): - """Returns a copy, unless it is a sNaN. - - Rounds the number (if more then precision digits) - """ - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - - if context is None: - context = getcontext() - - if not self and context.rounding != ROUND_FLOOR: - # + (-0) = 0, except in ROUND_FLOOR rounding mode. - ans = self.copy_abs() - else: - ans = Decimal(self) - - return ans._fix(context) - - def __abs__(self, round=True, context=None): - """Returns the absolute value of self. - - If the keyword argument 'round' is false, do not round. The - expression self.__abs__(round=False) is equivalent to - self.copy_abs(). - """ - if not round: - return self.copy_abs() - - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - - if self._sign: - ans = self.__neg__(context=context) - else: - ans = self.__pos__(context=context) - - return ans - - def __add__(self, other, context=None): - """Returns self + other. - - -INF + INF (or the reverse) cause InvalidOperation errors. 
- """ - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans - - if self._isinfinity(): - # If both INF, same sign => same as both, opposite => error. - if self._sign != other._sign and other._isinfinity(): - return context._raise_error(InvalidOperation, '-INF + INF') - return Decimal(self) - if other._isinfinity(): - return Decimal(other) # Can't both be infinity here - - exp = min(self._exp, other._exp) - negativezero = 0 - if context.rounding == ROUND_FLOOR and self._sign != other._sign: - # If the answer is 0, the sign should be negative, in this case. - negativezero = 1 - - if not self and not other: - sign = min(self._sign, other._sign) - if negativezero: - sign = 1 - ans = _dec_from_triple(sign, '0', exp) - ans = ans._fix(context) - return ans - if not self: - exp = max(exp, other._exp - context.prec-1) - ans = other._rescale(exp, context.rounding) - ans = ans._fix(context) - return ans - if not other: - exp = max(exp, self._exp - context.prec-1) - ans = self._rescale(exp, context.rounding) - ans = ans._fix(context) - return ans - - op1 = _WorkRep(self) - op2 = _WorkRep(other) - op1, op2 = _normalize(op1, op2, context.prec) - - result = _WorkRep() - if op1.sign != op2.sign: - # Equal and opposite - if op1.int == op2.int: - ans = _dec_from_triple(negativezero, '0', exp) - ans = ans._fix(context) - return ans - if op1.int < op2.int: - op1, op2 = op2, op1 - # OK, now abs(op1) > abs(op2) - if op1.sign == 1: - result.sign = 1 - op1.sign, op2.sign = op2.sign, op1.sign - else: - result.sign = 0 - # So we know the sign, and op1 > 0. - elif op1.sign == 1: - result.sign = 1 - op1.sign, op2.sign = (0, 0) - else: - result.sign = 0 - # Now, op1 > abs(op2) > 0 - - if op2.sign == 0: - result.int = op1.int + op2.int - else: - result.int = op1.int - op2.int - - result.exp = op1.exp - ans = Decimal(result) - ans = ans._fix(context) - return ans - - __radd__ = __add__ - - def __sub__(self, other, context=None): - """Return self - other""" - other = _convert_other(other) - if other is NotImplemented: - return other - - if self._is_special or other._is_special: - ans = self._check_nans(other, context=context) - if ans: - return ans - - # self - other is computed as self + other.copy_negate() - return self.__add__(other.copy_negate(), context=context) - - def __rsub__(self, other, context=None): - """Return other - self""" - other = _convert_other(other) - if other is NotImplemented: - return other - - return other.__sub__(self, context=context) - - def __mul__(self, other, context=None): - """Return self * other. - - (+-) INF * 0 (or its reverse) raise InvalidOperation. 
- """ - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - resultsign = self._sign ^ other._sign - - if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans - - if self._isinfinity(): - if not other: - return context._raise_error(InvalidOperation, '(+-)INF * 0') - return _SignedInfinity[resultsign] - - if other._isinfinity(): - if not self: - return context._raise_error(InvalidOperation, '0 * (+-)INF') - return _SignedInfinity[resultsign] - - resultexp = self._exp + other._exp - - # Special case for multiplying by zero - if not self or not other: - ans = _dec_from_triple(resultsign, '0', resultexp) - # Fixing in case the exponent is out of bounds - ans = ans._fix(context) - return ans - - # Special case for multiplying by power of 10 - if self._int == '1': - ans = _dec_from_triple(resultsign, other._int, resultexp) - ans = ans._fix(context) - return ans - if other._int == '1': - ans = _dec_from_triple(resultsign, self._int, resultexp) - ans = ans._fix(context) - return ans - - op1 = _WorkRep(self) - op2 = _WorkRep(other) - - ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) - ans = ans._fix(context) - - return ans - __rmul__ = __mul__ - - def __truediv__(self, other, context=None): - """Return self / other.""" - other = _convert_other(other) - if other is NotImplemented: - return NotImplemented - - if context is None: - context = getcontext() - - sign = self._sign ^ other._sign - - if self._is_special or other._is_special: - ans = self._check_nans(other, context) - if ans: - return ans - - if self._isinfinity() and other._isinfinity(): - return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF') - - if self._isinfinity(): - return _SignedInfinity[sign] - - if other._isinfinity(): - context._raise_error(Clamped, 'Division by infinity') - return _dec_from_triple(sign, '0', context.Etiny()) - - # Special cases for zeroes - if not other: - if not self: - return context._raise_error(DivisionUndefined, '0 / 0') - return context._raise_error(DivisionByZero, 'x / 0', sign) - - if not self: - exp = self._exp - other._exp - coeff = 0 - else: - # OK, so neither = 0, INF or NaN - shift = len(other._int) - len(self._int) + context.prec + 1 - exp = self._exp - other._exp - shift - op1 = _WorkRep(self) - op2 = _WorkRep(other) - if shift >= 0: - coeff, remainder = divmod(op1.int * 10**shift, op2.int) - else: - coeff, remainder = divmod(op1.int, op2.int * 10**-shift) - if remainder: - # result is not exact; adjust to ensure correct rounding - if coeff % 5 == 0: - coeff += 1 - else: - # result is exact; get as close to ideal exponent as possible - ideal_exp = self._exp - other._exp - while exp < ideal_exp and coeff % 10 == 0: - coeff //= 10 - exp += 1 - - ans = _dec_from_triple(sign, str(coeff), exp) - return ans._fix(context) - - def _divide(self, other, context): - """Return (self // other, self % other), to context.prec precision. - - Assumes that neither self nor other is a NaN, that self is not - infinite and that other is nonzero. 
- """ - sign = self._sign ^ other._sign - if other._isinfinity(): - ideal_exp = self._exp - else: - ideal_exp = min(self._exp, other._exp) - - expdiff = self.adjusted() - other.adjusted() - if not self or other._isinfinity() or expdiff <= -2: - return (_dec_from_triple(sign, '0', 0), - self._rescale(ideal_exp, context.rounding)) - if expdiff <= context.prec: - op1 = _WorkRep(self) - op2 = _WorkRep(other) - if op1.exp >= op2.exp: - op1.int *= 10**(op1.exp - op2.exp) - else: - op2.int *= 10**(op2.exp - op1.exp) - q, r = divmod(op1.int, op2.int) - if q < 10**context.prec: - return (_dec_from_triple(sign, str(q), 0), - _dec_from_triple(self._sign, str(r), ideal_exp)) - - # Here the quotient is too large to be representable - ans = context._raise_error(DivisionImpossible, - 'quotient too large in //, % or divmod') - return ans, ans - - def __rtruediv__(self, other, context=None): - """Swaps self/other and returns __truediv__.""" - other = _convert_other(other) - if other is NotImplemented: - return other - return other.__truediv__(self, context=context) - - def __divmod__(self, other, context=None): - """ - Return (self // other, self % other) - """ - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - ans = self._check_nans(other, context) - if ans: - return (ans, ans) - - sign = self._sign ^ other._sign - if self._isinfinity(): - if other._isinfinity(): - ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') - return ans, ans - else: - return (_SignedInfinity[sign], - context._raise_error(InvalidOperation, 'INF % x')) - - if not other: - if not self: - ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') - return ans, ans - else: - return (context._raise_error(DivisionByZero, 'x // 0', sign), - context._raise_error(InvalidOperation, 'x % 0')) - - quotient, remainder = self._divide(other, context) - remainder = remainder._fix(context) - return quotient, remainder - - def __rdivmod__(self, other, context=None): - """Swaps self/other and returns __divmod__.""" - other = _convert_other(other) - if other is NotImplemented: - return other - return other.__divmod__(self, context=context) - - def __mod__(self, other, context=None): - """ - self % other - """ - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - ans = self._check_nans(other, context) - if ans: - return ans - - if self._isinfinity(): - return context._raise_error(InvalidOperation, 'INF % x') - elif not other: - if self: - return context._raise_error(InvalidOperation, 'x % 0') - else: - return context._raise_error(DivisionUndefined, '0 % 0') - - remainder = self._divide(other, context)[1] - remainder = remainder._fix(context) - return remainder - - def __rmod__(self, other, context=None): - """Swaps self/other and returns __mod__.""" - other = _convert_other(other) - if other is NotImplemented: - return other - return other.__mod__(self, context=context) - - def remainder_near(self, other, context=None): - """ - Remainder nearest to 0- abs(remainder-near) <= other/2 - """ - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - ans = self._check_nans(other, context) - if ans: - return ans - - # self == +/-infinity -> InvalidOperation - if self._isinfinity(): - return context._raise_error(InvalidOperation, - 'remainder_near(infinity, x)') - - # other == 0 -> either InvalidOperation or DivisionUndefined - if not other: - if 
self: - return context._raise_error(InvalidOperation, - 'remainder_near(x, 0)') - else: - return context._raise_error(DivisionUndefined, - 'remainder_near(0, 0)') - - # other = +/-infinity -> remainder = self - if other._isinfinity(): - ans = Decimal(self) - return ans._fix(context) - - # self = 0 -> remainder = self, with ideal exponent - ideal_exponent = min(self._exp, other._exp) - if not self: - ans = _dec_from_triple(self._sign, '0', ideal_exponent) - return ans._fix(context) - - # catch most cases of large or small quotient - expdiff = self.adjusted() - other.adjusted() - if expdiff >= context.prec + 1: - # expdiff >= prec+1 => abs(self/other) > 10**prec - return context._raise_error(DivisionImpossible) - if expdiff <= -2: - # expdiff <= -2 => abs(self/other) < 0.1 - ans = self._rescale(ideal_exponent, context.rounding) - return ans._fix(context) - - # adjust both arguments to have the same exponent, then divide - op1 = _WorkRep(self) - op2 = _WorkRep(other) - if op1.exp >= op2.exp: - op1.int *= 10**(op1.exp - op2.exp) - else: - op2.int *= 10**(op2.exp - op1.exp) - q, r = divmod(op1.int, op2.int) - # remainder is r*10**ideal_exponent; other is +/-op2.int * - # 10**ideal_exponent. Apply correction to ensure that - # abs(remainder) <= abs(other)/2 - if 2*r + (q&1) > op2.int: - r -= op2.int - q += 1 - - if q >= 10**context.prec: - return context._raise_error(DivisionImpossible) - - # result has same sign as self unless r is negative - sign = self._sign - if r < 0: - sign = 1-sign - r = -r - - ans = _dec_from_triple(sign, str(r), ideal_exponent) - return ans._fix(context) - - def __floordiv__(self, other, context=None): - """self // other""" - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - ans = self._check_nans(other, context) - if ans: - return ans - - if self._isinfinity(): - if other._isinfinity(): - return context._raise_error(InvalidOperation, 'INF // INF') - else: - return _SignedInfinity[self._sign ^ other._sign] - - if not other: - if self: - return context._raise_error(DivisionByZero, 'x // 0', - self._sign ^ other._sign) - else: - return context._raise_error(DivisionUndefined, '0 // 0') - - return self._divide(other, context)[0] - - def __rfloordiv__(self, other, context=None): - """Swaps self/other and returns __floordiv__.""" - other = _convert_other(other) - if other is NotImplemented: - return other - return other.__floordiv__(self, context=context) - - def __float__(self): - """Float representation.""" - if self._isnan(): - if self.is_snan(): - raise ValueError("Cannot convert signaling NaN to float") - s = "-nan" if self._sign else "nan" - else: - s = str(self) - return float(s) - - def __int__(self): - """Converts self to an int, truncating if necessary.""" - if self._is_special: - if self._isnan(): - raise ValueError("Cannot convert NaN to integer") - elif self._isinfinity(): - raise OverflowError("Cannot convert infinity to integer") - s = (-1)**self._sign - if self._exp >= 0: - return s*int(self._int)*10**self._exp - else: - return s*int(self._int[:self._exp] or '0') - - __trunc__ = __int__ - - def real(self): - return self - real = property(real) - - def imag(self): - return Decimal(0) - imag = property(imag) - - def conjugate(self): - return self - - def __complex__(self): - return complex(float(self)) - - def _fix_nan(self, context): - """Decapitate the payload of a NaN to fit the context""" - payload = self._int - - # maximum length of payload is precision if clamp=0, - # precision-1 
if clamp=1. - max_payload_len = context.prec - context.clamp - if len(payload) > max_payload_len: - payload = payload[len(payload)-max_payload_len:].lstrip('0') - return _dec_from_triple(self._sign, payload, self._exp, True) - return Decimal(self) - - def _fix(self, context): - """Round if it is necessary to keep self within prec precision. - - Rounds and fixes the exponent. Does not raise on a sNaN. - - Arguments: - self - Decimal instance - context - context used. - """ - - if self._is_special: - if self._isnan(): - # decapitate payload if necessary - return self._fix_nan(context) - else: - # self is +/-Infinity; return unaltered - return Decimal(self) - - # if self is zero then exponent should be between Etiny and - # Emax if clamp==0, and between Etiny and Etop if clamp==1. - Etiny = context.Etiny() - Etop = context.Etop() - if not self: - exp_max = [context.Emax, Etop][context.clamp] - new_exp = min(max(self._exp, Etiny), exp_max) - if new_exp != self._exp: - context._raise_error(Clamped) - return _dec_from_triple(self._sign, '0', new_exp) - else: - return Decimal(self) - - # exp_min is the smallest allowable exponent of the result, - # equal to max(self.adjusted()-context.prec+1, Etiny) - exp_min = len(self._int) + self._exp - context.prec - if exp_min > Etop: - # overflow: exp_min > Etop iff self.adjusted() > Emax - ans = context._raise_error(Overflow, 'above Emax', self._sign) - context._raise_error(Inexact) - context._raise_error(Rounded) - return ans - - self_is_subnormal = exp_min < Etiny - if self_is_subnormal: - exp_min = Etiny - - # round if self has too many digits - if self._exp < exp_min: - digits = len(self._int) + self._exp - exp_min - if digits < 0: - self = _dec_from_triple(self._sign, '1', exp_min-1) - digits = 0 - rounding_method = self._pick_rounding_function[context.rounding] - changed = rounding_method(self, digits) - coeff = self._int[:digits] or '0' - if changed > 0: - coeff = str(int(coeff)+1) - if len(coeff) > context.prec: - coeff = coeff[:-1] - exp_min += 1 - - # check whether the rounding pushed the exponent out of range - if exp_min > Etop: - ans = context._raise_error(Overflow, 'above Emax', self._sign) - else: - ans = _dec_from_triple(self._sign, coeff, exp_min) - - # raise the appropriate signals, taking care to respect - # the precedence described in the specification - if changed and self_is_subnormal: - context._raise_error(Underflow) - if self_is_subnormal: - context._raise_error(Subnormal) - if changed: - context._raise_error(Inexact) - context._raise_error(Rounded) - if not ans: - # raise Clamped on underflow to 0 - context._raise_error(Clamped) - return ans - - if self_is_subnormal: - context._raise_error(Subnormal) - - # fold down if clamp == 1 and self has too few digits - if context.clamp == 1 and self._exp > Etop: - context._raise_error(Clamped) - self_padded = self._int + '0'*(self._exp - Etop) - return _dec_from_triple(self._sign, self_padded, Etop) - - # here self was representable to begin with; return unchanged - return Decimal(self) - - # for each of the rounding functions below: - # self is a finite, nonzero Decimal - # prec is an integer satisfying 0 <= prec < len(self._int) - # - # each function returns either -1, 0, or 1, as follows: - # 1 indicates that self should be rounded up (away from zero) - # 0 indicates that self should be truncated, and that all the - # digits to be truncated are zeros (so the value is unchanged) - # -1 indicates that there are nonzero digits to be truncated - - def _round_down(self, prec): - """Also known 
as round-towards-0, truncate.""" - if _all_zeros(self._int, prec): - return 0 - else: - return -1 - - def _round_up(self, prec): - """Rounds away from 0.""" - return -self._round_down(prec) - - def _round_half_up(self, prec): - """Rounds 5 up (away from 0)""" - if self._int[prec] in '56789': - return 1 - elif _all_zeros(self._int, prec): - return 0 - else: - return -1 - - def _round_half_down(self, prec): - """Round 5 down""" - if _exact_half(self._int, prec): - return -1 - else: - return self._round_half_up(prec) - - def _round_half_even(self, prec): - """Round 5 to even, rest to nearest.""" - if _exact_half(self._int, prec) and \ - (prec == 0 or self._int[prec-1] in '02468'): - return -1 - else: - return self._round_half_up(prec) - - def _round_ceiling(self, prec): - """Rounds up (not away from 0 if negative.)""" - if self._sign: - return self._round_down(prec) - else: - return -self._round_down(prec) - - def _round_floor(self, prec): - """Rounds down (not towards 0 if negative)""" - if not self._sign: - return self._round_down(prec) - else: - return -self._round_down(prec) - - def _round_05up(self, prec): - """Round down unless digit prec-1 is 0 or 5.""" - if prec and self._int[prec-1] not in '05': - return self._round_down(prec) - else: - return -self._round_down(prec) - - _pick_rounding_function = dict( - ROUND_DOWN = _round_down, - ROUND_UP = _round_up, - ROUND_HALF_UP = _round_half_up, - ROUND_HALF_DOWN = _round_half_down, - ROUND_HALF_EVEN = _round_half_even, - ROUND_CEILING = _round_ceiling, - ROUND_FLOOR = _round_floor, - ROUND_05UP = _round_05up, - ) - - def __round__(self, n=None): - """Round self to the nearest integer, or to a given precision. - - If only one argument is supplied, round a finite Decimal - instance self to the nearest integer. If self is infinite or - a NaN then a Python exception is raised. If self is finite - and lies exactly halfway between two integers then it is - rounded to the integer with even last digit. - - >>> round(Decimal('123.456')) - 123 - >>> round(Decimal('-456.789')) - -457 - >>> round(Decimal('-3.0')) - -3 - >>> round(Decimal('2.5')) - 2 - >>> round(Decimal('3.5')) - 4 - >>> round(Decimal('Inf')) - Traceback (most recent call last): - ... - OverflowError: cannot round an infinity - >>> round(Decimal('NaN')) - Traceback (most recent call last): - ... - ValueError: cannot round a NaN - - If a second argument n is supplied, self is rounded to n - decimal places using the rounding mode for the current - context. - - For an integer n, round(self, -n) is exactly equivalent to - self.quantize(Decimal('1En')). - - >>> round(Decimal('123.456'), 0) - Decimal('123') - >>> round(Decimal('123.456'), 2) - Decimal('123.46') - >>> round(Decimal('123.456'), -2) - Decimal('1E+2') - >>> round(Decimal('-Infinity'), 37) - Decimal('NaN') - >>> round(Decimal('sNaN123'), 0) - Decimal('NaN123') - - """ - if n is not None: - # two-argument form: use the equivalent quantize call - if not isinstance(n, int): - raise TypeError('Second argument to round should be integral') - exp = _dec_from_triple(0, '1', -n) - return self.quantize(exp) - - # one-argument form - if self._is_special: - if self.is_nan(): - raise ValueError("cannot round a NaN") - else: - raise OverflowError("cannot round an infinity") - return int(self._rescale(0, ROUND_HALF_EVEN)) - - def __floor__(self): - """Return the floor of self, as an integer. - - For a finite Decimal instance self, return the greatest - integer n such that n <= self. 
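# Illustrative sketch (separate from the patch text) tying the rounding helpers above
# to the public API; assumes the default 28-digit context.
from decimal import Decimal, ROUND_CEILING, ROUND_HALF_EVEN, ROUND_HALF_UP

print(Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_EVEN))  # Decimal('2')
print(Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP))    # Decimal('3')
print(Decimal('-2.5').quantize(Decimal('1'), rounding=ROUND_CEILING))   # Decimal('-2')
print(round(Decimal('2.5')))          # 2: one-argument round() always uses ROUND_HALF_EVEN
print(int(Decimal('-7.9')))           # -7: int() truncates toward zero
print(divmod(Decimal(7), Decimal(3))) # (Decimal('2'), Decimal('1'))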
If self is infinite or a NaN - then a Python exception is raised. - - """ - if self._is_special: - if self.is_nan(): - raise ValueError("cannot round a NaN") - else: - raise OverflowError("cannot round an infinity") - return int(self._rescale(0, ROUND_FLOOR)) - - def __ceil__(self): - """Return the ceiling of self, as an integer. - - For a finite Decimal instance self, return the least integer n - such that n >= self. If self is infinite or a NaN then a - Python exception is raised. - - """ - if self._is_special: - if self.is_nan(): - raise ValueError("cannot round a NaN") - else: - raise OverflowError("cannot round an infinity") - return int(self._rescale(0, ROUND_CEILING)) - - def fma(self, other, third, context=None): - """Fused multiply-add. - - Returns self*other+third with no rounding of the intermediate - product self*other. - - self and other are multiplied together, with no rounding of - the result. The third operand is then added to the result, - and a single final rounding is performed. - """ - - other = _convert_other(other, raiseit=True) - third = _convert_other(third, raiseit=True) - - # compute product; raise InvalidOperation if either operand is - # a signaling NaN or if the product is zero times infinity. - if self._is_special or other._is_special: - if context is None: - context = getcontext() - if self._exp == 'N': - return context._raise_error(InvalidOperation, 'sNaN', self) - if other._exp == 'N': - return context._raise_error(InvalidOperation, 'sNaN', other) - if self._exp == 'n': - product = self - elif other._exp == 'n': - product = other - elif self._exp == 'F': - if not other: - return context._raise_error(InvalidOperation, - 'INF * 0 in fma') - product = _SignedInfinity[self._sign ^ other._sign] - elif other._exp == 'F': - if not self: - return context._raise_error(InvalidOperation, - '0 * INF in fma') - product = _SignedInfinity[self._sign ^ other._sign] - else: - product = _dec_from_triple(self._sign ^ other._sign, - str(int(self._int) * int(other._int)), - self._exp + other._exp) - - return product.__add__(third, context) - - def _power_modulo(self, other, modulo, context=None): - """Three argument version of __pow__""" - - other = _convert_other(other) - if other is NotImplemented: - return other - modulo = _convert_other(modulo) - if modulo is NotImplemented: - return modulo - - if context is None: - context = getcontext() - - # deal with NaNs: if there are any sNaNs then first one wins, - # (i.e. 
behaviour for NaNs is identical to that of fma) - self_is_nan = self._isnan() - other_is_nan = other._isnan() - modulo_is_nan = modulo._isnan() - if self_is_nan or other_is_nan or modulo_is_nan: - if self_is_nan == 2: - return context._raise_error(InvalidOperation, 'sNaN', - self) - if other_is_nan == 2: - return context._raise_error(InvalidOperation, 'sNaN', - other) - if modulo_is_nan == 2: - return context._raise_error(InvalidOperation, 'sNaN', - modulo) - if self_is_nan: - return self._fix_nan(context) - if other_is_nan: - return other._fix_nan(context) - return modulo._fix_nan(context) - - # check inputs: we apply same restrictions as Python's pow() - if not (self._isinteger() and - other._isinteger() and - modulo._isinteger()): - return context._raise_error(InvalidOperation, - 'pow() 3rd argument not allowed ' - 'unless all arguments are integers') - if other < 0: - return context._raise_error(InvalidOperation, - 'pow() 2nd argument cannot be ' - 'negative when 3rd argument specified') - if not modulo: - return context._raise_error(InvalidOperation, - 'pow() 3rd argument cannot be 0') - - # additional restriction for decimal: the modulus must be less - # than 10**prec in absolute value - if modulo.adjusted() >= context.prec: - return context._raise_error(InvalidOperation, - 'insufficient precision: pow() 3rd ' - 'argument must not have more than ' - 'precision digits') - - # define 0**0 == NaN, for consistency with two-argument pow - # (even though it hurts!) - if not other and not self: - return context._raise_error(InvalidOperation, - 'at least one of pow() 1st argument ' - 'and 2nd argument must be nonzero ;' - '0**0 is not defined') - - # compute sign of result - if other._iseven(): - sign = 0 - else: - sign = self._sign - - # convert modulo to a Python integer, and self and other to - # Decimal integers (i.e. force their exponents to be >= 0) - modulo = abs(int(modulo)) - base = _WorkRep(self.to_integral_value()) - exponent = _WorkRep(other.to_integral_value()) - - # compute result using integer pow() - base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo - for i in range(exponent.exp): - base = pow(base, 10, modulo) - base = pow(base, exponent.int, modulo) - - return _dec_from_triple(sign, str(base), 0) - - def _power_exact(self, other, p): - """Attempt to compute self**other exactly. - - Given Decimals self and other and an integer p, attempt to - compute an exact result for the power self**other, with p - digits of precision. Return None if self**other is not - exactly representable in p digits. - - Assumes that elimination of special cases has already been - performed: self and other must both be nonspecial; self must - be positive and not numerically equal to 1; other must be - nonzero. For efficiency, other._exp should not be too large, - so that 10**abs(other._exp) is a feasible calculation.""" - - # In the comments below, we write x for the value of self and y for the - # value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc - # and yc positive integers not divisible by 10. - - # The main purpose of this method is to identify the *failure* - # of x**y to be exactly representable with as little effort as - # possible. So we look for cheap and easy tests that - # eliminate the possibility of x**y being exact. Only if all - # these tests are passed do we go on to actually compute x**y. - - # Here's the main idea. Express y as a rational number m/n, with m and - # n relatively prime and n>0. 
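# Illustrative sketch (separate from the patch text) of the three-argument pow()
# restrictions described above: all operands integral, exponent non-negative,
# modulus nonzero and within the context precision.
from decimal import Decimal, InvalidOperation

print(pow(Decimal(3), Decimal(100), Decimal(7)))   # Decimal('4'), computed without rounding
try:
    pow(Decimal('3.5'), Decimal(2), Decimal(7))    # non-integral operand
except InvalidOperation:
    print("non-integral operand raised InvalidOperation")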
Then for x**y to be exactly - # representable (at *any* precision), xc must be the nth power of a - # positive integer and xe must be divisible by n. If y is negative - # then additionally xc must be a power of either 2 or 5, hence a power - # of 2**n or 5**n. - # - # There's a limit to how small |y| can be: if y=m/n as above - # then: - # - # (1) if xc != 1 then for the result to be representable we - # need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So - # if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <= - # 2**(1/|y|), hence xc**|y| < 2 and the result is not - # representable. - # - # (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if - # |y| < 1/|xe| then the result is not representable. - # - # Note that since x is not equal to 1, at least one of (1) and - # (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) < - # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye. - # - # There's also a limit to how large y can be, at least if it's - # positive: the normalized result will have coefficient xc**y, - # so if it's representable then xc**y < 10**p, and y < - # p/log10(xc). Hence if y*log10(xc) >= p then the result is - # not exactly representable. - - # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye, - # so |y| < 1/xe and the result is not representable. - # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y| - # < 1/nbits(xc). - - x = _WorkRep(self) - xc, xe = x.int, x.exp - while xc % 10 == 0: - xc //= 10 - xe += 1 - - y = _WorkRep(other) - yc, ye = y.int, y.exp - while yc % 10 == 0: - yc //= 10 - ye += 1 - - # case where xc == 1: result is 10**(xe*y), with xe*y - # required to be an integer - if xc == 1: - xe *= yc - # result is now 10**(xe * 10**ye); xe * 10**ye must be integral - while xe % 10 == 0: - xe //= 10 - ye += 1 - if ye < 0: - return None - exponent = xe * 10**ye - if y.sign == 1: - exponent = -exponent - # if other is a nonnegative integer, use ideal exponent - if other._isinteger() and other._sign == 0: - ideal_exponent = self._exp*int(other) - zeros = min(exponent-ideal_exponent, p-1) - else: - zeros = 0 - return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros) - - # case where y is negative: xc must be either a power - # of 2 or a power of 5. - if y.sign == 1: - last_digit = xc % 10 - if last_digit in (2,4,6,8): - # quick test for power of 2 - if xc & -xc != xc: - return None - # now xc is a power of 2; e is its exponent - e = _nbits(xc)-1 - - # We now have: - # - # x = 2**e * 10**xe, e > 0, and y < 0. - # - # The exact result is: - # - # x**y = 5**(-e*y) * 10**(e*y + xe*y) - # - # provided that both e*y and xe*y are integers. Note that if - # 5**(-e*y) >= 10**p, then the result can't be expressed - # exactly with p digits of precision. - # - # Using the above, we can guard against large values of ye. - # 93/65 is an upper bound for log(10)/log(5), so if - # - # ye >= len(str(93*p//65)) - # - # then - # - # -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5), - # - # so 5**(-e*y) >= 10**p, and the coefficient of the result - # can't be expressed in p digits. - - # emax >= largest e such that 5**e < 10**p. 
- emax = p*93//65 - if ye >= len(str(emax)): - return None - - # Find -e*y and -xe*y; both must be integers - e = _decimal_lshift_exact(e * yc, ye) - xe = _decimal_lshift_exact(xe * yc, ye) - if e is None or xe is None: - return None - - if e > emax: - return None - xc = 5**e - - elif last_digit == 5: - # e >= log_5(xc) if xc is a power of 5; we have - # equality all the way up to xc=5**2658 - e = _nbits(xc)*28//65 - xc, remainder = divmod(5**e, xc) - if remainder: - return None - while xc % 5 == 0: - xc //= 5 - e -= 1 - - # Guard against large values of ye, using the same logic as in - # the 'xc is a power of 2' branch. 10/3 is an upper bound for - # log(10)/log(2). - emax = p*10//3 - if ye >= len(str(emax)): - return None - - e = _decimal_lshift_exact(e * yc, ye) - xe = _decimal_lshift_exact(xe * yc, ye) - if e is None or xe is None: - return None - - if e > emax: - return None - xc = 2**e - else: - return None - - if xc >= 10**p: - return None - xe = -e-xe - return _dec_from_triple(0, str(xc), xe) - - # now y is positive; find m and n such that y = m/n - if ye >= 0: - m, n = yc*10**ye, 1 - else: - if xe != 0 and len(str(abs(yc*xe))) <= -ye: - return None - xc_bits = _nbits(xc) - if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye: - return None - m, n = yc, 10**(-ye) - while m % 2 == n % 2 == 0: - m //= 2 - n //= 2 - while m % 5 == n % 5 == 0: - m //= 5 - n //= 5 - - # compute nth root of xc*10**xe - if n > 1: - # if 1 < xc < 2**n then xc isn't an nth power - if xc != 1 and xc_bits <= n: - return None - - xe, rem = divmod(xe, n) - if rem != 0: - return None - - # compute nth root of xc using Newton's method - a = 1 << -(-_nbits(xc)//n) # initial estimate - while True: - q, r = divmod(xc, a**(n-1)) - if a <= q: - break - else: - a = (a*(n-1) + q)//n - if not (a == q and r == 0): - return None - xc = a - - # now xc*10**xe is the nth root of the original xc*10**xe - # compute mth power of xc*10**xe - - # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m > - # 10**p and the result is not representable. - if xc > 1 and m > p*100//_log10_lb(xc): - return None - xc = xc**m - xe *= m - if xc > 10**p: - return None - - # by this point the result *is* exactly representable - # adjust the exponent to get as close as possible to the ideal - # exponent, if necessary - str_xc = str(xc) - if other._isinteger() and other._sign == 0: - ideal_exponent = self._exp*int(other) - zeros = min(xe-ideal_exponent, p-len(str_xc)) - else: - zeros = 0 - return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros) - - def __pow__(self, other, modulo=None, context=None): - """Return self ** other [ % modulo]. - - With two arguments, compute self**other. - - With three arguments, compute (self**other) % modulo. For the - three argument form, the following restrictions on the - arguments hold: - - - all three arguments must be integral - - other must be nonnegative - - either self or other (or both) must be nonzero - - modulo must be nonzero and must have at most p digits, - where p is the context precision. - - If any of these restrictions is violated the InvalidOperation - flag is raised. - - The result of pow(self, other, modulo) is identical to the - result that would be obtained by computing (self**other) % - modulo with unbounded precision, but is computed more - efficiently. It is always exact. 
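# Illustrative sketch (separate from the patch text) of _power_exact's effect as seen
# through the public API: exact integer powers keep the ideal exponent, while an exact
# result for a non-integer exponent is still padded to full precision and flagged
# Inexact. Outputs assume the default 28-digit context.
from decimal import Decimal

print(Decimal(2) ** Decimal(10))     # Decimal('1024')
print(Decimal(4) ** Decimal('0.5'))  # Decimal('2.000000000000000000000000000')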
- """ - - if modulo is not None: - return self._power_modulo(other, modulo, context) - - other = _convert_other(other) - if other is NotImplemented: - return other - - if context is None: - context = getcontext() - - # either argument is a NaN => result is NaN - ans = self._check_nans(other, context) - if ans: - return ans - - # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity) - if not other: - if not self: - return context._raise_error(InvalidOperation, '0 ** 0') - else: - return _One - - # result has sign 1 iff self._sign is 1 and other is an odd integer - result_sign = 0 - if self._sign == 1: - if other._isinteger(): - if not other._iseven(): - result_sign = 1 - else: - # -ve**noninteger = NaN - # (-0)**noninteger = 0**noninteger - if self: - return context._raise_error(InvalidOperation, - 'x ** y with x negative and y not an integer') - # negate self, without doing any unwanted rounding - self = self.copy_negate() - - # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity - if not self: - if other._sign == 0: - return _dec_from_triple(result_sign, '0', 0) - else: - return _SignedInfinity[result_sign] - - # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0 - if self._isinfinity(): - if other._sign == 0: - return _SignedInfinity[result_sign] - else: - return _dec_from_triple(result_sign, '0', 0) - - # 1**other = 1, but the choice of exponent and the flags - # depend on the exponent of self, and on whether other is a - # positive integer, a negative integer, or neither - if self == _One: - if other._isinteger(): - # exp = max(self._exp*max(int(other), 0), - # 1-context.prec) but evaluating int(other) directly - # is dangerous until we know other is small (other - # could be 1e999999999) - if other._sign == 1: - multiplier = 0 - elif other > context.prec: - multiplier = context.prec - else: - multiplier = int(other) - - exp = self._exp * multiplier - if exp < 1-context.prec: - exp = 1-context.prec - context._raise_error(Rounded) - else: - context._raise_error(Inexact) - context._raise_error(Rounded) - exp = 1-context.prec - - return _dec_from_triple(result_sign, '1'+'0'*-exp, exp) - - # compute adjusted exponent of self - self_adj = self.adjusted() - - # self ** infinity is infinity if self > 1, 0 if self < 1 - # self ** -infinity is infinity if self < 1, 0 if self > 1 - if other._isinfinity(): - if (other._sign == 0) == (self_adj < 0): - return _dec_from_triple(result_sign, '0', 0) - else: - return _SignedInfinity[result_sign] - - # from here on, the result always goes through the call - # to _fix at the end of this function. - ans = None - exact = False - - # crude test to catch cases of extreme overflow/underflow. If - # log10(self)*other >= 10**bound and bound >= len(str(Emax)) - # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence - # self**other >= 10**(Emax+1), so overflow occurs. The test - # for underflow is similar. 
- bound = self._log10_exp_bound() + other.adjusted() - if (self_adj >= 0) == (other._sign == 0): - # self > 1 and other +ve, or self < 1 and other -ve - # possibility of overflow - if bound >= len(str(context.Emax)): - ans = _dec_from_triple(result_sign, '1', context.Emax+1) - else: - # self > 1 and other -ve, or self < 1 and other +ve - # possibility of underflow to 0 - Etiny = context.Etiny() - if bound >= len(str(-Etiny)): - ans = _dec_from_triple(result_sign, '1', Etiny-1) - - # try for an exact result with precision +1 - if ans is None: - ans = self._power_exact(other, context.prec + 1) - if ans is not None: - if result_sign == 1: - ans = _dec_from_triple(1, ans._int, ans._exp) - exact = True - - # usual case: inexact result, x**y computed directly as exp(y*log(x)) - if ans is None: - p = context.prec - x = _WorkRep(self) - xc, xe = x.int, x.exp - y = _WorkRep(other) - yc, ye = y.int, y.exp - if y.sign == 1: - yc = -yc - - # compute correctly rounded result: start with precision +3, - # then increase precision until result is unambiguously roundable - extra = 3 - while True: - coeff, exp = _dpower(xc, xe, yc, ye, p+extra) - if coeff % (5*10**(len(str(coeff))-p-1)): - break - extra += 3 - - ans = _dec_from_triple(result_sign, str(coeff), exp) - - # unlike exp, ln and log10, the power function respects the - # rounding mode; no need to switch to ROUND_HALF_EVEN here - - # There's a difficulty here when 'other' is not an integer and - # the result is exact. In this case, the specification - # requires that the Inexact flag be raised (in spite of - # exactness), but since the result is exact _fix won't do this - # for us. (Correspondingly, the Underflow signal should also - # be raised for subnormal results.) We can't directly raise - # these signals either before or after calling _fix, since - # that would violate the precedence for signals. So we wrap - # the ._fix call in a temporary context, and reraise - # afterwards. - if exact and not other._isinteger(): - # pad with zeros up to length context.prec+1 if necessary; this - # ensures that the Rounded signal will be raised. - if len(ans._int) <= context.prec: - expdiff = context.prec + 1 - len(ans._int) - ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff, - ans._exp-expdiff) - - # create a copy of the current context, with cleared flags/traps - newcontext = context.copy() - newcontext.clear_flags() - for exception in _signals: - newcontext.traps[exception] = 0 - - # round in the new context - ans = ans._fix(newcontext) - - # raise Inexact, and if necessary, Underflow - newcontext._raise_error(Inexact) - if newcontext.flags[Subnormal]: - newcontext._raise_error(Underflow) - - # propagate signals to the original context; _fix could - # have raised any of Overflow, Underflow, Subnormal, - # Inexact, Rounded, Clamped. Overflow needs the correct - # arguments. Note that the order of the exceptions is - # important here. 
- if newcontext.flags[Overflow]: - context._raise_error(Overflow, 'above Emax', ans._sign) - for exception in Underflow, Subnormal, Inexact, Rounded, Clamped: - if newcontext.flags[exception]: - context._raise_error(exception) - - else: - ans = ans._fix(context) - - return ans - - def __rpow__(self, other, context=None): - """Swaps self/other and returns __pow__.""" - other = _convert_other(other) - if other is NotImplemented: - return other - return other.__pow__(self, context=context) - - def normalize(self, context=None): - """Normalize- strip trailing 0s, change anything equal to 0 to 0e0""" - - if context is None: - context = getcontext() - - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - - dup = self._fix(context) - if dup._isinfinity(): - return dup - - if not dup: - return _dec_from_triple(dup._sign, '0', 0) - exp_max = [context.Emax, context.Etop()][context.clamp] - end = len(dup._int) - exp = dup._exp - while dup._int[end-1] == '0' and exp < exp_max: - exp += 1 - end -= 1 - return _dec_from_triple(dup._sign, dup._int[:end], exp) - - def quantize(self, exp, rounding=None, context=None): - """Quantize self so its exponent is the same as that of exp. - - Similar to self._rescale(exp._exp) but with error checking. - """ - exp = _convert_other(exp, raiseit=True) - - if context is None: - context = getcontext() - if rounding is None: - rounding = context.rounding - - if self._is_special or exp._is_special: - ans = self._check_nans(exp, context) - if ans: - return ans - - if exp._isinfinity() or self._isinfinity(): - if exp._isinfinity() and self._isinfinity(): - return Decimal(self) # if both are inf, it is OK - return context._raise_error(InvalidOperation, - 'quantize with one INF') - - # exp._exp should be between Etiny and Emax - if not (context.Etiny() <= exp._exp <= context.Emax): - return context._raise_error(InvalidOperation, - 'target exponent out of bounds in quantize') - - if not self: - ans = _dec_from_triple(self._sign, '0', exp._exp) - return ans._fix(context) - - self_adjusted = self.adjusted() - if self_adjusted > context.Emax: - return context._raise_error(InvalidOperation, - 'exponent of quantize result too large for current context') - if self_adjusted - exp._exp + 1 > context.prec: - return context._raise_error(InvalidOperation, - 'quantize result has too many digits for current context') - - ans = self._rescale(exp._exp, rounding) - if ans.adjusted() > context.Emax: - return context._raise_error(InvalidOperation, - 'exponent of quantize result too large for current context') - if len(ans._int) > context.prec: - return context._raise_error(InvalidOperation, - 'quantize result has too many digits for current context') - - # raise appropriate flags - if ans and ans.adjusted() < context.Emin: - context._raise_error(Subnormal) - if ans._exp > self._exp: - if ans != self: - context._raise_error(Inexact) - context._raise_error(Rounded) - - # call to fix takes care of any necessary folddown, and - # signals Clamped if necessary - ans = ans._fix(context) - return ans - - def same_quantum(self, other, context=None): - """Return True if self and other have the same exponent; otherwise - return False. - - If either operand is a special value, the following rules are used: - * return True if both operands are infinities - * return True if both operands are NaNs - * otherwise, return False. 
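# Illustrative sketch (separate from the patch text) of normalize() and quantize()
# as described above; assumes the default 28-digit context.
from decimal import Decimal, InvalidOperation

print(Decimal('120.00').normalize())                      # Decimal('1.2E+2')
print(Decimal('1.41421356').quantize(Decimal('1.000')))   # Decimal('1.414')
try:
    Decimal('1E+100').quantize(Decimal('0.01'))           # would need more digits than prec
except InvalidOperation:
    print("quantize result would exceed context precision")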
- """ - other = _convert_other(other, raiseit=True) - if self._is_special or other._is_special: - return (self.is_nan() and other.is_nan() or - self.is_infinite() and other.is_infinite()) - return self._exp == other._exp - - def _rescale(self, exp, rounding): - """Rescale self so that the exponent is exp, either by padding with zeros - or by truncating digits, using the given rounding mode. - - Specials are returned without change. This operation is - quiet: it raises no flags, and uses no information from the - context. - - exp = exp to scale to (an integer) - rounding = rounding mode - """ - if self._is_special: - return Decimal(self) - if not self: - return _dec_from_triple(self._sign, '0', exp) - - if self._exp >= exp: - # pad answer with zeros if necessary - return _dec_from_triple(self._sign, - self._int + '0'*(self._exp - exp), exp) - - # too many digits; round and lose data. If self.adjusted() < - # exp-1, replace self by 10**(exp-1) before rounding - digits = len(self._int) + self._exp - exp - if digits < 0: - self = _dec_from_triple(self._sign, '1', exp-1) - digits = 0 - this_function = self._pick_rounding_function[rounding] - changed = this_function(self, digits) - coeff = self._int[:digits] or '0' - if changed == 1: - coeff = str(int(coeff)+1) - return _dec_from_triple(self._sign, coeff, exp) - - def _round(self, places, rounding): - """Round a nonzero, nonspecial Decimal to a fixed number of - significant figures, using the given rounding mode. - - Infinities, NaNs and zeros are returned unaltered. - - This operation is quiet: it raises no flags, and uses no - information from the context. - - """ - if places <= 0: - raise ValueError("argument should be at least 1 in _round") - if self._is_special or not self: - return Decimal(self) - ans = self._rescale(self.adjusted()+1-places, rounding) - # it can happen that the rescale alters the adjusted exponent; - # for example when rounding 99.97 to 3 significant figures. - # When this happens we end up with an extra 0 at the end of - # the number; a second rescale fixes this. - if ans.adjusted() != self.adjusted(): - ans = ans._rescale(ans.adjusted()+1-places, rounding) - return ans - - def to_integral_exact(self, rounding=None, context=None): - """Rounds to a nearby integer. - - If no rounding mode is specified, take the rounding mode from - the context. This method raises the Rounded and Inexact flags - when appropriate. - - See also: to_integral_value, which does exactly the same as - this method except that it doesn't raise Inexact or Rounded. 
- """ - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - return Decimal(self) - if self._exp >= 0: - return Decimal(self) - if not self: - return _dec_from_triple(self._sign, '0', 0) - if context is None: - context = getcontext() - if rounding is None: - rounding = context.rounding - ans = self._rescale(0, rounding) - if ans != self: - context._raise_error(Inexact) - context._raise_error(Rounded) - return ans - - def to_integral_value(self, rounding=None, context=None): - """Rounds to the nearest integer, without raising inexact, rounded.""" - if context is None: - context = getcontext() - if rounding is None: - rounding = context.rounding - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - return Decimal(self) - if self._exp >= 0: - return Decimal(self) - else: - return self._rescale(0, rounding) - - # the method name changed, but we provide also the old one, for compatibility - to_integral = to_integral_value - - def sqrt(self, context=None): - """Return the square root of self.""" - if context is None: - context = getcontext() - - if self._is_special: - ans = self._check_nans(context=context) - if ans: - return ans - - if self._isinfinity() and self._sign == 0: - return Decimal(self) - - if not self: - # exponent = self._exp // 2. sqrt(-0) = -0 - ans = _dec_from_triple(self._sign, '0', self._exp // 2) - return ans._fix(context) - - if self._sign == 1: - return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0') - - # At this point self represents a positive number. Let p be - # the desired precision and express self in the form c*100**e - # with c a positive real number and e an integer, c and e - # being chosen so that 100**(p-1) <= c < 100**p. Then the - # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1) - # <= sqrt(c) < 10**p, so the closest representable Decimal at - # precision p is n*10**e where n = round_half_even(sqrt(c)), - # the closest integer to sqrt(c) with the even integer chosen - # in the case of a tie. - # - # To ensure correct rounding in all cases, we use the - # following trick: we compute the square root to an extra - # place (precision p+1 instead of precision p), rounding down. - # Then, if the result is inexact and its last digit is 0 or 5, - # we increase the last digit to 1 or 6 respectively; if it's - # exact we leave the last digit alone. Now the final round to - # p places (or fewer in the case of underflow) will round - # correctly and raise the appropriate flags. - - # use an extra digit of precision - prec = context.prec+1 - - # write argument in the form c*100**e where e = self._exp//2 - # is the 'ideal' exponent, to be used if the square root is - # exactly representable. l is the number of 'digits' of c in - # base 100, so that 100**(l-1) <= c < 100**l. 
- op = _WorkRep(self) - e = op.exp >> 1 - if op.exp & 1: - c = op.int * 10 - l = (len(self._int) >> 1) + 1 - else: - c = op.int - l = len(self._int)+1 >> 1 - - # rescale so that c has exactly prec base 100 'digits' - shift = prec-l - if shift >= 0: - c *= 100**shift - exact = True - else: - c, remainder = divmod(c, 100**-shift) - exact = not remainder - e -= shift - - # find n = floor(sqrt(c)) using Newton's method - n = 10**prec - while True: - q = c//n - if n <= q: - break - else: - n = n + q >> 1 - exact = exact and n*n == c - - if exact: - # result is exact; rescale to use ideal exponent e - if shift >= 0: - # assert n % 10**shift == 0 - n //= 10**shift - else: - n *= 10**-shift - e += shift - else: - # result is not exact; fix last digit as described above - if n % 5 == 0: - n += 1 - - ans = _dec_from_triple(0, str(n), e) - - # round, and fit to current context - context = context._shallow_copy() - rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) - context.rounding = rounding - - return ans - - def max(self, other, context=None): - """Returns the larger value. - - Like max(self, other) except if one is not a number, returns - NaN (and signals if one is sNaN). Also rounds. - """ - other = _convert_other(other, raiseit=True) - - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - # If one operand is a quiet NaN and the other is number, then the - # number is always returned - sn = self._isnan() - on = other._isnan() - if sn or on: - if on == 1 and sn == 0: - return self._fix(context) - if sn == 1 and on == 0: - return other._fix(context) - return self._check_nans(other, context) - - c = self._cmp(other) - if c == 0: - # If both operands are finite and equal in numerical value - # then an ordering is applied: - # - # If the signs differ then max returns the operand with the - # positive sign and min returns the operand with the negative sign - # - # If the signs are the same then the exponent is used to select - # the result. This is exactly the ordering used in compare_total. - c = self.compare_total(other) - - if c == -1: - ans = other - else: - ans = self - - return ans._fix(context) - - def min(self, other, context=None): - """Returns the smaller value. - - Like min(self, other) except if one is not a number, returns - NaN (and signals if one is sNaN). Also rounds. - """ - other = _convert_other(other, raiseit=True) - - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - # If one operand is a quiet NaN and the other is number, then the - # number is always returned - sn = self._isnan() - on = other._isnan() - if sn or on: - if on == 1 and sn == 0: - return self._fix(context) - if sn == 1 and on == 0: - return other._fix(context) - return self._check_nans(other, context) - - c = self._cmp(other) - if c == 0: - c = self.compare_total(other) - - if c == -1: - ans = self - else: - ans = other - - return ans._fix(context) - - def _isinteger(self): - """Returns whether self is an integer""" - if self._is_special: - return False - if self._exp >= 0: - return True - rest = self._int[self._exp:] - return rest == '0'*len(rest) - - def _iseven(self): - """Returns True if self is even. 
Assumes self is an integer.""" - if not self or self._exp > 0: - return True - return self._int[-1+self._exp] in '02468' - - def adjusted(self): - """Return the adjusted exponent of self""" - try: - return self._exp + len(self._int) - 1 - # If NaN or Infinity, self._exp is string - except TypeError: - return 0 - - def canonical(self): - """Returns the same Decimal object. - - As we do not have different encodings for the same number, the - received object already is in its canonical form. - """ - return self - - def compare_signal(self, other, context=None): - """Compares self to the other operand numerically. - - It's pretty much like compare(), but all NaNs signal, with signaling - NaNs taking precedence over quiet NaNs. - """ - other = _convert_other(other, raiseit = True) - ans = self._compare_check_nans(other, context) - if ans: - return ans - return self.compare(other, context=context) - - def compare_total(self, other, context=None): - """Compares self to other using the abstract representations. - - This is not like the standard compare, which use their numerical - value. Note that a total ordering is defined for all possible abstract - representations. - """ - other = _convert_other(other, raiseit=True) - - # if one is negative and the other is positive, it's easy - if self._sign and not other._sign: - return _NegativeOne - if not self._sign and other._sign: - return _One - sign = self._sign - - # let's handle both NaN types - self_nan = self._isnan() - other_nan = other._isnan() - if self_nan or other_nan: - if self_nan == other_nan: - # compare payloads as though they're integers - self_key = len(self._int), self._int - other_key = len(other._int), other._int - if self_key < other_key: - if sign: - return _One - else: - return _NegativeOne - if self_key > other_key: - if sign: - return _NegativeOne - else: - return _One - return _Zero - - if sign: - if self_nan == 1: - return _NegativeOne - if other_nan == 1: - return _One - if self_nan == 2: - return _NegativeOne - if other_nan == 2: - return _One - else: - if self_nan == 1: - return _One - if other_nan == 1: - return _NegativeOne - if self_nan == 2: - return _One - if other_nan == 2: - return _NegativeOne - - if self < other: - return _NegativeOne - if self > other: - return _One - - if self._exp < other._exp: - if sign: - return _One - else: - return _NegativeOne - if self._exp > other._exp: - if sign: - return _NegativeOne - else: - return _One - return _Zero - - - def compare_total_mag(self, other, context=None): - """Compares self to other using abstract repr., ignoring sign. - - Like compare_total, but with operand's sign ignored and assumed to be 0. - """ - other = _convert_other(other, raiseit=True) - - s = self.copy_abs() - o = other.copy_abs() - return s.compare_total(o) - - def copy_abs(self): - """Returns a copy with the sign set to 0. 
""" - return _dec_from_triple(0, self._int, self._exp, self._is_special) - - def copy_negate(self): - """Returns a copy with the sign inverted.""" - if self._sign: - return _dec_from_triple(0, self._int, self._exp, self._is_special) - else: - return _dec_from_triple(1, self._int, self._exp, self._is_special) - - def copy_sign(self, other, context=None): - """Returns self with the sign of other.""" - other = _convert_other(other, raiseit=True) - return _dec_from_triple(other._sign, self._int, - self._exp, self._is_special) - - def exp(self, context=None): - """Returns e ** self.""" - - if context is None: - context = getcontext() - - # exp(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans - - # exp(-Infinity) = 0 - if self._isinfinity() == -1: - return _Zero - - # exp(0) = 1 - if not self: - return _One - - # exp(Infinity) = Infinity - if self._isinfinity() == 1: - return Decimal(self) - - # the result is now guaranteed to be inexact (the true - # mathematical result is transcendental). There's no need to - # raise Rounded and Inexact here---they'll always be raised as - # a result of the call to _fix. - p = context.prec - adj = self.adjusted() - - # we only need to do any computation for quite a small range - # of adjusted exponents---for example, -29 <= adj <= 10 for - # the default context. For smaller exponent the result is - # indistinguishable from 1 at the given precision, while for - # larger exponent the result either overflows or underflows. - if self._sign == 0 and adj > len(str((context.Emax+1)*3)): - # overflow - ans = _dec_from_triple(0, '1', context.Emax+1) - elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)): - # underflow to 0 - ans = _dec_from_triple(0, '1', context.Etiny()-1) - elif self._sign == 0 and adj < -p: - # p+1 digits; final round will raise correct flags - ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) - elif self._sign == 1 and adj < -p-1: - # p+1 digits; final round will raise correct flags - ans = _dec_from_triple(0, '9'*(p+1), -p-1) - # general case - else: - op = _WorkRep(self) - c, e = op.int, op.exp - if op.sign == 1: - c = -c - - # compute correctly rounded result: increase precision by - # 3 digits at a time until we get an unambiguously - # roundable result - extra = 3 - while True: - coeff, exp = _dexp(c, e, p+extra) - if coeff % (5*10**(len(str(coeff))-p-1)): - break - extra += 3 - - ans = _dec_from_triple(0, str(coeff), exp) - - # at this stage, ans should round correctly with *any* - # rounding mode, not just with ROUND_HALF_EVEN - context = context._shallow_copy() - rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) - context.rounding = rounding - - return ans - - def is_canonical(self): - """Return True if self is canonical; otherwise return False. - - Currently, the encoding of a Decimal instance is always - canonical, so this method returns True for any Decimal. - """ - return True - - def is_finite(self): - """Return True if self is finite; otherwise return False. - - A Decimal instance is considered finite if it is neither - infinite nor a NaN. 
- """ - return not self._is_special - - def is_infinite(self): - """Return True if self is infinite; otherwise return False.""" - return self._exp == 'F' - - def is_nan(self): - """Return True if self is a qNaN or sNaN; otherwise return False.""" - return self._exp in ('n', 'N') - - def is_normal(self, context=None): - """Return True if self is a normal number; otherwise return False.""" - if self._is_special or not self: - return False - if context is None: - context = getcontext() - return context.Emin <= self.adjusted() - - def is_qnan(self): - """Return True if self is a quiet NaN; otherwise return False.""" - return self._exp == 'n' - - def is_signed(self): - """Return True if self is negative; otherwise return False.""" - return self._sign == 1 - - def is_snan(self): - """Return True if self is a signaling NaN; otherwise return False.""" - return self._exp == 'N' - - def is_subnormal(self, context=None): - """Return True if self is subnormal; otherwise return False.""" - if self._is_special or not self: - return False - if context is None: - context = getcontext() - return self.adjusted() < context.Emin - - def is_zero(self): - """Return True if self is a zero; otherwise return False.""" - return not self._is_special and self._int == '0' - - def _ln_exp_bound(self): - """Compute a lower bound for the adjusted exponent of self.ln(). - In other words, compute r such that self.ln() >= 10**r. Assumes - that self is finite and positive and that self != 1. - """ - - # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1 - adj = self._exp + len(self._int) - 1 - if adj >= 1: - # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10) - return len(str(adj*23//10)) - 1 - if adj <= -2: - # argument <= 0.1 - return len(str((-1-adj)*23//10)) - 1 - op = _WorkRep(self) - c, e = op.int, op.exp - if adj == 0: - # 1 < self < 10 - num = str(c-10**-e) - den = str(c) - return len(num) - len(den) - (num < den) - # adj == -1, 0.1 <= self < 1 - return e + len(str(10**-e - c)) - 1 - - - def ln(self, context=None): - """Returns the natural (base e) logarithm of self.""" - - if context is None: - context = getcontext() - - # ln(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans - - # ln(0.0) == -Infinity - if not self: - return _NegativeInfinity - - # ln(Infinity) = Infinity - if self._isinfinity() == 1: - return _Infinity - - # ln(1.0) == 0.0 - if self == _One: - return _Zero - - # ln(negative) raises InvalidOperation - if self._sign == 1: - return context._raise_error(InvalidOperation, - 'ln of a negative value') - - # result is irrational, so necessarily inexact - op = _WorkRep(self) - c, e = op.int, op.exp - p = context.prec - - # correctly rounded result: repeatedly increase precision by 3 - # until we get an unambiguously roundable result - places = p - self._ln_exp_bound() + 2 # at least p+3 places - while True: - coeff = _dlog(c, e, places) - # assert len(str(abs(coeff)))-p >= 1 - if coeff % (5*10**(len(str(abs(coeff)))-p-1)): - break - places += 3 - ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) - - context = context._shallow_copy() - rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) - context.rounding = rounding - return ans - - def _log10_exp_bound(self): - """Compute a lower bound for the adjusted exponent of self.log10(). - In other words, find r such that self.log10() >= 10**r. - Assumes that self is finite and positive and that self != 1. 
- """ - - # For x >= 10 or x < 0.1 we only need a bound on the integer - # part of log10(self), and this comes directly from the - # exponent of x. For 0.1 <= x <= 10 we use the inequalities - # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| > - # (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0 - - adj = self._exp + len(self._int) - 1 - if adj >= 1: - # self >= 10 - return len(str(adj))-1 - if adj <= -2: - # self < 0.1 - return len(str(-1-adj))-1 - op = _WorkRep(self) - c, e = op.int, op.exp - if adj == 0: - # 1 < self < 10 - num = str(c-10**-e) - den = str(231*c) - return len(num) - len(den) - (num < den) + 2 - # adj == -1, 0.1 <= self < 1 - num = str(10**-e-c) - return len(num) + e - (num < "231") - 1 - - def log10(self, context=None): - """Returns the base 10 logarithm of self.""" - - if context is None: - context = getcontext() - - # log10(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans - - # log10(0.0) == -Infinity - if not self: - return _NegativeInfinity - - # log10(Infinity) = Infinity - if self._isinfinity() == 1: - return _Infinity - - # log10(negative or -Infinity) raises InvalidOperation - if self._sign == 1: - return context._raise_error(InvalidOperation, - 'log10 of a negative value') - - # log10(10**n) = n - if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1): - # answer may need rounding - ans = Decimal(self._exp + len(self._int) - 1) - else: - # result is irrational, so necessarily inexact - op = _WorkRep(self) - c, e = op.int, op.exp - p = context.prec - - # correctly rounded result: repeatedly increase precision - # until result is unambiguously roundable - places = p-self._log10_exp_bound()+2 - while True: - coeff = _dlog10(c, e, places) - # assert len(str(abs(coeff)))-p >= 1 - if coeff % (5*10**(len(str(abs(coeff)))-p-1)): - break - places += 3 - ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) - - context = context._shallow_copy() - rounding = context._set_rounding(ROUND_HALF_EVEN) - ans = ans._fix(context) - context.rounding = rounding - return ans - - def logb(self, context=None): - """ Returns the exponent of the magnitude of self's MSD. - - The result is the integer which is the exponent of the magnitude - of the most significant digit of self (as though it were truncated - to a single digit while maintaining the value of that digit and - without limiting the resulting exponent). - """ - # logb(NaN) = NaN - ans = self._check_nans(context=context) - if ans: - return ans - - if context is None: - context = getcontext() - - # logb(+/-Inf) = +Inf - if self._isinfinity(): - return _Infinity - - # logb(0) = -Inf, DivisionByZero - if not self: - return context._raise_error(DivisionByZero, 'logb(0)', 1) - - # otherwise, simply return the adjusted exponent of self, as a - # Decimal. Note that no attempt is made to fit the result - # into the current context. - ans = Decimal(self.adjusted()) - return ans._fix(context) - - def _islogical(self): - """Return True if self is a logical operand. - - For being logical, it must be a finite number with a sign of 0, - an exponent of 0, and a coefficient whose digits must all be - either 0 or 1. 
- """ - if self._sign != 0 or self._exp != 0: - return False - for dig in self._int: - if dig not in '01': - return False - return True - - def _fill_logical(self, context, opa, opb): - dif = context.prec - len(opa) - if dif > 0: - opa = '0'*dif + opa - elif dif < 0: - opa = opa[-context.prec:] - dif = context.prec - len(opb) - if dif > 0: - opb = '0'*dif + opb - elif dif < 0: - opb = opb[-context.prec:] - return opa, opb - - def logical_and(self, other, context=None): - """Applies an 'and' operation between self and other's digits.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - if not self._islogical() or not other._islogical(): - return context._raise_error(InvalidOperation) - - # fill to context.prec - (opa, opb) = self._fill_logical(context, self._int, other._int) - - # make the operation, and clean starting zeroes - result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)]) - return _dec_from_triple(0, result.lstrip('0') or '0', 0) - - def logical_invert(self, context=None): - """Invert all its digits.""" - if context is None: - context = getcontext() - return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0), - context) - - def logical_or(self, other, context=None): - """Applies an 'or' operation between self and other's digits.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - if not self._islogical() or not other._islogical(): - return context._raise_error(InvalidOperation) - - # fill to context.prec - (opa, opb) = self._fill_logical(context, self._int, other._int) - - # make the operation, and clean starting zeroes - result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)]) - return _dec_from_triple(0, result.lstrip('0') or '0', 0) - - def logical_xor(self, other, context=None): - """Applies an 'xor' operation between self and other's digits.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - if not self._islogical() or not other._islogical(): - return context._raise_error(InvalidOperation) - - # fill to context.prec - (opa, opb) = self._fill_logical(context, self._int, other._int) - - # make the operation, and clean starting zeroes - result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)]) - return _dec_from_triple(0, result.lstrip('0') or '0', 0) - - def max_mag(self, other, context=None): - """Compares the values numerically with their sign ignored.""" - other = _convert_other(other, raiseit=True) - - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - # If one operand is a quiet NaN and the other is number, then the - # number is always returned - sn = self._isnan() - on = other._isnan() - if sn or on: - if on == 1 and sn == 0: - return self._fix(context) - if sn == 1 and on == 0: - return other._fix(context) - return self._check_nans(other, context) - - c = self.copy_abs()._cmp(other.copy_abs()) - if c == 0: - c = self.compare_total(other) - - if c == -1: - ans = other - else: - ans = self - - return ans._fix(context) - - def min_mag(self, other, context=None): - """Compares the values numerically with their sign ignored.""" - other = _convert_other(other, raiseit=True) - - if context is None: - context = getcontext() - - if self._is_special or other._is_special: - # If one operand is a quiet NaN and the other is number, then the - # number is always returned - sn = self._isnan() - on = other._isnan() - if sn or on: - if on == 1 and sn == 0: - return 
self._fix(context) - if sn == 1 and on == 0: - return other._fix(context) - return self._check_nans(other, context) - - c = self.copy_abs()._cmp(other.copy_abs()) - if c == 0: - c = self.compare_total(other) - - if c == -1: - ans = self - else: - ans = other - - return ans._fix(context) - - def next_minus(self, context=None): - """Returns the largest representable number smaller than itself.""" - if context is None: - context = getcontext() - - ans = self._check_nans(context=context) - if ans: - return ans - - if self._isinfinity() == -1: - return _NegativeInfinity - if self._isinfinity() == 1: - return _dec_from_triple(0, '9'*context.prec, context.Etop()) - - context = context.copy() - context._set_rounding(ROUND_FLOOR) - context._ignore_all_flags() - new_self = self._fix(context) - if new_self != self: - return new_self - return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1), - context) - - def next_plus(self, context=None): - """Returns the smallest representable number larger than itself.""" - if context is None: - context = getcontext() - - ans = self._check_nans(context=context) - if ans: - return ans - - if self._isinfinity() == 1: - return _Infinity - if self._isinfinity() == -1: - return _dec_from_triple(1, '9'*context.prec, context.Etop()) - - context = context.copy() - context._set_rounding(ROUND_CEILING) - context._ignore_all_flags() - new_self = self._fix(context) - if new_self != self: - return new_self - return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1), - context) - - def next_toward(self, other, context=None): - """Returns the number closest to self, in the direction towards other. - - The result is the closest representable number to self - (excluding self) that is in the direction towards other, - unless both have the same value. If the two operands are - numerically equal, then the result is a copy of self with the - sign set to be the same as the sign of other. - """ - other = _convert_other(other, raiseit=True) - - if context is None: - context = getcontext() - - ans = self._check_nans(other, context) - if ans: - return ans - - comparison = self._cmp(other) - if comparison == 0: - return self.copy_sign(other) - - if comparison == -1: - ans = self.next_plus(context) - else: # comparison == 1 - ans = self.next_minus(context) - - # decide which flags to raise using value of ans - if ans._isinfinity(): - context._raise_error(Overflow, - 'Infinite result from next_toward', - ans._sign) - context._raise_error(Inexact) - context._raise_error(Rounded) - elif ans.adjusted() < context.Emin: - context._raise_error(Underflow) - context._raise_error(Subnormal) - context._raise_error(Inexact) - context._raise_error(Rounded) - # if precision == 1 then we don't raise Clamped for a - # result 0E-Etiny. - if not ans: - context._raise_error(Clamped) - - return ans - - def number_class(self, context=None): - """Returns an indication of the class of self. 
- - The class is one of the following strings: - sNaN - NaN - -Infinity - -Normal - -Subnormal - -Zero - +Zero - +Subnormal - +Normal - +Infinity - """ - if self.is_snan(): - return "sNaN" - if self.is_qnan(): - return "NaN" - inf = self._isinfinity() - if inf == 1: - return "+Infinity" - if inf == -1: - return "-Infinity" - if self.is_zero(): - if self._sign: - return "-Zero" - else: - return "+Zero" - if context is None: - context = getcontext() - if self.is_subnormal(context=context): - if self._sign: - return "-Subnormal" - else: - return "+Subnormal" - # just a normal, regular, boring number, :) - if self._sign: - return "-Normal" - else: - return "+Normal" - - def radix(self): - """Just returns 10, as this is Decimal, :)""" - return Decimal(10) - - def rotate(self, other, context=None): - """Returns a rotated copy of self, value-of-other times.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - ans = self._check_nans(other, context) - if ans: - return ans - - if other._exp != 0: - return context._raise_error(InvalidOperation) - if not (-context.prec <= int(other) <= context.prec): - return context._raise_error(InvalidOperation) - - if self._isinfinity(): - return Decimal(self) - - # get values, pad if necessary - torot = int(other) - rotdig = self._int - topad = context.prec - len(rotdig) - if topad > 0: - rotdig = '0'*topad + rotdig - elif topad < 0: - rotdig = rotdig[-topad:] - - # let's rotate! - rotated = rotdig[torot:] + rotdig[:torot] - return _dec_from_triple(self._sign, - rotated.lstrip('0') or '0', self._exp) - - def scaleb(self, other, context=None): - """Returns self operand after adding the second value to its exp.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - ans = self._check_nans(other, context) - if ans: - return ans - - if other._exp != 0: - return context._raise_error(InvalidOperation) - liminf = -2 * (context.Emax + context.prec) - limsup = 2 * (context.Emax + context.prec) - if not (liminf <= int(other) <= limsup): - return context._raise_error(InvalidOperation) - - if self._isinfinity(): - return Decimal(self) - - d = _dec_from_triple(self._sign, self._int, self._exp + int(other)) - d = d._fix(context) - return d - - def shift(self, other, context=None): - """Returns a shifted copy of self, value-of-other times.""" - if context is None: - context = getcontext() - - other = _convert_other(other, raiseit=True) - - ans = self._check_nans(other, context) - if ans: - return ans - - if other._exp != 0: - return context._raise_error(InvalidOperation) - if not (-context.prec <= int(other) <= context.prec): - return context._raise_error(InvalidOperation) - - if self._isinfinity(): - return Decimal(self) - - # get values, pad if necessary - torot = int(other) - rotdig = self._int - topad = context.prec - len(rotdig) - if topad > 0: - rotdig = '0'*topad + rotdig - elif topad < 0: - rotdig = rotdig[-topad:] - - # let's shift! 
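# A quick standalone illustration of the digit-string semantics implemented
# here and in rotate() above, using the public API with an explicit 9-digit
# context (the operand and shift amounts are values chosen for the example):
from decimal import Context, Decimal

_c9 = Context(prec=9)
assert str(_c9.shift(Decimal('123456789'), 2)) == '345678900'    # left: zeros fill in
assert str(_c9.shift(Decimal('123456789'), -2)) == '1234567'     # right: low digits drop
assert str(_c9.rotate(Decimal('123456789'), 2)) == '345678912'   # rotate: digits wrap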
- if torot < 0: - shifted = rotdig[:torot] - else: - shifted = rotdig + '0'*torot - shifted = shifted[-context.prec:] - - return _dec_from_triple(self._sign, - shifted.lstrip('0') or '0', self._exp) - - # Support for pickling, copy, and deepcopy - def __reduce__(self): - return (self.__class__, (str(self),)) - - def __copy__(self): - if type(self) is Decimal: - return self # I'm immutable; therefore I am my own clone - return self.__class__(str(self)) - - def __deepcopy__(self, memo): - if type(self) is Decimal: - return self # My components are also immutable - return self.__class__(str(self)) - - # PEP 3101 support. the _localeconv keyword argument should be - # considered private: it's provided for ease of testing only. - def __format__(self, specifier, context=None, _localeconv=None): - """Format a Decimal instance according to the given specifier. - - The specifier should be a standard format specifier, with the - form described in PEP 3101. Formatting types 'e', 'E', 'f', - 'F', 'g', 'G', 'n' and '%' are supported. If the formatting - type is omitted it defaults to 'g' or 'G', depending on the - value of context.capitals. - """ - - # Note: PEP 3101 says that if the type is not present then - # there should be at least one digit after the decimal point. - # We take the liberty of ignoring this requirement for - # Decimal---it's presumably there to make sure that - # format(float, '') behaves similarly to str(float). - if context is None: - context = getcontext() - - spec = _parse_format_specifier(specifier, _localeconv=_localeconv) - - # special values don't care about the type or precision - if self._is_special: - sign = _format_sign(self._sign, spec) - body = str(self.copy_abs()) - if spec['type'] == '%': - body += '%' - return _format_align(sign, body, spec) - - # a type of None defaults to 'g' or 'G', depending on context - if spec['type'] is None: - spec['type'] = ['g', 'G'][context.capitals] - - # if type is '%', adjust exponent of self accordingly - if spec['type'] == '%': - self = _dec_from_triple(self._sign, self._int, self._exp+2) - - # round if necessary, taking rounding mode from the context - rounding = context.rounding - precision = spec['precision'] - if precision is not None: - if spec['type'] in 'eE': - self = self._round(precision+1, rounding) - elif spec['type'] in 'fF%': - self = self._rescale(-precision, rounding) - elif spec['type'] in 'gG' and len(self._int) > precision: - self = self._round(precision, rounding) - # special case: zeros with a positive exponent can't be - # represented in fixed point; rescale them to 0e0. 
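# Concrete effect of the special case noted above, checked through the public
# API (an illustrative assertion only): without the rescale, a zero held as
# 0E+2 would render under 'f' as '000' rather than '0'.
from decimal import Decimal
assert format(Decimal('0E+2'), 'f') == '0'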
- if not self and self._exp > 0 and spec['type'] in 'fF%': - self = self._rescale(0, rounding) - - # figure out placement of the decimal point - leftdigits = self._exp + len(self._int) - if spec['type'] in 'eE': - if not self and precision is not None: - dotplace = 1 - precision - else: - dotplace = 1 - elif spec['type'] in 'fF%': - dotplace = leftdigits - elif spec['type'] in 'gG': - if self._exp <= 0 and leftdigits > -6: - dotplace = leftdigits - else: - dotplace = 1 - - # find digits before and after decimal point, and get exponent - if dotplace < 0: - intpart = '0' - fracpart = '0'*(-dotplace) + self._int - elif dotplace > len(self._int): - intpart = self._int + '0'*(dotplace-len(self._int)) - fracpart = '' - else: - intpart = self._int[:dotplace] or '0' - fracpart = self._int[dotplace:] - exp = leftdigits-dotplace - - # done with the decimal-specific stuff; hand over the rest - # of the formatting to the _format_number function - return _format_number(self._sign, intpart, fracpart, exp, spec) - -def _dec_from_triple(sign, coefficient, exponent, special=False): - """Create a decimal instance directly, without any validation, - normalization (e.g. removal of leading zeros) or argument - conversion. - - This function is for *internal use only*. - """ - - self = object.__new__(Decimal) - self._sign = sign - self._int = coefficient - self._exp = exponent - self._is_special = special - - return self - -# Register Decimal as a kind of Number (an abstract base class). -# However, do not register it as Real (because Decimals are not -# interoperable with floats). -_numbers.Number.register(Decimal) - - -##### Context class ####################################################### - -class _ContextManager(object): - """Context manager class to support localcontext(). - - Sets a copy of the supplied context in __enter__() and restores - the previous decimal context in __exit__() - """ - def __init__(self, new_context): - self.new_context = new_context.copy() - def __enter__(self): - self.saved_context = getcontext() - setcontext(self.new_context) - return self.new_context - def __exit__(self, t, v, tb): - setcontext(self.saved_context) - -class Context(object): - """Contains the context for a Decimal instance. - - Contains: - prec - precision (for use in rounding, division, square roots..) - rounding - rounding type (how you round) - traps - If traps[exception] = 1, then the exception is - raised when it is caused. Otherwise, a value is - substituted in. - flags - When an exception is caused, flags[exception] is set. - (Whether or not the trap_enabler is set) - Should be reset by user of Decimal instance. - Emin - Minimum exponent - Emax - Maximum exponent - capitals - If 1, 1*10^1 is printed as 1E+1. - If 0, printed as 1e1 - clamp - If 1, change exponents if too high (Default 0) - """ - - def __init__(self, prec=None, rounding=None, Emin=None, Emax=None, - capitals=None, clamp=None, flags=None, traps=None, - _ignored_flags=None): - # Set defaults; for everything except flags and _ignored_flags, - # inherit from DefaultContext. 
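# A minimal check of the default-inheritance described above, via the public
# API (illustrative only; every unspecified attribute comes from DefaultContext):
from decimal import Context, DefaultContext

_c = Context(prec=5)                     # only prec given explicitly
assert _c.rounding == DefaultContext.rounding
assert _c.Emin == DefaultContext.Emin and _c.Emax == DefaultContext.Emax
assert _c.traps == DefaultContext.traps and _c.traps is not DefaultContext.traps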
- try: - dc = DefaultContext - except NameError: - pass - - self.prec = prec if prec is not None else dc.prec - self.rounding = rounding if rounding is not None else dc.rounding - self.Emin = Emin if Emin is not None else dc.Emin - self.Emax = Emax if Emax is not None else dc.Emax - self.capitals = capitals if capitals is not None else dc.capitals - self.clamp = clamp if clamp is not None else dc.clamp - - if _ignored_flags is None: - self._ignored_flags = [] - else: - self._ignored_flags = _ignored_flags - - if traps is None: - self.traps = dc.traps.copy() - elif not isinstance(traps, dict): - self.traps = dict((s, int(s in traps)) for s in _signals + traps) - else: - self.traps = traps - - if flags is None: - self.flags = dict.fromkeys(_signals, 0) - elif not isinstance(flags, dict): - self.flags = dict((s, int(s in flags)) for s in _signals + flags) - else: - self.flags = flags - - def _set_integer_check(self, name, value, vmin, vmax): - if not isinstance(value, int): - raise TypeError("%s must be an integer" % name) - if vmin == '-inf': - if value > vmax: - raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value)) - elif vmax == 'inf': - if value < vmin: - raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value)) - else: - if value < vmin or value > vmax: - raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value)) - return object.__setattr__(self, name, value) - - def _set_signal_dict(self, name, d): - if not isinstance(d, dict): - raise TypeError("%s must be a signal dict" % d) - for key in d: - if not key in _signals: - raise KeyError("%s is not a valid signal dict" % d) - for key in _signals: - if not key in d: - raise KeyError("%s is not a valid signal dict" % d) - return object.__setattr__(self, name, d) - - def __setattr__(self, name, value): - if name == 'prec': - return self._set_integer_check(name, value, 1, 'inf') - elif name == 'Emin': - return self._set_integer_check(name, value, '-inf', 0) - elif name == 'Emax': - return self._set_integer_check(name, value, 0, 'inf') - elif name == 'capitals': - return self._set_integer_check(name, value, 0, 1) - elif name == 'clamp': - return self._set_integer_check(name, value, 0, 1) - elif name == 'rounding': - if not value in _rounding_modes: - # raise TypeError even for strings to have consistency - # among various implementations. 
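# What that consistency requirement looks like from the outside (illustrative;
# the bogus mode name 'half-up' is an assumption for the example):
from decimal import Context
try:
    Context().rounding = 'half-up'       # not one of the ROUND_* string constants
except TypeError:
    pass                                 # TypeError, not ValueError, in both implementations
else:
    raise AssertionError('expected a TypeError')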
- raise TypeError("%s: invalid rounding mode" % value) - return object.__setattr__(self, name, value) - elif name == 'flags' or name == 'traps': - return self._set_signal_dict(name, value) - elif name == '_ignored_flags': - return object.__setattr__(self, name, value) - else: - raise AttributeError( - "'decimal.Context' object has no attribute '%s'" % name) - - def __delattr__(self, name): - raise AttributeError("%s cannot be deleted" % name) - - # Support for pickling, copy, and deepcopy - def __reduce__(self): - flags = [sig for sig, v in self.flags.items() if v] - traps = [sig for sig, v in self.traps.items() if v] - return (self.__class__, - (self.prec, self.rounding, self.Emin, self.Emax, - self.capitals, self.clamp, flags, traps)) - - def __repr__(self): - """Show the current context.""" - s = [] - s.append('Context(prec=%(prec)d, rounding=%(rounding)s, ' - 'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, ' - 'clamp=%(clamp)d' - % vars(self)) - names = [f.__name__ for f, v in self.flags.items() if v] - s.append('flags=[' + ', '.join(names) + ']') - names = [t.__name__ for t, v in self.traps.items() if v] - s.append('traps=[' + ', '.join(names) + ']') - return ', '.join(s) + ')' - - def clear_flags(self): - """Reset all flags to zero""" - for flag in self.flags: - self.flags[flag] = 0 - - def clear_traps(self): - """Reset all traps to zero""" - for flag in self.traps: - self.traps[flag] = 0 - - def _shallow_copy(self): - """Returns a shallow copy from self.""" - nc = Context(self.prec, self.rounding, self.Emin, self.Emax, - self.capitals, self.clamp, self.flags, self.traps, - self._ignored_flags) - return nc - - def copy(self): - """Returns a deep copy from self.""" - nc = Context(self.prec, self.rounding, self.Emin, self.Emax, - self.capitals, self.clamp, - self.flags.copy(), self.traps.copy(), - self._ignored_flags) - return nc - __copy__ = copy - - def _raise_error(self, condition, explanation = None, *args): - """Handles an error - - If the flag is in _ignored_flags, returns the default response. - Otherwise, it sets the flag, then, if the corresponding - trap_enabler is set, it reraises the exception. Otherwise, it returns - the default value after setting the flag. - """ - error = _condition_map.get(condition, condition) - if error in self._ignored_flags: - # Don't touch the flag - return error().handle(self, *args) - - self.flags[error] = 1 - if not self.traps[error]: - # The errors define how to handle themselves. - return condition().handle(self, *args) - - # Errors should only be risked on copies of the context - # self._ignored_flags = [] - raise error(explanation) - - def _ignore_all_flags(self): - """Ignore all flags, if they are raised""" - return self._ignore_flags(*_signals) - - def _ignore_flags(self, *flags): - """Ignore the flags, if they are raised""" - # Do not mutate-- This way, copies of a context leave the original - # alone. 
- self._ignored_flags = (self._ignored_flags + list(flags)) - return list(flags) - - def _regard_flags(self, *flags): - """Stop ignoring the flags, if they are raised""" - if flags and isinstance(flags[0], (tuple,list)): - flags = flags[0] - for flag in flags: - self._ignored_flags.remove(flag) - - # We inherit object.__hash__, so we must deny this explicitly - __hash__ = None - - def Etiny(self): - """Returns Etiny (= Emin - prec + 1)""" - return int(self.Emin - self.prec + 1) - - def Etop(self): - """Returns maximum exponent (= Emax - prec + 1)""" - return int(self.Emax - self.prec + 1) - - def _set_rounding(self, type): - """Sets the rounding type. - - Sets the rounding type, and returns the current (previous) - rounding type. Often used like: - - context = context.copy() - # so you don't change the calling context - # if an error occurs in the middle. - rounding = context._set_rounding(ROUND_UP) - val = self.__sub__(other, context=context) - context._set_rounding(rounding) - - This will make it round up for that operation. - """ - rounding = self.rounding - self.rounding= type - return rounding - - def create_decimal(self, num='0'): - """Creates a new Decimal instance but using self as context. - - This method implements the to-number operation of the - IBM Decimal specification.""" - - if isinstance(num, str) and num != num.strip(): - return self._raise_error(ConversionSyntax, - "no trailing or leading whitespace is " - "permitted.") - - d = Decimal(num, context=self) - if d._isnan() and len(d._int) > self.prec - self.clamp: - return self._raise_error(ConversionSyntax, - "diagnostic info too long in NaN") - return d._fix(self) - - def create_decimal_from_float(self, f): - """Creates a new Decimal instance from a float but rounding using self - as the context. - - >>> context = Context(prec=5, rounding=ROUND_DOWN) - >>> context.create_decimal_from_float(3.1415926535897932) - Decimal('3.1415') - >>> context = Context(prec=5, traps=[Inexact]) - >>> context.create_decimal_from_float(3.1415926535897932) - Traceback (most recent call last): - ... - decimal.Inexact: None - - """ - d = Decimal.from_float(f) # An exact conversion - return d._fix(self) # Apply the context rounding - - # Methods - def abs(self, a): - """Returns the absolute value of the operand. - - If the operand is negative, the result is the same as using the minus - operation on the operand. Otherwise, the result is the same as using - the plus operation on the operand. - - >>> ExtendedContext.abs(Decimal('2.1')) - Decimal('2.1') - >>> ExtendedContext.abs(Decimal('-100')) - Decimal('100') - >>> ExtendedContext.abs(Decimal('101.5')) - Decimal('101.5') - >>> ExtendedContext.abs(Decimal('-101.5')) - Decimal('101.5') - >>> ExtendedContext.abs(-1) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return a.__abs__(context=self) - - def add(self, a, b): - """Return the sum of the two operands. - - >>> ExtendedContext.add(Decimal('12'), Decimal('7.00')) - Decimal('19.00') - >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4')) - Decimal('1.02E+4') - >>> ExtendedContext.add(1, Decimal(2)) - Decimal('3') - >>> ExtendedContext.add(Decimal(8), 5) - Decimal('13') - >>> ExtendedContext.add(5, 5) - Decimal('10') - """ - a = _convert_other(a, raiseit=True) - r = a.__add__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def _apply(self, a): - return str(a._fix(self)) - - def canonical(self, a): - """Returns the same Decimal object. 
- - As we do not have different encodings for the same number, the - received object already is in its canonical form. - - >>> ExtendedContext.canonical(Decimal('2.50')) - Decimal('2.50') - """ - if not isinstance(a, Decimal): - raise TypeError("canonical requires a Decimal as an argument.") - return a.canonical() - - def compare(self, a, b): - """Compares values numerically. - - If the signs of the operands differ, a value representing each operand - ('-1' if the operand is less than zero, '0' if the operand is zero or - negative zero, or '1' if the operand is greater than zero) is used in - place of that operand for the comparison instead of the actual - operand. - - The comparison is then effected by subtracting the second operand from - the first and then returning a value according to the result of the - subtraction: '-1' if the result is less than zero, '0' if the result is - zero or negative zero, or '1' if the result is greater than zero. - - >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3')) - Decimal('-1') - >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1')) - Decimal('0') - >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10')) - Decimal('0') - >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1')) - Decimal('1') - >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3')) - Decimal('1') - >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1')) - Decimal('-1') - >>> ExtendedContext.compare(1, 2) - Decimal('-1') - >>> ExtendedContext.compare(Decimal(1), 2) - Decimal('-1') - >>> ExtendedContext.compare(1, Decimal(2)) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.compare(b, context=self) - - def compare_signal(self, a, b): - """Compares the values of the two operands numerically. - - It's pretty much like compare(), but all NaNs signal, with signaling - NaNs taking precedence over quiet NaNs. - - >>> c = ExtendedContext - >>> c.compare_signal(Decimal('2.1'), Decimal('3')) - Decimal('-1') - >>> c.compare_signal(Decimal('2.1'), Decimal('2.1')) - Decimal('0') - >>> c.flags[InvalidOperation] = 0 - >>> print(c.flags[InvalidOperation]) - 0 - >>> c.compare_signal(Decimal('NaN'), Decimal('2.1')) - Decimal('NaN') - >>> print(c.flags[InvalidOperation]) - 1 - >>> c.flags[InvalidOperation] = 0 - >>> print(c.flags[InvalidOperation]) - 0 - >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1')) - Decimal('NaN') - >>> print(c.flags[InvalidOperation]) - 1 - >>> c.compare_signal(-1, 2) - Decimal('-1') - >>> c.compare_signal(Decimal(-1), 2) - Decimal('-1') - >>> c.compare_signal(-1, Decimal(2)) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.compare_signal(b, context=self) - - def compare_total(self, a, b): - """Compares two operands using their abstract representation. - - This is not like the standard compare, which use their numerical - value. Note that a total ordering is defined for all possible abstract - representations. 
- - >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9')) - Decimal('-1') - >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12')) - Decimal('-1') - >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3')) - Decimal('-1') - >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30')) - Decimal('0') - >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300')) - Decimal('1') - >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN')) - Decimal('-1') - >>> ExtendedContext.compare_total(1, 2) - Decimal('-1') - >>> ExtendedContext.compare_total(Decimal(1), 2) - Decimal('-1') - >>> ExtendedContext.compare_total(1, Decimal(2)) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.compare_total(b) - - def compare_total_mag(self, a, b): - """Compares two operands using their abstract representation ignoring sign. - - Like compare_total, but with operand's sign ignored and assumed to be 0. - """ - a = _convert_other(a, raiseit=True) - return a.compare_total_mag(b) - - def copy_abs(self, a): - """Returns a copy of the operand with the sign set to 0. - - >>> ExtendedContext.copy_abs(Decimal('2.1')) - Decimal('2.1') - >>> ExtendedContext.copy_abs(Decimal('-100')) - Decimal('100') - >>> ExtendedContext.copy_abs(-1) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return a.copy_abs() - - def copy_decimal(self, a): - """Returns a copy of the decimal object. - - >>> ExtendedContext.copy_decimal(Decimal('2.1')) - Decimal('2.1') - >>> ExtendedContext.copy_decimal(Decimal('-1.00')) - Decimal('-1.00') - >>> ExtendedContext.copy_decimal(1) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return Decimal(a) - - def copy_negate(self, a): - """Returns a copy of the operand with the sign inverted. - - >>> ExtendedContext.copy_negate(Decimal('101.5')) - Decimal('-101.5') - >>> ExtendedContext.copy_negate(Decimal('-101.5')) - Decimal('101.5') - >>> ExtendedContext.copy_negate(1) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.copy_negate() - - def copy_sign(self, a, b): - """Copies the second operand's sign to the first one. - - In detail, it returns a copy of the first operand with the sign - equal to the sign of the second operand. - - >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) - Decimal('1.50') - >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) - Decimal('1.50') - >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) - Decimal('-1.50') - >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) - Decimal('-1.50') - >>> ExtendedContext.copy_sign(1, -2) - Decimal('-1') - >>> ExtendedContext.copy_sign(Decimal(1), -2) - Decimal('-1') - >>> ExtendedContext.copy_sign(1, Decimal(-2)) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.copy_sign(b) - - def divide(self, a, b): - """Decimal division in a specified context. 
- - >>> ExtendedContext.divide(Decimal('1'), Decimal('3')) - Decimal('0.333333333') - >>> ExtendedContext.divide(Decimal('2'), Decimal('3')) - Decimal('0.666666667') - >>> ExtendedContext.divide(Decimal('5'), Decimal('2')) - Decimal('2.5') - >>> ExtendedContext.divide(Decimal('1'), Decimal('10')) - Decimal('0.1') - >>> ExtendedContext.divide(Decimal('12'), Decimal('12')) - Decimal('1') - >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2')) - Decimal('4.00') - >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0')) - Decimal('1.20') - >>> ExtendedContext.divide(Decimal('1000'), Decimal('100')) - Decimal('10') - >>> ExtendedContext.divide(Decimal('1000'), Decimal('1')) - Decimal('1000') - >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2')) - Decimal('1.20E+6') - >>> ExtendedContext.divide(5, 5) - Decimal('1') - >>> ExtendedContext.divide(Decimal(5), 5) - Decimal('1') - >>> ExtendedContext.divide(5, Decimal(5)) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - r = a.__truediv__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def divide_int(self, a, b): - """Divides two numbers and returns the integer part of the result. - - >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3')) - Decimal('0') - >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3')) - Decimal('3') - >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3')) - Decimal('3') - >>> ExtendedContext.divide_int(10, 3) - Decimal('3') - >>> ExtendedContext.divide_int(Decimal(10), 3) - Decimal('3') - >>> ExtendedContext.divide_int(10, Decimal(3)) - Decimal('3') - """ - a = _convert_other(a, raiseit=True) - r = a.__floordiv__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def divmod(self, a, b): - """Return (a // b, a % b). - - >>> ExtendedContext.divmod(Decimal(8), Decimal(3)) - (Decimal('2'), Decimal('2')) - >>> ExtendedContext.divmod(Decimal(8), Decimal(4)) - (Decimal('2'), Decimal('0')) - >>> ExtendedContext.divmod(8, 4) - (Decimal('2'), Decimal('0')) - >>> ExtendedContext.divmod(Decimal(8), 4) - (Decimal('2'), Decimal('0')) - >>> ExtendedContext.divmod(8, Decimal(4)) - (Decimal('2'), Decimal('0')) - """ - a = _convert_other(a, raiseit=True) - r = a.__divmod__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def exp(self, a): - """Returns e ** a. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.exp(Decimal('-Infinity')) - Decimal('0') - >>> c.exp(Decimal('-1')) - Decimal('0.367879441') - >>> c.exp(Decimal('0')) - Decimal('1') - >>> c.exp(Decimal('1')) - Decimal('2.71828183') - >>> c.exp(Decimal('0.693147181')) - Decimal('2.00000000') - >>> c.exp(Decimal('+Infinity')) - Decimal('Infinity') - >>> c.exp(10) - Decimal('22026.4658') - """ - a =_convert_other(a, raiseit=True) - return a.exp(context=self) - - def fma(self, a, b, c): - """Returns a multiplied by b, plus c. - - The first two operands are multiplied together, using multiply, - the third operand is then added to the result of that - multiplication, using add, all with only one final rounding. 
- - >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7')) - Decimal('22') - >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7')) - Decimal('-8') - >>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578')) - Decimal('1.38435736E+12') - >>> ExtendedContext.fma(1, 3, 4) - Decimal('7') - >>> ExtendedContext.fma(1, Decimal(3), 4) - Decimal('7') - >>> ExtendedContext.fma(1, 3, Decimal(4)) - Decimal('7') - """ - a = _convert_other(a, raiseit=True) - return a.fma(b, c, context=self) - - def is_canonical(self, a): - """Return True if the operand is canonical; otherwise return False. - - Currently, the encoding of a Decimal instance is always - canonical, so this method returns True for any Decimal. - - >>> ExtendedContext.is_canonical(Decimal('2.50')) - True - """ - if not isinstance(a, Decimal): - raise TypeError("is_canonical requires a Decimal as an argument.") - return a.is_canonical() - - def is_finite(self, a): - """Return True if the operand is finite; otherwise return False. - - A Decimal instance is considered finite if it is neither - infinite nor a NaN. - - >>> ExtendedContext.is_finite(Decimal('2.50')) - True - >>> ExtendedContext.is_finite(Decimal('-0.3')) - True - >>> ExtendedContext.is_finite(Decimal('0')) - True - >>> ExtendedContext.is_finite(Decimal('Inf')) - False - >>> ExtendedContext.is_finite(Decimal('NaN')) - False - >>> ExtendedContext.is_finite(1) - True - """ - a = _convert_other(a, raiseit=True) - return a.is_finite() - - def is_infinite(self, a): - """Return True if the operand is infinite; otherwise return False. - - >>> ExtendedContext.is_infinite(Decimal('2.50')) - False - >>> ExtendedContext.is_infinite(Decimal('-Inf')) - True - >>> ExtendedContext.is_infinite(Decimal('NaN')) - False - >>> ExtendedContext.is_infinite(1) - False - """ - a = _convert_other(a, raiseit=True) - return a.is_infinite() - - def is_nan(self, a): - """Return True if the operand is a qNaN or sNaN; - otherwise return False. - - >>> ExtendedContext.is_nan(Decimal('2.50')) - False - >>> ExtendedContext.is_nan(Decimal('NaN')) - True - >>> ExtendedContext.is_nan(Decimal('-sNaN')) - True - >>> ExtendedContext.is_nan(1) - False - """ - a = _convert_other(a, raiseit=True) - return a.is_nan() - - def is_normal(self, a): - """Return True if the operand is a normal number; - otherwise return False. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.is_normal(Decimal('2.50')) - True - >>> c.is_normal(Decimal('0.1E-999')) - False - >>> c.is_normal(Decimal('0.00')) - False - >>> c.is_normal(Decimal('-Inf')) - False - >>> c.is_normal(Decimal('NaN')) - False - >>> c.is_normal(1) - True - """ - a = _convert_other(a, raiseit=True) - return a.is_normal(context=self) - - def is_qnan(self, a): - """Return True if the operand is a quiet NaN; otherwise return False. - - >>> ExtendedContext.is_qnan(Decimal('2.50')) - False - >>> ExtendedContext.is_qnan(Decimal('NaN')) - True - >>> ExtendedContext.is_qnan(Decimal('sNaN')) - False - >>> ExtendedContext.is_qnan(1) - False - """ - a = _convert_other(a, raiseit=True) - return a.is_qnan() - - def is_signed(self, a): - """Return True if the operand is negative; otherwise return False. 
- - >>> ExtendedContext.is_signed(Decimal('2.50')) - False - >>> ExtendedContext.is_signed(Decimal('-12')) - True - >>> ExtendedContext.is_signed(Decimal('-0')) - True - >>> ExtendedContext.is_signed(8) - False - >>> ExtendedContext.is_signed(-8) - True - """ - a = _convert_other(a, raiseit=True) - return a.is_signed() - - def is_snan(self, a): - """Return True if the operand is a signaling NaN; - otherwise return False. - - >>> ExtendedContext.is_snan(Decimal('2.50')) - False - >>> ExtendedContext.is_snan(Decimal('NaN')) - False - >>> ExtendedContext.is_snan(Decimal('sNaN')) - True - >>> ExtendedContext.is_snan(1) - False - """ - a = _convert_other(a, raiseit=True) - return a.is_snan() - - def is_subnormal(self, a): - """Return True if the operand is subnormal; otherwise return False. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.is_subnormal(Decimal('2.50')) - False - >>> c.is_subnormal(Decimal('0.1E-999')) - True - >>> c.is_subnormal(Decimal('0.00')) - False - >>> c.is_subnormal(Decimal('-Inf')) - False - >>> c.is_subnormal(Decimal('NaN')) - False - >>> c.is_subnormal(1) - False - """ - a = _convert_other(a, raiseit=True) - return a.is_subnormal(context=self) - - def is_zero(self, a): - """Return True if the operand is a zero; otherwise return False. - - >>> ExtendedContext.is_zero(Decimal('0')) - True - >>> ExtendedContext.is_zero(Decimal('2.50')) - False - >>> ExtendedContext.is_zero(Decimal('-0E+2')) - True - >>> ExtendedContext.is_zero(1) - False - >>> ExtendedContext.is_zero(0) - True - """ - a = _convert_other(a, raiseit=True) - return a.is_zero() - - def ln(self, a): - """Returns the natural (base e) logarithm of the operand. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.ln(Decimal('0')) - Decimal('-Infinity') - >>> c.ln(Decimal('1.000')) - Decimal('0') - >>> c.ln(Decimal('2.71828183')) - Decimal('1.00000000') - >>> c.ln(Decimal('10')) - Decimal('2.30258509') - >>> c.ln(Decimal('+Infinity')) - Decimal('Infinity') - >>> c.ln(1) - Decimal('0') - """ - a = _convert_other(a, raiseit=True) - return a.ln(context=self) - - def log10(self, a): - """Returns the base 10 logarithm of the operand. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.log10(Decimal('0')) - Decimal('-Infinity') - >>> c.log10(Decimal('0.001')) - Decimal('-3') - >>> c.log10(Decimal('1.000')) - Decimal('0') - >>> c.log10(Decimal('2')) - Decimal('0.301029996') - >>> c.log10(Decimal('10')) - Decimal('1') - >>> c.log10(Decimal('70')) - Decimal('1.84509804') - >>> c.log10(Decimal('+Infinity')) - Decimal('Infinity') - >>> c.log10(0) - Decimal('-Infinity') - >>> c.log10(1) - Decimal('0') - """ - a = _convert_other(a, raiseit=True) - return a.log10(context=self) - - def logb(self, a): - """ Returns the exponent of the magnitude of the operand's MSD. - - The result is the integer which is the exponent of the magnitude - of the most significant digit of the operand (as though the - operand were truncated to a single digit while maintaining the - value of that digit and without limiting the resulting exponent). 
- - >>> ExtendedContext.logb(Decimal('250')) - Decimal('2') - >>> ExtendedContext.logb(Decimal('2.50')) - Decimal('0') - >>> ExtendedContext.logb(Decimal('0.03')) - Decimal('-2') - >>> ExtendedContext.logb(Decimal('0')) - Decimal('-Infinity') - >>> ExtendedContext.logb(1) - Decimal('0') - >>> ExtendedContext.logb(10) - Decimal('1') - >>> ExtendedContext.logb(100) - Decimal('2') - """ - a = _convert_other(a, raiseit=True) - return a.logb(context=self) - - def logical_and(self, a, b): - """Applies the logical operation 'and' between each operand's digits. - - The operands must be both logical numbers. - - >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) - Decimal('0') - >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) - Decimal('0') - >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) - Decimal('0') - >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) - Decimal('1') - >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) - Decimal('1000') - >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) - Decimal('10') - >>> ExtendedContext.logical_and(110, 1101) - Decimal('100') - >>> ExtendedContext.logical_and(Decimal(110), 1101) - Decimal('100') - >>> ExtendedContext.logical_and(110, Decimal(1101)) - Decimal('100') - """ - a = _convert_other(a, raiseit=True) - return a.logical_and(b, context=self) - - def logical_invert(self, a): - """Invert all the digits in the operand. - - The operand must be a logical number. - - >>> ExtendedContext.logical_invert(Decimal('0')) - Decimal('111111111') - >>> ExtendedContext.logical_invert(Decimal('1')) - Decimal('111111110') - >>> ExtendedContext.logical_invert(Decimal('111111111')) - Decimal('0') - >>> ExtendedContext.logical_invert(Decimal('101010101')) - Decimal('10101010') - >>> ExtendedContext.logical_invert(1101) - Decimal('111110010') - """ - a = _convert_other(a, raiseit=True) - return a.logical_invert(context=self) - - def logical_or(self, a, b): - """Applies the logical operation 'or' between each operand's digits. - - The operands must be both logical numbers. - - >>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) - Decimal('0') - >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) - Decimal('1') - >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) - Decimal('1') - >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) - Decimal('1') - >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) - Decimal('1110') - >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) - Decimal('1110') - >>> ExtendedContext.logical_or(110, 1101) - Decimal('1111') - >>> ExtendedContext.logical_or(Decimal(110), 1101) - Decimal('1111') - >>> ExtendedContext.logical_or(110, Decimal(1101)) - Decimal('1111') - """ - a = _convert_other(a, raiseit=True) - return a.logical_or(b, context=self) - - def logical_xor(self, a, b): - """Applies the logical operation 'xor' between each operand's digits. - - The operands must be both logical numbers. 
- - >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0')) - Decimal('0') - >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1')) - Decimal('1') - >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0')) - Decimal('1') - >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1')) - Decimal('0') - >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010')) - Decimal('110') - >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10')) - Decimal('1101') - >>> ExtendedContext.logical_xor(110, 1101) - Decimal('1011') - >>> ExtendedContext.logical_xor(Decimal(110), 1101) - Decimal('1011') - >>> ExtendedContext.logical_xor(110, Decimal(1101)) - Decimal('1011') - """ - a = _convert_other(a, raiseit=True) - return a.logical_xor(b, context=self) - - def max(self, a, b): - """max compares two values numerically and returns the maximum. - - If either operand is a NaN then the general rules apply. - Otherwise, the operands are compared as though by the compare - operation. If they are numerically equal then the left-hand operand - is chosen as the result. Otherwise the maximum (closer to positive - infinity) of the two operands is chosen as the result. - - >>> ExtendedContext.max(Decimal('3'), Decimal('2')) - Decimal('3') - >>> ExtendedContext.max(Decimal('-10'), Decimal('3')) - Decimal('3') - >>> ExtendedContext.max(Decimal('1.0'), Decimal('1')) - Decimal('1') - >>> ExtendedContext.max(Decimal('7'), Decimal('NaN')) - Decimal('7') - >>> ExtendedContext.max(1, 2) - Decimal('2') - >>> ExtendedContext.max(Decimal(1), 2) - Decimal('2') - >>> ExtendedContext.max(1, Decimal(2)) - Decimal('2') - """ - a = _convert_other(a, raiseit=True) - return a.max(b, context=self) - - def max_mag(self, a, b): - """Compares the values numerically with their sign ignored. - - >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN')) - Decimal('7') - >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10')) - Decimal('-10') - >>> ExtendedContext.max_mag(1, -2) - Decimal('-2') - >>> ExtendedContext.max_mag(Decimal(1), -2) - Decimal('-2') - >>> ExtendedContext.max_mag(1, Decimal(-2)) - Decimal('-2') - """ - a = _convert_other(a, raiseit=True) - return a.max_mag(b, context=self) - - def min(self, a, b): - """min compares two values numerically and returns the minimum. - - If either operand is a NaN then the general rules apply. - Otherwise, the operands are compared as though by the compare - operation. If they are numerically equal then the left-hand operand - is chosen as the result. Otherwise the minimum (closer to negative - infinity) of the two operands is chosen as the result. - - >>> ExtendedContext.min(Decimal('3'), Decimal('2')) - Decimal('2') - >>> ExtendedContext.min(Decimal('-10'), Decimal('3')) - Decimal('-10') - >>> ExtendedContext.min(Decimal('1.0'), Decimal('1')) - Decimal('1.0') - >>> ExtendedContext.min(Decimal('7'), Decimal('NaN')) - Decimal('7') - >>> ExtendedContext.min(1, 2) - Decimal('1') - >>> ExtendedContext.min(Decimal(1), 2) - Decimal('1') - >>> ExtendedContext.min(1, Decimal(29)) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return a.min(b, context=self) - - def min_mag(self, a, b): - """Compares the values numerically with their sign ignored. 
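(Aside, not part of the diff: unlike the built-in max()/min(), the context methods follow the General Decimal Arithmetic rule of treating a quiet NaN as missing data when the other operand is a number; max_mag/min_mag compare absolute values instead. For example:)

    from decimal import Decimal, ExtendedContext

    ExtendedContext.max(Decimal('7'), Decimal('NaN'))      # Decimal('7'), NaN ignored
    ExtendedContext.min(Decimal('7'), Decimal('NaN'))      # Decimal('7')
    ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))  # Decimal('-10'), |-10| > |7|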
- - >>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2')) - Decimal('-2') - >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN')) - Decimal('-3') - >>> ExtendedContext.min_mag(1, -2) - Decimal('1') - >>> ExtendedContext.min_mag(Decimal(1), -2) - Decimal('1') - >>> ExtendedContext.min_mag(1, Decimal(-2)) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return a.min_mag(b, context=self) - - def minus(self, a): - """Minus corresponds to unary prefix minus in Python. - - The operation is evaluated using the same rules as subtract; the - operation minus(a) is calculated as subtract('0', a) where the '0' - has the same exponent as the operand. - - >>> ExtendedContext.minus(Decimal('1.3')) - Decimal('-1.3') - >>> ExtendedContext.minus(Decimal('-1.3')) - Decimal('1.3') - >>> ExtendedContext.minus(1) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.__neg__(context=self) - - def multiply(self, a, b): - """multiply multiplies two operands. - - If either operand is a special value then the general rules apply. - Otherwise, the operands are multiplied together - ('long multiplication'), resulting in a number which may be as long as - the sum of the lengths of the two operands. - - >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) - Decimal('3.60') - >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) - Decimal('21') - >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) - Decimal('0.72') - >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) - Decimal('-0.0') - >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) - Decimal('4.28135971E+11') - >>> ExtendedContext.multiply(7, 7) - Decimal('49') - >>> ExtendedContext.multiply(Decimal(7), 7) - Decimal('49') - >>> ExtendedContext.multiply(7, Decimal(7)) - Decimal('49') - """ - a = _convert_other(a, raiseit=True) - r = a.__mul__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def next_minus(self, a): - """Returns the largest representable number smaller than a. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> ExtendedContext.next_minus(Decimal('1')) - Decimal('0.999999999') - >>> c.next_minus(Decimal('1E-1007')) - Decimal('0E-1007') - >>> ExtendedContext.next_minus(Decimal('-1.00000003')) - Decimal('-1.00000004') - >>> c.next_minus(Decimal('Infinity')) - Decimal('9.99999999E+999') - >>> c.next_minus(1) - Decimal('0.999999999') - """ - a = _convert_other(a, raiseit=True) - return a.next_minus(context=self) - - def next_plus(self, a): - """Returns the smallest representable number larger than a. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> ExtendedContext.next_plus(Decimal('1')) - Decimal('1.00000001') - >>> c.next_plus(Decimal('-1E-1007')) - Decimal('-0E-1007') - >>> ExtendedContext.next_plus(Decimal('-1.00000003')) - Decimal('-1.00000002') - >>> c.next_plus(Decimal('-Infinity')) - Decimal('-9.99999999E+999') - >>> c.next_plus(1) - Decimal('1.00000001') - """ - a = _convert_other(a, raiseit=True) - return a.next_plus(context=self) - - def next_toward(self, a, b): - """Returns the number closest to a, in direction towards b. - - The result is the closest representable number from the first - operand (but not the first operand) that is in the direction - towards the second operand, unless the operands have the same - value. 
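(Aside, not part of the diff: next_plus/next_minus step by one unit in the last place at the context precision, and next_toward picks whichever of those directions moves toward the second operand. With the 9-digit ExtendedContext:)

    from decimal import Decimal, ExtendedContext

    c = ExtendedContext.copy()                   # prec=9
    c.next_plus(Decimal('1'))                    # Decimal('1.00000001')
    c.next_minus(Decimal('1'))                   # Decimal('0.999999999')
    c.next_toward(Decimal('1'), Decimal('0'))    # Decimal('0.999999999'), steps toward 0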
- - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.next_toward(Decimal('1'), Decimal('2')) - Decimal('1.00000001') - >>> c.next_toward(Decimal('-1E-1007'), Decimal('1')) - Decimal('-0E-1007') - >>> c.next_toward(Decimal('-1.00000003'), Decimal('0')) - Decimal('-1.00000002') - >>> c.next_toward(Decimal('1'), Decimal('0')) - Decimal('0.999999999') - >>> c.next_toward(Decimal('1E-1007'), Decimal('-100')) - Decimal('0E-1007') - >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10')) - Decimal('-1.00000004') - >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000')) - Decimal('-0.00') - >>> c.next_toward(0, 1) - Decimal('1E-1007') - >>> c.next_toward(Decimal(0), 1) - Decimal('1E-1007') - >>> c.next_toward(0, Decimal(1)) - Decimal('1E-1007') - """ - a = _convert_other(a, raiseit=True) - return a.next_toward(b, context=self) - - def normalize(self, a): - """normalize reduces an operand to its simplest form. - - Essentially a plus operation with all trailing zeros removed from the - result. - - >>> ExtendedContext.normalize(Decimal('2.1')) - Decimal('2.1') - >>> ExtendedContext.normalize(Decimal('-2.0')) - Decimal('-2') - >>> ExtendedContext.normalize(Decimal('1.200')) - Decimal('1.2') - >>> ExtendedContext.normalize(Decimal('-120')) - Decimal('-1.2E+2') - >>> ExtendedContext.normalize(Decimal('120.00')) - Decimal('1.2E+2') - >>> ExtendedContext.normalize(Decimal('0.00')) - Decimal('0') - >>> ExtendedContext.normalize(6) - Decimal('6') - """ - a = _convert_other(a, raiseit=True) - return a.normalize(context=self) - - def number_class(self, a): - """Returns an indication of the class of the operand. - - The class is one of the following strings: - -sNaN - -NaN - -Infinity - -Normal - -Subnormal - -Zero - +Zero - +Subnormal - +Normal - +Infinity - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.number_class(Decimal('Infinity')) - '+Infinity' - >>> c.number_class(Decimal('1E-10')) - '+Normal' - >>> c.number_class(Decimal('2.50')) - '+Normal' - >>> c.number_class(Decimal('0.1E-999')) - '+Subnormal' - >>> c.number_class(Decimal('0')) - '+Zero' - >>> c.number_class(Decimal('-0')) - '-Zero' - >>> c.number_class(Decimal('-0.1E-999')) - '-Subnormal' - >>> c.number_class(Decimal('-1E-10')) - '-Normal' - >>> c.number_class(Decimal('-2.50')) - '-Normal' - >>> c.number_class(Decimal('-Infinity')) - '-Infinity' - >>> c.number_class(Decimal('NaN')) - 'NaN' - >>> c.number_class(Decimal('-NaN')) - 'NaN' - >>> c.number_class(Decimal('sNaN')) - 'sNaN' - >>> c.number_class(123) - '+Normal' - """ - a = _convert_other(a, raiseit=True) - return a.number_class(context=self) - - def plus(self, a): - """Plus corresponds to unary prefix plus in Python. - - The operation is evaluated using the same rules as add; the - operation plus(a) is calculated as add('0', a) where the '0' - has the same exponent as the operand. - - >>> ExtendedContext.plus(Decimal('1.3')) - Decimal('1.3') - >>> ExtendedContext.plus(Decimal('-1.3')) - Decimal('-1.3') - >>> ExtendedContext.plus(-1) - Decimal('-1') - """ - a = _convert_other(a, raiseit=True) - return a.__pos__(context=self) - - def power(self, a, b, modulo=None): - """Raises a to the power of b, to modulo if given. - - With two arguments, compute a**b. If a is negative then b - must be integral. The result will be inexact unless b is - integral and the result is finite and can be expressed exactly - in 'precision' digits. - - With three arguments, compute (a**b) % modulo. 
For the - three argument form, the following restrictions on the - arguments hold: - - - all three arguments must be integral - - b must be nonnegative - - at least one of a or b must be nonzero - - modulo must be nonzero and have at most 'precision' digits - - The result of pow(a, b, modulo) is identical to the result - that would be obtained by computing (a**b) % modulo with - unbounded precision, but is computed more efficiently. It is - always exact. - - >>> c = ExtendedContext.copy() - >>> c.Emin = -999 - >>> c.Emax = 999 - >>> c.power(Decimal('2'), Decimal('3')) - Decimal('8') - >>> c.power(Decimal('-2'), Decimal('3')) - Decimal('-8') - >>> c.power(Decimal('2'), Decimal('-3')) - Decimal('0.125') - >>> c.power(Decimal('1.7'), Decimal('8')) - Decimal('69.7575744') - >>> c.power(Decimal('10'), Decimal('0.301029996')) - Decimal('2.00000000') - >>> c.power(Decimal('Infinity'), Decimal('-1')) - Decimal('0') - >>> c.power(Decimal('Infinity'), Decimal('0')) - Decimal('1') - >>> c.power(Decimal('Infinity'), Decimal('1')) - Decimal('Infinity') - >>> c.power(Decimal('-Infinity'), Decimal('-1')) - Decimal('-0') - >>> c.power(Decimal('-Infinity'), Decimal('0')) - Decimal('1') - >>> c.power(Decimal('-Infinity'), Decimal('1')) - Decimal('-Infinity') - >>> c.power(Decimal('-Infinity'), Decimal('2')) - Decimal('Infinity') - >>> c.power(Decimal('0'), Decimal('0')) - Decimal('NaN') - - >>> c.power(Decimal('3'), Decimal('7'), Decimal('16')) - Decimal('11') - >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16')) - Decimal('-11') - >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16')) - Decimal('1') - >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16')) - Decimal('11') - >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789')) - Decimal('11729830') - >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729')) - Decimal('-0') - >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537')) - Decimal('1') - >>> ExtendedContext.power(7, 7) - Decimal('823543') - >>> ExtendedContext.power(Decimal(7), 7) - Decimal('823543') - >>> ExtendedContext.power(7, Decimal(7), 2) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - r = a.__pow__(b, modulo, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def quantize(self, a, b): - """Returns a value equal to 'a' (rounded), having the exponent of 'b'. - - The coefficient of the result is derived from that of the left-hand - operand. It may be rounded using the current rounding setting (if the - exponent is being increased), multiplied by a positive power of ten (if - the exponent is being decreased), or is unchanged (if the exponent is - already equal to that of the right-hand operand). - - Unlike other operations, if the length of the coefficient after the - quantize operation would be greater than precision then an Invalid - operation condition is raised. This guarantees that, unless there is - an error condition, the exponent of the result of a quantize is always - equal to that of the right-hand operand. - - Also unlike other operations, quantize will never raise Underflow, even - if the result is subnormal and inexact. 
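(Aside, not part of the diff: quantize is the usual way to round to a fixed exponent, e.g. to cents, and because the result's coefficient may not exceed the context precision, a too-large operand yields an invalid operation -- a quiet NaN when InvalidOperation is untrapped, as in ExtendedContext. A short sketch:)

    from decimal import Decimal, ExtendedContext

    ExtendedContext.quantize(Decimal('7.335'), Decimal('0.01'))       # Decimal('7.34')
    ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))        # Decimal('0E+1')
    # coefficient would need 10 digits but prec is 9 -> quiet NaN
    ExtendedContext.quantize(Decimal('35236450.6'), Decimal('1e-2'))  # Decimal('NaN')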
- - >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) - Decimal('2.170') - >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) - Decimal('2.17') - >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) - Decimal('2.2') - >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) - Decimal('2') - >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) - Decimal('0E+1') - >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) - Decimal('-Infinity') - >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) - Decimal('NaN') - >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) - Decimal('-0') - >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) - Decimal('-0E+5') - >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) - Decimal('NaN') - >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) - Decimal('NaN') - >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) - Decimal('217.0') - >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) - Decimal('217') - >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) - Decimal('2.2E+2') - >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) - Decimal('2E+2') - >>> ExtendedContext.quantize(1, 2) - Decimal('1') - >>> ExtendedContext.quantize(Decimal(1), 2) - Decimal('1') - >>> ExtendedContext.quantize(1, Decimal(2)) - Decimal('1') - """ - a = _convert_other(a, raiseit=True) - return a.quantize(b, context=self) - - def radix(self): - """Just returns 10, as this is Decimal, :) - - >>> ExtendedContext.radix() - Decimal('10') - """ - return Decimal(10) - - def remainder(self, a, b): - """Returns the remainder from integer division. - - The result is the residue of the dividend after the operation of - calculating integer division as described for divide-integer, rounded - to precision digits if necessary. The sign of the result, if - non-zero, is the same as that of the original dividend. - - This operation will fail under the same conditions as integer division - (that is, if integer division on the same two operands would fail, the - remainder cannot be calculated). - - >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3')) - Decimal('2.1') - >>> ExtendedContext.remainder(Decimal('10'), Decimal('3')) - Decimal('1') - >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3')) - Decimal('-1') - >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1')) - Decimal('0.2') - >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3')) - Decimal('0.1') - >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3')) - Decimal('1.0') - >>> ExtendedContext.remainder(22, 6) - Decimal('4') - >>> ExtendedContext.remainder(Decimal(22), 6) - Decimal('4') - >>> ExtendedContext.remainder(22, Decimal(6)) - Decimal('4') - """ - a = _convert_other(a, raiseit=True) - r = a.__mod__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def remainder_near(self, a, b): - """Returns to be "a - b * n", where n is the integer nearest the exact - value of "x / b" (if two integers are equally near then the even one - is chosen). If the result is equal to 0 then its sign will be the - sign of a. - - This operation will fail under the same conditions as integer division - (that is, if integer division on the same two operands would fail, the - remainder cannot be calculated). 
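(Aside, not part of the diff: the difference between the two remainder flavours in one pair of calls -- remainder keeps the sign of the dividend, while remainder_near measures from the nearest multiple of the divisor and so can be negative even for positive operands:)

    from decimal import Decimal, ExtendedContext

    ExtendedContext.remainder(Decimal('10'), Decimal('6'))        # Decimal('4')
    ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))   # Decimal('-2'), since 10 = 2*6 - 2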
- - >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) - Decimal('-0.9') - >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) - Decimal('-2') - >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) - Decimal('1') - >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) - Decimal('-1') - >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) - Decimal('0.2') - >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) - Decimal('0.1') - >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) - Decimal('-0.3') - >>> ExtendedContext.remainder_near(3, 11) - Decimal('3') - >>> ExtendedContext.remainder_near(Decimal(3), 11) - Decimal('3') - >>> ExtendedContext.remainder_near(3, Decimal(11)) - Decimal('3') - """ - a = _convert_other(a, raiseit=True) - return a.remainder_near(b, context=self) - - def rotate(self, a, b): - """Returns a rotated copy of a, b times. - - The coefficient of the result is a rotated copy of the digits in - the coefficient of the first operand. The number of places of - rotation is taken from the absolute value of the second operand, - with the rotation being to the left if the second operand is - positive or to the right otherwise. - - >>> ExtendedContext.rotate(Decimal('34'), Decimal('8')) - Decimal('400000003') - >>> ExtendedContext.rotate(Decimal('12'), Decimal('9')) - Decimal('12') - >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2')) - Decimal('891234567') - >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0')) - Decimal('123456789') - >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2')) - Decimal('345678912') - >>> ExtendedContext.rotate(1333333, 1) - Decimal('13333330') - >>> ExtendedContext.rotate(Decimal(1333333), 1) - Decimal('13333330') - >>> ExtendedContext.rotate(1333333, Decimal(1)) - Decimal('13333330') - """ - a = _convert_other(a, raiseit=True) - return a.rotate(b, context=self) - - def same_quantum(self, a, b): - """Returns True if the two operands have the same exponent. - - The result is never affected by either the sign or the coefficient of - either operand. - - >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001')) - False - >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01')) - True - >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1')) - False - >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf')) - True - >>> ExtendedContext.same_quantum(10000, -1) - True - >>> ExtendedContext.same_quantum(Decimal(10000), -1) - True - >>> ExtendedContext.same_quantum(10000, Decimal(-1)) - True - """ - a = _convert_other(a, raiseit=True) - return a.same_quantum(b) - - def scaleb (self, a, b): - """Returns the first operand after adding the second value its exp. - - >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2')) - Decimal('0.0750') - >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0')) - Decimal('7.50') - >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3')) - Decimal('7.50E+3') - >>> ExtendedContext.scaleb(1, 4) - Decimal('1E+4') - >>> ExtendedContext.scaleb(Decimal(1), 4) - Decimal('1E+4') - >>> ExtendedContext.scaleb(1, Decimal(4)) - Decimal('1E+4') - """ - a = _convert_other(a, raiseit=True) - return a.scaleb(b, context=self) - - def shift(self, a, b): - """Returns a shifted copy of a, b times. - - The coefficient of the result is a shifted copy of the digits - in the coefficient of the first operand. 
The number of places - to shift is taken from the absolute value of the second operand, - with the shift being to the left if the second operand is - positive or to the right otherwise. Digits shifted into the - coefficient are zeros. - - >>> ExtendedContext.shift(Decimal('34'), Decimal('8')) - Decimal('400000000') - >>> ExtendedContext.shift(Decimal('12'), Decimal('9')) - Decimal('0') - >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2')) - Decimal('1234567') - >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0')) - Decimal('123456789') - >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2')) - Decimal('345678900') - >>> ExtendedContext.shift(88888888, 2) - Decimal('888888800') - >>> ExtendedContext.shift(Decimal(88888888), 2) - Decimal('888888800') - >>> ExtendedContext.shift(88888888, Decimal(2)) - Decimal('888888800') - """ - a = _convert_other(a, raiseit=True) - return a.shift(b, context=self) - - def sqrt(self, a): - """Square root of a non-negative number to context precision. - - If the result must be inexact, it is rounded using the round-half-even - algorithm. - - >>> ExtendedContext.sqrt(Decimal('0')) - Decimal('0') - >>> ExtendedContext.sqrt(Decimal('-0')) - Decimal('-0') - >>> ExtendedContext.sqrt(Decimal('0.39')) - Decimal('0.624499800') - >>> ExtendedContext.sqrt(Decimal('100')) - Decimal('10') - >>> ExtendedContext.sqrt(Decimal('1')) - Decimal('1') - >>> ExtendedContext.sqrt(Decimal('1.0')) - Decimal('1.0') - >>> ExtendedContext.sqrt(Decimal('1.00')) - Decimal('1.0') - >>> ExtendedContext.sqrt(Decimal('7')) - Decimal('2.64575131') - >>> ExtendedContext.sqrt(Decimal('10')) - Decimal('3.16227766') - >>> ExtendedContext.sqrt(2) - Decimal('1.41421356') - >>> ExtendedContext.prec - 9 - """ - a = _convert_other(a, raiseit=True) - return a.sqrt(context=self) - - def subtract(self, a, b): - """Return the difference between the two operands. - - >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) - Decimal('0.23') - >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) - Decimal('0.00') - >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) - Decimal('-0.77') - >>> ExtendedContext.subtract(8, 5) - Decimal('3') - >>> ExtendedContext.subtract(Decimal(8), 5) - Decimal('3') - >>> ExtendedContext.subtract(8, Decimal(5)) - Decimal('3') - """ - a = _convert_other(a, raiseit=True) - r = a.__sub__(b, context=self) - if r is NotImplemented: - raise TypeError("Unable to convert %s to Decimal" % b) - else: - return r - - def to_eng_string(self, a): - """Converts a number to a string, using scientific notation. - - The operation is not affected by the context. - """ - a = _convert_other(a, raiseit=True) - return a.to_eng_string(context=self) - - def to_sci_string(self, a): - """Converts a number to a string, using scientific notation. - - The operation is not affected by the context. - """ - a = _convert_other(a, raiseit=True) - return a.__str__(context=self) - - def to_integral_exact(self, a): - """Rounds to an integer. - - When the operand has a negative exponent, the result is the same - as using the quantize() operation using the given operand as the - left-hand-operand, 1E+0 as the right-hand-operand, and the precision - of the operand as the precision setting; Inexact and Rounded flags - are allowed in this operation. The rounding mode is taken from the - context. 
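(Aside, not part of the diff: "the rounding mode is taken from the context" is easy to check with to_integral_value -- only the context's rounding attribute changes the result. A sketch:)

    from decimal import Decimal, Context, ROUND_HALF_EVEN, ROUND_FLOOR

    Context(rounding=ROUND_HALF_EVEN).to_integral_value(Decimal('101.5'))  # Decimal('102')
    Context(rounding=ROUND_FLOOR).to_integral_value(Decimal('101.5'))      # Decimal('101')
    # to_integral_exact applies the same rounding but also sets the Inexact/Rounded flags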
- - >>> ExtendedContext.to_integral_exact(Decimal('2.1')) - Decimal('2') - >>> ExtendedContext.to_integral_exact(Decimal('100')) - Decimal('100') - >>> ExtendedContext.to_integral_exact(Decimal('100.0')) - Decimal('100') - >>> ExtendedContext.to_integral_exact(Decimal('101.5')) - Decimal('102') - >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) - Decimal('-102') - >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) - Decimal('1.0E+6') - >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) - Decimal('7.89E+77') - >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) - Decimal('-Infinity') - """ - a = _convert_other(a, raiseit=True) - return a.to_integral_exact(context=self) - - def to_integral_value(self, a): - """Rounds to an integer. - - When the operand has a negative exponent, the result is the same - as using the quantize() operation using the given operand as the - left-hand-operand, 1E+0 as the right-hand-operand, and the precision - of the operand as the precision setting, except that no flags will - be set. The rounding mode is taken from the context. - - >>> ExtendedContext.to_integral_value(Decimal('2.1')) - Decimal('2') - >>> ExtendedContext.to_integral_value(Decimal('100')) - Decimal('100') - >>> ExtendedContext.to_integral_value(Decimal('100.0')) - Decimal('100') - >>> ExtendedContext.to_integral_value(Decimal('101.5')) - Decimal('102') - >>> ExtendedContext.to_integral_value(Decimal('-101.5')) - Decimal('-102') - >>> ExtendedContext.to_integral_value(Decimal('10E+5')) - Decimal('1.0E+6') - >>> ExtendedContext.to_integral_value(Decimal('7.89E+77')) - Decimal('7.89E+77') - >>> ExtendedContext.to_integral_value(Decimal('-Inf')) - Decimal('-Infinity') - """ - a = _convert_other(a, raiseit=True) - return a.to_integral_value(context=self) - - # the method name changed, but we provide also the old one, for compatibility - to_integral = to_integral_value - -class _WorkRep(object): - __slots__ = ('sign','int','exp') - # sign: 0 or 1 - # int: int - # exp: None, int, or string - - def __init__(self, value=None): - if value is None: - self.sign = None - self.int = 0 - self.exp = None - elif isinstance(value, Decimal): - self.sign = value._sign - self.int = int(value._int) - self.exp = value._exp - else: - # assert isinstance(value, tuple) - self.sign = value[0] - self.int = value[1] - self.exp = value[2] - - def __repr__(self): - return "(%r, %r, %r)" % (self.sign, self.int, self.exp) - - __str__ = __repr__ - - - -def _normalize(op1, op2, prec = 0): - """Normalizes op1, op2 to have the same exp and length of coefficient. - - Done during addition. - """ - if op1.exp < op2.exp: - tmp = op2 - other = op1 - else: - tmp = op1 - other = op2 - - # Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1). - # Then adding 10**exp to tmp has the same effect (after rounding) - # as adding any positive quantity smaller than 10**exp; similarly - # for subtraction. So if other is smaller than 10**exp we replace - # it with 10**exp. This avoids tmp.exp - other.exp getting too large. - tmp_len = len(str(tmp.int)) - other_len = len(str(other.int)) - exp = tmp.exp + min(-1, tmp_len - prec - 2) - if other_len + other.exp - 1 < exp: - other.int = 1 - other.exp = exp - - tmp.int *= 10 ** (tmp.exp - other.exp) - tmp.exp = other.exp - return op1, op2 - -##### Integer arithmetic functions used by ln, log10, exp and __pow__ ##### - -_nbits = int.bit_length - -def _decimal_lshift_exact(n, e): - """ Given integers n and e, return n * 10**e if it's an integer, else None. 
- - The computation is designed to avoid computing large powers of 10 - unnecessarily. - - >>> _decimal_lshift_exact(3, 4) - 30000 - >>> _decimal_lshift_exact(300, -999999999) # returns None - - """ - if n == 0: - return 0 - elif e >= 0: - return n * 10**e - else: - # val_n = largest power of 10 dividing n. - str_n = str(abs(n)) - val_n = len(str_n) - len(str_n.rstrip('0')) - return None if val_n < -e else n // 10**-e - -def _sqrt_nearest(n, a): - """Closest integer to the square root of the positive integer n. a is - an initial approximation to the square root. Any positive integer - will do for a, but the closer a is to the square root of n the - faster convergence will be. - - """ - if n <= 0 or a <= 0: - raise ValueError("Both arguments to _sqrt_nearest should be positive.") - - b=0 - while a != b: - b, a = a, a--n//a>>1 - return a - -def _rshift_nearest(x, shift): - """Given an integer x and a nonnegative integer shift, return closest - integer to x / 2**shift; use round-to-even in case of a tie. - - """ - b, q = 1 << shift, x >> shift - return q + (2*(x & (b-1)) + (q&1) > b) - -def _div_nearest(a, b): - """Closest integer to a/b, a and b positive integers; rounds to even - in the case of a tie. - - """ - q, r = divmod(a, b) - return q + (2*r + (q&1) > b) - -def _ilog(x, M, L = 8): - """Integer approximation to M*log(x/M), with absolute error boundable - in terms only of x/M. - - Given positive integers x and M, return an integer approximation to - M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference - between the approximation and the exact result is at most 22. For - L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In - both cases these are upper bounds on the error; it will usually be - much smaller.""" - - # The basic algorithm is the following: let log1p be the function - # log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use - # the reduction - # - # log1p(y) = 2*log1p(y/(1+sqrt(1+y))) - # - # repeatedly until the argument to log1p is small (< 2**-L in - # absolute value). For small y we can use the Taylor series - # expansion - # - # log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T - # - # truncating at T such that y**T is small enough. The whole - # computation is carried out in a form of fixed-point arithmetic, - # with a real number z being represented by an integer - # approximation to z*M. To avoid loss of precision, the y below - # is actually an integer approximation to 2**R*y*M, where R is the - # number of reductions performed so far. - - y = x-M - # argument reduction; R = number of reductions performed - R = 0 - while (R <= L and abs(y) << L-R >= M or - R > L and abs(y) >> R-L >= M): - y = _div_nearest((M*y) << 1, - M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M)) - R += 1 - - # Taylor series with T terms - T = -int(-10*len(str(M))//(3*L)) - yshift = _rshift_nearest(y, R) - w = _div_nearest(M, T) - for k in range(T-1, 0, -1): - w = _div_nearest(M, k) - _div_nearest(yshift*w, M) - - return _div_nearest(w*y, M) - -def _dlog10(c, e, p): - """Given integers c, e and p with c > 0, p >= 0, compute an integer - approximation to 10**p * log10(c*10**e), with an absolute error of - at most 1. Assumes that c*10**e is not exactly 1.""" - - # increase precision by 2; compensate for this by dividing - # final result by 100 - p += 2 - - # write c*10**e as d*10**f with either: - # f >= 0 and 1 <= d <= 10, or - # f <= 0 and 0.1 <= d <= 1. 
- # Thus for c*10**e close to 1, f = 0 - l = len(str(c)) - f = e+l - (e+l >= 1) - - if p > 0: - M = 10**p - k = e+p-f - if k >= 0: - c *= 10**k - else: - c = _div_nearest(c, 10**-k) - - log_d = _ilog(c, M) # error < 5 + 22 = 27 - log_10 = _log10_digits(p) # error < 1 - log_d = _div_nearest(log_d*M, log_10) - log_tenpower = f*M # exact - else: - log_d = 0 # error < 2.31 - log_tenpower = _div_nearest(f, 10**-p) # error < 0.5 - - return _div_nearest(log_tenpower+log_d, 100) - -def _dlog(c, e, p): - """Given integers c, e and p with c > 0, compute an integer - approximation to 10**p * log(c*10**e), with an absolute error of - at most 1. Assumes that c*10**e is not exactly 1.""" - - # Increase precision by 2. The precision increase is compensated - # for at the end with a division by 100. - p += 2 - - # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10, - # or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e) - # as 10**p * log(d) + 10**p*f * log(10). - l = len(str(c)) - f = e+l - (e+l >= 1) - - # compute approximation to 10**p*log(d), with error < 27 - if p > 0: - k = e+p-f - if k >= 0: - c *= 10**k - else: - c = _div_nearest(c, 10**-k) # error of <= 0.5 in c - - # _ilog magnifies existing error in c by a factor of at most 10 - log_d = _ilog(c, 10**p) # error < 5 + 22 = 27 - else: - # p <= 0: just approximate the whole thing by 0; error < 2.31 - log_d = 0 - - # compute approximation to f*10**p*log(10), with error < 11. - if f: - extra = len(str(abs(f)))-1 - if p + extra >= 0: - # error in f * _log10_digits(p+extra) < |f| * 1 = |f| - # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11 - f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra) - else: - f_log_ten = 0 - else: - f_log_ten = 0 - - # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1 - return _div_nearest(f_log_ten + log_d, 100) - -class _Log10Memoize(object): - """Class to compute, store, and allow retrieval of, digits of the - constant log(10) = 2.302585.... This constant is needed by - Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__.""" - def __init__(self): - self.digits = "23025850929940456840179914546843642076011014886" - - def getdigits(self, p): - """Given an integer p >= 0, return floor(10**p)*log(10). - - For example, self.getdigits(3) returns 2302. - """ - # digits are stored as a string, for quick conversion to - # integer in the case that we've already computed enough - # digits; the stored digits should always be correct - # (truncated, not rounded to nearest). - if p < 0: - raise ValueError("p should be nonnegative") - - if p >= len(self.digits): - # compute p+3, p+6, p+9, ... digits; continue until at - # least one of the extra digits is nonzero - extra = 3 - while True: - # compute p+extra digits, correct to within 1ulp - M = 10**(p+extra+2) - digits = str(_div_nearest(_ilog(10*M, M), 100)) - if digits[-extra:] != '0'*extra: - break - extra += 3 - # keep all reliable digits so far; remove trailing zeros - # and next nonzero digit - self.digits = digits.rstrip('0')[:-1] - return int(self.digits[:p+1]) - -_log10_digits = _Log10Memoize().getdigits - -def _iexp(x, M, L=8): - """Given integers x and M, M > 0, such that x/M is small in absolute - value, compute an integer approximation to M*exp(x/M). For 0 <= - x/M <= 2.4, the absolute error in the result is bounded by 60 (and - is usually much smaller).""" - - # Algorithm: to compute exp(z) for a real number z, first divide z - # by a suitable power R of 2 so that |z/2**R| < 2**-L. 
Then - # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor - # series - # - # expm1(x) = x + x**2/2! + x**3/3! + ... - # - # Now use the identity - # - # expm1(2x) = expm1(x)*(expm1(x)+2) - # - # R times to compute the sequence expm1(z/2**R), - # expm1(z/2**(R-1)), ... , exp(z/2), exp(z). - - # Find R such that x/2**R/M <= 2**-L - R = _nbits((x< M - T = -int(-10*len(str(M))//(3*L)) - y = _div_nearest(x, T) - Mshift = M<= 0: - cshift = c*10**shift - else: - cshift = c//10**-shift - quot, rem = divmod(cshift, _log10_digits(q)) - - # reduce remainder back to original precision - rem = _div_nearest(rem, 10**extra) - - # error in result of _iexp < 120; error after division < 0.62 - return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3 - -def _dpower(xc, xe, yc, ye, p): - """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and - y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that: - - 10**(p-1) <= c <= 10**p, and - (c-1)*10**e < x**y < (c+1)*10**e - - in other words, c*10**e is an approximation to x**y with p digits - of precision, and with an error in c of at most 1. (This is - almost, but not quite, the same as the error being < 1ulp: when c - == 10**(p-1) we can only guarantee error < 10ulp.) - - We assume that: x is positive and not equal to 1, and y is nonzero. - """ - - # Find b such that 10**(b-1) <= |y| <= 10**b - b = len(str(abs(yc))) + ye - - # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point - lxc = _dlog(xc, xe, p+b+1) - - # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1) - shift = ye-b - if shift >= 0: - pc = lxc*yc*10**shift - else: - pc = _div_nearest(lxc*yc, 10**-shift) - - if pc == 0: - # we prefer a result that isn't exactly 1; this makes it - # easier to compute a correctly rounded result in __pow__ - if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1: - coeff, exp = 10**(p-1)+1, 1-p - else: - coeff, exp = 10**p-1, -p - else: - coeff, exp = _dexp(pc, -(p+1), p+1) - coeff = _div_nearest(coeff, 10) - exp += 1 - - return coeff, exp - -def _log10_lb(c, correction = { - '1': 100, '2': 70, '3': 53, '4': 40, '5': 31, - '6': 23, '7': 16, '8': 10, '9': 5}): - """Compute a lower bound for 100*log10(c) for a positive integer c.""" - if c <= 0: - raise ValueError("The argument to _log10_lb should be nonnegative.") - str_c = str(c) - return 100*len(str_c) - correction[str_c[0]] - -##### Helper Functions #################################################### - -def _convert_other(other, raiseit=False, allow_float=False): - """Convert other to Decimal. - - Verifies that it's ok to use in an implicit construction. - If allow_float is true, allow conversion from float; this - is used in the comparison methods (__eq__ and friends). - - """ - if isinstance(other, Decimal): - return other - if isinstance(other, int): - return Decimal(other) - if allow_float and isinstance(other, float): - return Decimal.from_float(other) - - if raiseit: - raise TypeError("Unable to convert %s to Decimal" % other) - return NotImplemented - -def _convert_for_comparison(self, other, equality_op=False): - """Given a Decimal instance self and a Python object other, return - a pair (s, o) of Decimal instances such that "s op o" is - equivalent to "self op other" for any of the 6 comparison - operators "op". - - """ - if isinstance(other, Decimal): - return self, other - - # Comparison with a Rational instance (also includes integers): - # self op n/d <=> self*d op n (for n and d integers, d positive). 
- # A NaN or infinity can be left unchanged without affecting the - # comparison result. - if isinstance(other, _numbers.Rational): - if not self._is_special: - self = _dec_from_triple(self._sign, - str(int(self._int) * other.denominator), - self._exp) - return self, Decimal(other.numerator) - - # Comparisons with float and complex types. == and != comparisons - # with complex numbers should succeed, returning either True or False - # as appropriate. Other comparisons return NotImplemented. - if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0: - other = other.real - if isinstance(other, float): - context = getcontext() - if equality_op: - context.flags[FloatOperation] = 1 - else: - context._raise_error(FloatOperation, - "strict semantics for mixing floats and Decimals are enabled") - return self, Decimal.from_float(other) - return NotImplemented, NotImplemented - - -##### Setup Specific Contexts ############################################ - -# The default context prototype used by Context() -# Is mutable, so that new contexts can have different default values - -DefaultContext = Context( - prec=28, rounding=ROUND_HALF_EVEN, - traps=[DivisionByZero, Overflow, InvalidOperation], - flags=[], - Emax=999999, - Emin=-999999, - capitals=1, - clamp=0 -) - -# Pre-made alternate contexts offered by the specification -# Don't change these; the user should be able to select these -# contexts and be able to reproduce results from other implementations -# of the spec. - -BasicContext = Context( - prec=9, rounding=ROUND_HALF_UP, - traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow], - flags=[], -) - -ExtendedContext = Context( - prec=9, rounding=ROUND_HALF_EVEN, - traps=[], - flags=[], -) - - -##### crud for parsing strings ############################################# -# -# Regular expression used for parsing numeric strings. Additional -# comments: -# -# 1. Uncomment the two '\s*' lines to allow leading and/or trailing -# whitespace. But note that the specification disallows whitespace in -# a numeric string. -# -# 2. For finite numbers (not infinities and NaNs) the body of the -# number between the optional sign and the optional exponent must have -# at least one decimal digit, possibly after the decimal point. The -# lookahead expression '(?=\d|\.\d)' checks this. - -import re -_parser = re.compile(r""" # A numeric string consists of: -# \s* - (?P[-+])? # an optional sign, followed by either... - ( - (?=\d|\.\d) # ...a number (with at least one digit) - (?P\d*) # having a (possibly empty) integer part - (\.(?P\d*))? # followed by an optional fractional part - (E(?P[-+]?\d+))? # followed by an optional exponent, or... - | - Inf(inity)? # ...an infinity, or... - | - (?Ps)? # ...an (optionally signaling) - NaN # NaN - (?P\d*) # with (possibly empty) diagnostic info. - ) -# \s* - \Z -""", re.VERBOSE | re.IGNORECASE).match - -_all_zeros = re.compile('0*$').match -_exact_half = re.compile('50*$').match - -##### PEP3101 support functions ############################################## -# The functions in this section have little to do with the Decimal -# class, and could potentially be reused or adapted for other pure -# Python numeric classes that want to implement __format__ -# -# A format specifier for Decimal looks like: -# -# [[fill]align][sign][#][0][minimumwidth][,][.precision][type] - -_parse_format_specifier_regex = re.compile(r"""\A -(?: - (?P.)? - (?P[<>=^]) -)? -(?P[-+ ])? -(?P\#)? -(?P0)? -(?P(?!0)\d+)? -(?P,)? -(?:\.(?P0|(?!0)\d+))? -(?P[eEfFgGn%])? 
-\Z -""", re.VERBOSE|re.DOTALL) - -del re - -# The locale module is only needed for the 'n' format specifier. The -# rest of the PEP 3101 code functions quite happily without it, so we -# don't care too much if locale isn't present. -try: - import locale as _locale -except ImportError: - pass - -def _parse_format_specifier(format_spec, _localeconv=None): - """Parse and validate a format specifier. - - Turns a standard numeric format specifier into a dict, with the - following entries: - - fill: fill character to pad field to minimum width - align: alignment type, either '<', '>', '=' or '^' - sign: either '+', '-' or ' ' - minimumwidth: nonnegative integer giving minimum width - zeropad: boolean, indicating whether to pad with zeros - thousands_sep: string to use as thousands separator, or '' - grouping: grouping for thousands separators, in format - used by localeconv - decimal_point: string to use for decimal point - precision: nonnegative integer giving precision, or None - type: one of the characters 'eEfFgG%', or None - - """ - m = _parse_format_specifier_regex.match(format_spec) - if m is None: - raise ValueError("Invalid format specifier: " + format_spec) - - # get the dictionary - format_dict = m.groupdict() - - # zeropad; defaults for fill and alignment. If zero padding - # is requested, the fill and align fields should be absent. - fill = format_dict['fill'] - align = format_dict['align'] - format_dict['zeropad'] = (format_dict['zeropad'] is not None) - if format_dict['zeropad']: - if fill is not None: - raise ValueError("Fill character conflicts with '0'" - " in format specifier: " + format_spec) - if align is not None: - raise ValueError("Alignment conflicts with '0' in " - "format specifier: " + format_spec) - format_dict['fill'] = fill or ' ' - # PEP 3101 originally specified that the default alignment should - # be left; it was later agreed that right-aligned makes more sense - # for numeric types. See http://bugs.python.org/issue6857. - format_dict['align'] = align or '>' - - # default sign handling: '-' for negative, '' for positive - if format_dict['sign'] is None: - format_dict['sign'] = '-' - - # minimumwidth defaults to 0; precision remains None if not given - format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0') - if format_dict['precision'] is not None: - format_dict['precision'] = int(format_dict['precision']) - - # if format type is 'g' or 'G' then a precision of 0 makes little - # sense; convert it to 1. Same if format type is unspecified. - if format_dict['precision'] == 0: - if format_dict['type'] is None or format_dict['type'] in 'gGn': - format_dict['precision'] = 1 - - # determine thousands separator, grouping, and decimal separator, and - # add appropriate entries to format_dict - if format_dict['type'] == 'n': - # apart from separators, 'n' behaves just like 'g' - format_dict['type'] = 'g' - if _localeconv is None: - _localeconv = _locale.localeconv() - if format_dict['thousands_sep'] is not None: - raise ValueError("Explicit thousands separator conflicts with " - "'n' type in format specifier: " + format_spec) - format_dict['thousands_sep'] = _localeconv['thousands_sep'] - format_dict['grouping'] = _localeconv['grouping'] - format_dict['decimal_point'] = _localeconv['decimal_point'] - else: - if format_dict['thousands_sep'] is None: - format_dict['thousands_sep'] = '' - format_dict['grouping'] = [3, 0] - format_dict['decimal_point'] = '.' 
- - return format_dict - -def _format_align(sign, body, spec): - """Given an unpadded, non-aligned numeric string 'body' and sign - string 'sign', add padding and alignment conforming to the given - format specifier dictionary 'spec' (as produced by - parse_format_specifier). - - """ - # how much extra space do we have to play with? - minimumwidth = spec['minimumwidth'] - fill = spec['fill'] - padding = fill*(minimumwidth - len(sign) - len(body)) - - align = spec['align'] - if align == '<': - result = sign + body + padding - elif align == '>': - result = padding + sign + body - elif align == '=': - result = sign + padding + body - elif align == '^': - half = len(padding)//2 - result = padding[:half] + sign + body + padding[half:] - else: - raise ValueError('Unrecognised alignment field') - - return result - -def _group_lengths(grouping): - """Convert a localeconv-style grouping into a (possibly infinite) - iterable of integers representing group lengths. - - """ - # The result from localeconv()['grouping'], and the input to this - # function, should be a list of integers in one of the - # following three forms: - # - # (1) an empty list, or - # (2) nonempty list of positive integers + [0] - # (3) list of positive integers + [locale.CHAR_MAX], or - - from itertools import chain, repeat - if not grouping: - return [] - elif grouping[-1] == 0 and len(grouping) >= 2: - return chain(grouping[:-1], repeat(grouping[-2])) - elif grouping[-1] == _locale.CHAR_MAX: - return grouping[:-1] - else: - raise ValueError('unrecognised format for grouping') - -def _insert_thousands_sep(digits, spec, min_width=1): - """Insert thousands separators into a digit string. - - spec is a dictionary whose keys should include 'thousands_sep' and - 'grouping'; typically it's the result of parsing the format - specifier using _parse_format_specifier. - - The min_width keyword argument gives the minimum length of the - result, which will be padded on the left with zeros if necessary. - - If necessary, the zero padding adds an extra '0' on the left to - avoid a leading thousands separator. For example, inserting - commas every three digits in '123456', with min_width=8, gives - '0,123,456', even though that has length 9. 
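(Aside, not part of the diff: the public entry point for this machinery is Decimal.__format__, so the '0,123,456' case described in the docstring above is what zero padding plus the ',' option produces. A sketch:)

    from decimal import Decimal

    format(Decimal('1234567.891'), ',.2f')   # '1,234,567.89'
    # zero padding fills through the separators and adds a leading '0'
    # rather than starting the result with a ',':
    format(Decimal('123456'), '08,')         # '0,123,456'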
- - """ - - sep = spec['thousands_sep'] - grouping = spec['grouping'] - - groups = [] - for l in _group_lengths(grouping): - if l <= 0: - raise ValueError("group length should be positive") - # max(..., 1) forces at least 1 digit to the left of a separator - l = min(max(len(digits), min_width, 1), l) - groups.append('0'*(l - len(digits)) + digits[-l:]) - digits = digits[:-l] - min_width -= l - if not digits and min_width <= 0: - break - min_width -= len(sep) - else: - l = max(len(digits), min_width, 1) - groups.append('0'*(l - len(digits)) + digits[-l:]) - return sep.join(reversed(groups)) - -def _format_sign(is_negative, spec): - """Determine sign character.""" - - if is_negative: - return '-' - elif spec['sign'] in ' +': - return spec['sign'] - else: - return '' - -def _format_number(is_negative, intpart, fracpart, exp, spec): - """Format a number, given the following data: - - is_negative: true if the number is negative, else false - intpart: string of digits that must appear before the decimal point - fracpart: string of digits that must come after the point - exp: exponent, as an integer - spec: dictionary resulting from parsing the format specifier - - This function uses the information in spec to: - insert separators (decimal separator and thousands separators) - format the sign - format the exponent - add trailing '%' for the '%' type - zero-pad if necessary - fill and align if necessary - """ - - sign = _format_sign(is_negative, spec) - - if fracpart or spec['alt']: - fracpart = spec['decimal_point'] + fracpart - - if exp != 0 or spec['type'] in 'eE': - echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']] - fracpart += "{0}{1:+}".format(echar, exp) - if spec['type'] == '%': - fracpart += '%' - - if spec['zeropad']: - min_width = spec['minimumwidth'] - len(fracpart) - len(sign) - else: - min_width = 0 - intpart = _insert_thousands_sep(intpart, spec, min_width) - - return _format_align(sign, intpart+fracpart, spec) - - -##### Useful Constants (internal use only) ################################ - -# Reusable defaults -_Infinity = Decimal('Inf') -_NegativeInfinity = Decimal('-Inf') -_NaN = Decimal('NaN') -_Zero = Decimal(0) -_One = Decimal(1) -_NegativeOne = Decimal(-1) - -# _SignedInfinity[sign] is infinity w/ that sign -_SignedInfinity = (_Infinity, _NegativeInfinity) - -# Constants related to the hash implementation; hash(x) is based -# on the reduction of x modulo _PyHASH_MODULUS -_PyHASH_MODULUS = sys.hash_info.modulus -# hash values to use for positive and negative infinities, and nans -_PyHASH_INF = sys.hash_info.inf -_PyHASH_NAN = sys.hash_info.nan - -# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS -_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) -del sys - -try: - import _decimal -except ImportError: - pass -else: - s1 = set(dir()) - s2 = set(dir(_decimal)) - for name in s1 - s2: - del globals()[name] - del s1, s2, name - from _decimal import * - -if __name__ == '__main__': - import doctest, decimal - doctest.testmod(decimal) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -4173,9 +4173,7 @@ self.assertEqual(C.__version__, P.__version__) self.assertEqual(C.__libmpdec_version__, P.__libmpdec_version__) - x = dir(C) - y = [s for s in dir(P) if '__' in s or not s.startswith('_')] - self.assertEqual(set(x) - set(y), set()) + self.assertEqual(dir(C), dir(P)) def test_context_attributes(self): diff --git a/Modules/_decimal/tests/deccheck.py 
b/Modules/_decimal/tests/deccheck.py --- a/Modules/_decimal/tests/deccheck.py +++ b/Modules/_decimal/tests/deccheck.py @@ -36,6 +36,7 @@ from randdec import randfloat, all_unary, all_binary, all_ternary from randdec import unary_optarg, binary_optarg, ternary_optarg from formathelper import rand_format, rand_locale +from _pydecimal import _dec_from_triple C = import_fresh_module('decimal', fresh=['_decimal']) P = import_fresh_module('decimal', blocked=['_decimal']) @@ -370,7 +371,7 @@ return abs(a - b) def standard_ulp(self, dec, prec): - return P._dec_from_triple(0, '1', dec._exp+len(dec._int)-prec) + return _dec_from_triple(0, '1', dec._exp+len(dec._int)-prec) def rounding_direction(self, x, mode): """Determine the effective direction of the rounding when @@ -401,10 +402,10 @@ # Convert infinities to the largest representable number + 1. x = exact if exact.is_infinite(): - x = P._dec_from_triple(exact._sign, '10', context.p.Emax) + x = _dec_from_triple(exact._sign, '10', context.p.Emax) y = rounded if rounded.is_infinite(): - y = P._dec_from_triple(rounded._sign, '10', context.p.Emax) + y = _dec_from_triple(rounded._sign, '10', context.p.Emax) # err = (rounded - exact) / ulp(rounded) self.maxctx.prec = p * 2 -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 18:01:24 2014 From: python-checkins at python.org (stefan.krah) Date: Wed, 10 Sep 2014 18:01:24 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Whitespace=2E?= Message-ID: <3htSdc17t3z7LkC@mail.python.org> http://hg.python.org/cpython/rev/81c4225fa994 changeset: 92383:81c4225fa994 user: Stefan Krah date: Wed Sep 10 18:00:46 2014 +0200 summary: Whitespace. files: Lib/decimal.py | 3 --- 1 files changed, 0 insertions(+), 3 deletions(-) diff --git a/Lib/decimal.py b/Lib/decimal.py --- a/Lib/decimal.py +++ b/Lib/decimal.py @@ -9,6 +9,3 @@ from _pydecimal import __doc__ from _pydecimal import __version__ from _pydecimal import __libmpdec_version__ - - - -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 22:34:26 2014 From: python-checkins at python.org (larry.hastings) Date: Wed, 10 Sep 2014 22:34:26 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?peps=3A_Updated_dates_for_3=2E4=2E2_s?= =?utf-8?q?chedule=2E?= Message-ID: <3htZhf084Mz7Lk7@mail.python.org> http://hg.python.org/peps/rev/370cd63d1c88 changeset: 5551:370cd63d1c88 user: Larry Hastings date: Wed Sep 10 13:34:46 2014 -0700 summary: Updated dates for 3.4.2 schedule. 
files: pep-0429.txt | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pep-0429.txt b/pep-0429.txt --- a/pep-0429.txt +++ b/pep-0429.txt @@ -59,8 +59,8 @@ 3.4.2 schedule -------------- -- 3.4.2 candidate 1: September 15, 2014 -- 3.4.2 final: September 28, 2014 +- 3.4.2 candidate 1: September 22, 2014 +- 3.4.2 final: October 6, 2014 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Sep 10 22:59:49 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 10 Sep 2014 22:59:49 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzY5?= =?utf-8?q?=3A_Change_=22context_manager_protocol=22_to_=22context_managem?= =?utf-8?q?ent?= Message-ID: <3htbFx6vKCz7Ll3@mail.python.org> http://hg.python.org/cpython/rev/cc5b183a2ad4 changeset: 92384:cc5b183a2ad4 branch: 3.4 parent: 92380:2b3dbbd2bd92 user: Serhiy Storchaka date: Wed Sep 10 23:43:41 2014 +0300 summary: Issue #22369: Change "context manager protocol" to "context management protocol". files: Doc/library/asyncio-sync.rst | 4 ++-- Doc/library/mailbox.rst | 2 +- Doc/library/multiprocessing.rst | 8 ++++---- Doc/library/ossaudiodev.rst | 4 ++-- Doc/library/tarfile.rst | 2 +- Doc/library/threading.rst | 8 ++++---- Doc/whatsnew/2.7.rst | 2 +- Doc/whatsnew/3.1.rst | 2 +- Doc/whatsnew/3.2.rst | 6 +++--- Doc/whatsnew/3.3.rst | 4 ++-- Doc/whatsnew/3.4.rst | 6 +++--- Lib/asyncio/locks.py | 4 ++-- Lib/mailbox.py | 2 +- Misc/HISTORY | 18 +++++++++--------- Misc/NEWS | 6 +++--- Modules/ossaudiodev.c | 4 ++-- 16 files changed, 41 insertions(+), 41 deletions(-) diff --git a/Doc/library/asyncio-sync.rst b/Doc/library/asyncio-sync.rst --- a/Doc/library/asyncio-sync.rst +++ b/Doc/library/asyncio-sync.rst @@ -34,7 +34,7 @@ :meth:`acquire` is a coroutine and should be called with ``yield from``. - Locks also support the context manager protocol. ``(yield from lock)`` + Locks also support the context management protocol. ``(yield from lock)`` should be used as context manager expression. Usage:: @@ -229,7 +229,7 @@ counter can never go below zero; when :meth:`acquire` finds that it is zero, it blocks, waiting until some other thread calls :meth:`release`. - Semaphores also support the context manager protocol. + Semaphores also support the context management protocol. The optional argument gives the initial value for the internal counter; it defaults to ``1``. If the value given is less than ``0``, :exc:`ValueError` diff --git a/Doc/library/mailbox.rst b/Doc/library/mailbox.rst --- a/Doc/library/mailbox.rst +++ b/Doc/library/mailbox.rst @@ -202,7 +202,7 @@ .. versionchanged:: 3.2 The file object really is a binary file; previously it was incorrectly returned in text mode. Also, the file-like object now supports the - context manager protocol: you can use a :keyword:`with` statement to + context management protocol: you can use a :keyword:`with` statement to automatically close it. .. note:: diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1046,7 +1046,7 @@ using :meth:`Connection.send` and :meth:`Connection.recv`. .. versionadded:: 3.3 - Connection objects now support the context manager protocol -- see + Connection objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the connection object, and :meth:`~contextmanager.__exit__` calls :meth:`close`. 
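(Aside, not part of the patch hunks: the behaviour documented just above -- Connection.__exit__ calling close() -- looks like this in use; a minimal sketch with both ends of a Pipe inside a single process:)

    from multiprocessing import Pipe

    parent, child = Pipe()
    with parent, child:              # __exit__ closes each Connection
        child.send('ping')
        assert parent.recv() == 'ping'
    # both ends are closed once the with block exits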
@@ -1503,7 +1503,7 @@ The address used by the manager. .. versionchanged:: 3.3 - Manager objects support the context manager protocol -- see + Manager objects support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` starts the server process (if it has not already started) and then returns the manager object. :meth:`~contextmanager.__exit__` calls :meth:`shutdown`. @@ -1995,7 +1995,7 @@ :meth:`terminate` before using :meth:`join`. .. versionadded:: 3.3 - Pool objects now support the context manager protocol -- see + Pool objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the pool object, and :meth:`~contextmanager.__exit__` calls :meth:`terminate`. @@ -2168,7 +2168,7 @@ unavailable then it is ``None``. .. versionadded:: 3.3 - Listener objects now support the context manager protocol -- see + Listener objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the listener object, and :meth:`~contextmanager.__exit__` calls :meth:`close`. diff --git a/Doc/library/ossaudiodev.rst b/Doc/library/ossaudiodev.rst --- a/Doc/library/ossaudiodev.rst +++ b/Doc/library/ossaudiodev.rst @@ -165,7 +165,7 @@ data written is always equal to the amount of data supplied. .. versionchanged:: 3.2 - Audio device objects also support the context manager protocol, i.e. they can + Audio device objects also support the context management protocol, i.e. they can be used in a :keyword:`with` statement. @@ -357,7 +357,7 @@ Returns the file handle number of the open mixer device file. .. versionchanged:: 3.2 - Mixer objects also support the context manager protocol. + Mixer objects also support the context management protocol. The remaining methods are specific to audio mixing: diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst --- a/Doc/library/tarfile.rst +++ b/Doc/library/tarfile.rst @@ -238,7 +238,7 @@ :ref:`tar-examples` section for a use case. .. versionadded:: 3.2 - Added support for the context manager protocol. + Added support for the context management protocol. .. class:: TarFile(name=None, mode='r', fileobj=None, format=DEFAULT_FORMAT, tarinfo=TarInfo, dereference=False, ignore_zeros=False, encoding=ENCODING, errors='surrogateescape', pax_headers=None, debug=0, errorlevel=0) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -354,7 +354,7 @@ immediately. If an attempt is made to release an unlocked lock, a :exc:`RuntimeError` will be raised. -Locks also support the :ref:`context manager protocol `. +Locks also support the :ref:`context management protocol `. When more than one thread is blocked in :meth:`~Lock.acquire` waiting for the state to turn to unlocked, only one thread proceeds when a :meth:`~Lock.release` @@ -433,7 +433,7 @@ :meth:`~Lock.release` of the outermost pair) resets the lock to unlocked and allows another thread blocked in :meth:`~Lock.acquire` to proceed. -Reentrant locks also support the :ref:`context manager protocol `. +Reentrant locks also support the :ref:`context management protocol `. .. class:: RLock() @@ -501,7 +501,7 @@ several condition variables must share the same lock. The lock is part of the condition object: you don't have to track it separately. 
-A condition variable obeys the :ref:`context manager protocol `: +A condition variable obeys the :ref:`context management protocol `: using the ``with`` statement acquires the associated lock for the duration of the enclosed block. The :meth:`~Condition.acquire` and :meth:`~Condition.release` methods also call the corresponding methods of @@ -677,7 +677,7 @@ finds that it is zero, it blocks, waiting until some other thread calls :meth:`~Semaphore.release`. -Semaphores also support the :ref:`context manager protocol `. +Semaphores also support the :ref:`context management protocol `. .. class:: Semaphore(value=1) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -1612,7 +1612,7 @@ resulting archive. This is more powerful than the existing *exclude* argument, which has therefore been deprecated. (Added by Lars Gust?bel; :issue:`6856`.) - The :class:`~tarfile.TarFile` class also now supports the context manager protocol. + The :class:`~tarfile.TarFile` class also now supports the context management protocol. (Added by Lars Gust?bel; :issue:`7232`.) * The :meth:`~threading.Event.wait` method of the :class:`threading.Event` class diff --git a/Doc/whatsnew/3.1.rst b/Doc/whatsnew/3.1.rst --- a/Doc/whatsnew/3.1.rst +++ b/Doc/whatsnew/3.1.rst @@ -238,7 +238,7 @@ (Contributed by Guilherme Polo; :issue:`2983`.) * The :class:`gzip.GzipFile` and :class:`bz2.BZ2File` classes now support - the context manager protocol:: + the context management protocol:: >>> # Automatically close file after writing >>> with gzip.GzipFile(filename, "wb") as f: diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -522,7 +522,7 @@ (Proposed and implemented by Mark Dickinson; :issue:`9337`.) * :class:`memoryview` objects now have a :meth:`~memoryview.release()` method - and they also now support the context manager protocol. This allows timely + and they also now support the context management protocol. This allows timely release of any resources that were acquired when requesting a buffer from the original object. @@ -1315,7 +1315,7 @@ ftp --- -The :class:`ftplib.FTP` class now supports the context manager protocol to +The :class:`ftplib.FTP` class now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the FTP connection when done:: @@ -1595,7 +1595,7 @@ descriptor. The latter can then be reused for other purposes. (Added by Antoine Pitrou; :issue:`8524`.) -* :func:`socket.create_connection` now supports the context manager protocol +* :func:`socket.create_connection` now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the socket when done. (Contributed by Giampaolo Rodol?; :issue:`9794`.) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -1556,7 +1556,7 @@ nntplib ------- -The :class:`nntplib.NNTP` class now supports the context manager protocol to +The :class:`nntplib.NNTP` class now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the NNTP connection when done:: @@ -1861,7 +1861,7 @@ when creating the outgoing socket. (Contributed by Paulo Scardine in :issue:`11281`.) 
-:class:`~smtplib.SMTP` now supports the context manager protocol, allowing an +:class:`~smtplib.SMTP` now supports the context management protocol, allowing an ``SMTP`` instance to be used in a ``with`` statement. (Contributed by Giampaolo Rodol? in :issue:`11289`.) diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -608,7 +608,7 @@ The :meth:`~aifc.aifc.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`17818`.) -:func:`aifc.open` now supports the context manager protocol: when used in a +:func:`aifc.open` now supports the context management protocol: when used in a :keyword:`with` block, the :meth:`~aifc.aifc.close` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchacha in :issue:`16486`.) @@ -1521,7 +1521,7 @@ The :meth:`~sunau.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`18901`.) -:meth:`sunau.open` now supports the context manager protocol: when used in a +:meth:`sunau.open` now supports the context management protocol: when used in a :keyword:`with` block, the ``close`` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchaka in :issue:`18878`.) @@ -1723,7 +1723,7 @@ The :meth:`~wave.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`17487`.) -:meth:`wave.open` now supports the context manager protocol. (Contributed +:meth:`wave.open` now supports the context management protocol. (Contributed by Claudiu Popa in :issue:`17616`.) :mod:`wave` can now :ref:`write output to unseekable files diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py --- a/Lib/asyncio/locks.py +++ b/Lib/asyncio/locks.py @@ -63,7 +63,7 @@ acquire() is a coroutine and should be called with 'yield from'. - Locks also support the context manager protocol. '(yield from lock)' + Locks also support the context management protocol. '(yield from lock)' should be used as context manager expression. Usage: @@ -376,7 +376,7 @@ can never go below zero; when acquire() finds that it is zero, it blocks, waiting until some other thread calls release(). - Semaphores also support the context manager protocol. + Semaphores also support the context management protocol. The optional argument gives the initial value for the internal counter; it defaults to 1. If the value given is less than 0, diff --git a/Lib/mailbox.py b/Lib/mailbox.py --- a/Lib/mailbox.py +++ b/Lib/mailbox.py @@ -1980,7 +1980,7 @@ return result def __enter__(self): - """Context manager protocol support.""" + """Context management protocol support.""" return self def __exit__(self, *exc): diff --git a/Misc/HISTORY b/Misc/HISTORY --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -811,7 +811,7 @@ - Issue #14772: Return destination values from some shutil functions. -- Issue #15064: Implement context manager protocol for multiprocessing types +- Issue #15064: Implement context management protocol for multiprocessing types - Issue #15101: Make pool finalizer avoid joining current thread. @@ -3606,7 +3606,7 @@ ``mmap.PROT_READ|mmap.PROT_EXEC`` would segfault instead of raising a TypeError. Patch by Charles-Fran?ois Natali. -- Issue #9795: add context manager protocol support for nntplib.NNTP class. +- Issue #9795: add context management protocol support for nntplib.NNTP class. 
- Issue #11306: mailbox in certain cases adapts to an inability to open certain files in read-write mode. Previously it detected this by @@ -4804,7 +4804,7 @@ - Issue #1486713: HTMLParser now has an optional tolerant mode where it tries to guess at the correct parsing of invalid html. -- Issue #10554: Add context manager support to subprocess.Popen objects. +- Issue #10554: Add context management protocol support to subprocess.Popen objects. - Issue #8989: email.utils.make_msgid now has a domain parameter that can override the domain name used in the generated msgid. @@ -5166,7 +5166,7 @@ - Issue #10253: FileIO leaks a file descriptor when trying to open a file for append that isn't seekable. Patch by Brian Brazil. -- Support context manager protocol for file-like objects returned by mailbox +- Support context management protocol for file-like objects returned by mailbox ``get_file()`` methods. - Issue #10246: uu.encode didn't close file objects explicitly when filenames @@ -5300,7 +5300,7 @@ - Issue #10143: Update "os.pathconf" values. -- Issue #6518: Support context manager protcol for ossaudiodev types. +- Issue #6518: Support context management protocol for ossaudiodev types. - Issue #678250: Make mmap flush a noop on ACCESS_READ and ACCESS_COPY. @@ -5909,7 +5909,7 @@ - Issue #8105: Validate file descriptor passed to mmap.mmap on Windows. -- Issue #8046: Add context manager protocol support and .closed property to mmap +- Issue #8046: Add context management protocol support and .closed property to mmap objects. Library @@ -6916,7 +6916,7 @@ - The audioop module now supports sound fragments of length greater than 2**31 bytes on 64-bit machines, and is PY_SSIZE_T_CLEAN. -- Issue #4972: Add support for the context manager protocol to the ftplib.FTP +- Issue #4972: Add support for the context management protocol to the ftplib.FTP class. - Issue #8664: In py_compile, create __pycache__ when the compiled path is @@ -7204,7 +7204,7 @@ - Issue #7494: fix a crash in _lsprof (cProfile) after clearing the profiler, reset also the pointer to the current pointer context. -- Issue #7232: Add support for the context manager protocol to the TarFile +- Issue #7232: Add support for the context management protocol to the TarFile class. - Issue #7250: Fix info leak of os.environ across multi-run uses of @@ -9242,7 +9242,7 @@ - Issue #1696199: Add collections.Counter() for rapid and convenient counting. -- Issue #3860: GzipFile and BZ2File now support the context manager protocol. +- Issue #3860: GzipFile and BZ2File now support the context management protocol. - Issue #4867: Fixed a crash in ctypes when passing a string to a function without defining argtypes. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1958,7 +1958,7 @@ - Issue #19448: Add private API to SSL module to lookup ASN.1 objects by OID, NID, short name and long name. -- Issue #19282: dbm.open now supports the context manager protocol. (Inital +- Issue #19282: dbm.open now supports the context management protocol. (Inital patch by Claudiu Popa) - Issue #8311: Added support for writing any bytes-like objects in the aifc, @@ -2667,7 +2667,7 @@ - Issue #18830: inspect.getclasstree() no longer produces duplicate entries even when input list contains duplicates. -- Issue #18878: sunau.open now supports the context manager protocol. Based on +- Issue #18878: sunau.open now supports the context management protocol. Based on patches by Claudiu Popa and R. David Murray. 
- Issue #18909: Fix _tkinter.tkapp.interpaddr() on Windows 64-bit, don't cast @@ -3307,7 +3307,7 @@ initialization, so as to reclaim allocated resources (Python callbacks) at shutdown. Original patch by Robin Schreiber. -- Issue #17616: wave.open now supports the context manager protocol. +- Issue #17616: wave.open now supports the context management protocol. - Issue #18599: Fix name attribute of _sha1.sha1() object. It now returns 'SHA1' instead of 'SHA'. diff --git a/Modules/ossaudiodev.c b/Modules/ossaudiodev.c --- a/Modules/ossaudiodev.c +++ b/Modules/ossaudiodev.c @@ -894,7 +894,7 @@ /* Aliases for backwards compatibility */ { "flush", (PyCFunction)oss_sync, METH_VARARGS }, - /* Support for the context manager protocol */ + /* Support for the context management protocol */ { "__enter__", oss_self, METH_NOARGS }, { "__exit__", oss_exit, METH_VARARGS }, @@ -906,7 +906,7 @@ { "close", (PyCFunction)oss_mixer_close, METH_NOARGS }, { "fileno", (PyCFunction)oss_mixer_fileno, METH_NOARGS }, - /* Support for the context manager protocol */ + /* Support for the context management protocol */ { "__enter__", oss_self, METH_NOARGS }, { "__exit__", oss_exit, METH_VARARGS }, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 22:59:51 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 10 Sep 2014 22:59:51 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322369=3A_Change_=22context_manager_protocol=22_?= =?utf-8?q?to_=22context_management?= Message-ID: <3htbFz51hqz7Ll8@mail.python.org> http://hg.python.org/cpython/rev/ad568d52af4b changeset: 92385:ad568d52af4b parent: 92383:81c4225fa994 parent: 92384:cc5b183a2ad4 user: Serhiy Storchaka date: Wed Sep 10 23:45:42 2014 +0300 summary: Issue #22369: Change "context manager protocol" to "context management protocol". files: Doc/library/asyncio-sync.rst | 4 ++-- Doc/library/mailbox.rst | 2 +- Doc/library/multiprocessing.rst | 8 ++++---- Doc/library/ossaudiodev.rst | 4 ++-- Doc/library/tarfile.rst | 2 +- Doc/library/threading.rst | 8 ++++---- Doc/whatsnew/2.7.rst | 2 +- Doc/whatsnew/3.1.rst | 2 +- Doc/whatsnew/3.2.rst | 6 +++--- Doc/whatsnew/3.3.rst | 4 ++-- Doc/whatsnew/3.4.rst | 6 +++--- Lib/asyncio/locks.py | 4 ++-- Lib/mailbox.py | 2 +- Misc/HISTORY | 18 +++++++++--------- Misc/NEWS | 8 ++++---- Modules/ossaudiodev.c | 4 ++-- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/Doc/library/asyncio-sync.rst b/Doc/library/asyncio-sync.rst --- a/Doc/library/asyncio-sync.rst +++ b/Doc/library/asyncio-sync.rst @@ -34,7 +34,7 @@ :meth:`acquire` is a coroutine and should be called with ``yield from``. - Locks also support the context manager protocol. ``(yield from lock)`` + Locks also support the context management protocol. ``(yield from lock)`` should be used as context manager expression. Usage:: @@ -229,7 +229,7 @@ counter can never go below zero; when :meth:`acquire` finds that it is zero, it blocks, waiting until some other thread calls :meth:`release`. - Semaphores also support the context manager protocol. + Semaphores also support the context management protocol. The optional argument gives the initial value for the internal counter; it defaults to ``1``. If the value given is less than ``0``, :exc:`ValueError` diff --git a/Doc/library/mailbox.rst b/Doc/library/mailbox.rst --- a/Doc/library/mailbox.rst +++ b/Doc/library/mailbox.rst @@ -202,7 +202,7 @@ .. 
versionchanged:: 3.2 The file object really is a binary file; previously it was incorrectly returned in text mode. Also, the file-like object now supports the - context manager protocol: you can use a :keyword:`with` statement to + context management protocol: you can use a :keyword:`with` statement to automatically close it. .. note:: diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1046,7 +1046,7 @@ using :meth:`Connection.send` and :meth:`Connection.recv`. .. versionadded:: 3.3 - Connection objects now support the context manager protocol -- see + Connection objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the connection object, and :meth:`~contextmanager.__exit__` calls :meth:`close`. @@ -1506,7 +1506,7 @@ The address used by the manager. .. versionchanged:: 3.3 - Manager objects support the context manager protocol -- see + Manager objects support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` starts the server process (if it has not already started) and then returns the manager object. :meth:`~contextmanager.__exit__` calls :meth:`shutdown`. @@ -1998,7 +1998,7 @@ :meth:`terminate` before using :meth:`join`. .. versionadded:: 3.3 - Pool objects now support the context manager protocol -- see + Pool objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the pool object, and :meth:`~contextmanager.__exit__` calls :meth:`terminate`. @@ -2171,7 +2171,7 @@ unavailable then it is ``None``. .. versionadded:: 3.3 - Listener objects now support the context manager protocol -- see + Listener objects now support the context management protocol -- see :ref:`typecontextmanager`. :meth:`~contextmanager.__enter__` returns the listener object, and :meth:`~contextmanager.__exit__` calls :meth:`close`. diff --git a/Doc/library/ossaudiodev.rst b/Doc/library/ossaudiodev.rst --- a/Doc/library/ossaudiodev.rst +++ b/Doc/library/ossaudiodev.rst @@ -165,7 +165,7 @@ data written is always equal to the amount of data supplied. .. versionchanged:: 3.2 - Audio device objects also support the context manager protocol, i.e. they can + Audio device objects also support the context management protocol, i.e. they can be used in a :keyword:`with` statement. @@ -357,7 +357,7 @@ Returns the file handle number of the open mixer device file. .. versionchanged:: 3.2 - Mixer objects also support the context manager protocol. + Mixer objects also support the context management protocol. The remaining methods are specific to audio mixing: diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst --- a/Doc/library/tarfile.rst +++ b/Doc/library/tarfile.rst @@ -238,7 +238,7 @@ :ref:`tar-examples` section for a use case. .. versionadded:: 3.2 - Added support for the context manager protocol. + Added support for the context management protocol. .. class:: TarFile(name=None, mode='r', fileobj=None, format=DEFAULT_FORMAT, tarinfo=TarInfo, dereference=False, ignore_zeros=False, encoding=ENCODING, errors='surrogateescape', pax_headers=None, debug=0, errorlevel=0) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -354,7 +354,7 @@ immediately. If an attempt is made to release an unlocked lock, a :exc:`RuntimeError` will be raised. 
-Locks also support the :ref:`context manager protocol `. +Locks also support the :ref:`context management protocol `. When more than one thread is blocked in :meth:`~Lock.acquire` waiting for the state to turn to unlocked, only one thread proceeds when a :meth:`~Lock.release` @@ -433,7 +433,7 @@ :meth:`~Lock.release` of the outermost pair) resets the lock to unlocked and allows another thread blocked in :meth:`~Lock.acquire` to proceed. -Reentrant locks also support the :ref:`context manager protocol `. +Reentrant locks also support the :ref:`context management protocol `. .. class:: RLock() @@ -501,7 +501,7 @@ several condition variables must share the same lock. The lock is part of the condition object: you don't have to track it separately. -A condition variable obeys the :ref:`context manager protocol `: +A condition variable obeys the :ref:`context management protocol `: using the ``with`` statement acquires the associated lock for the duration of the enclosed block. The :meth:`~Condition.acquire` and :meth:`~Condition.release` methods also call the corresponding methods of @@ -677,7 +677,7 @@ finds that it is zero, it blocks, waiting until some other thread calls :meth:`~Semaphore.release`. -Semaphores also support the :ref:`context manager protocol `. +Semaphores also support the :ref:`context management protocol `. .. class:: Semaphore(value=1) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -1612,7 +1612,7 @@ resulting archive. This is more powerful than the existing *exclude* argument, which has therefore been deprecated. (Added by Lars Gust?bel; :issue:`6856`.) - The :class:`~tarfile.TarFile` class also now supports the context manager protocol. + The :class:`~tarfile.TarFile` class also now supports the context management protocol. (Added by Lars Gust?bel; :issue:`7232`.) * The :meth:`~threading.Event.wait` method of the :class:`threading.Event` class diff --git a/Doc/whatsnew/3.1.rst b/Doc/whatsnew/3.1.rst --- a/Doc/whatsnew/3.1.rst +++ b/Doc/whatsnew/3.1.rst @@ -238,7 +238,7 @@ (Contributed by Guilherme Polo; :issue:`2983`.) * The :class:`gzip.GzipFile` and :class:`bz2.BZ2File` classes now support - the context manager protocol:: + the context management protocol:: >>> # Automatically close file after writing >>> with gzip.GzipFile(filename, "wb") as f: diff --git a/Doc/whatsnew/3.2.rst b/Doc/whatsnew/3.2.rst --- a/Doc/whatsnew/3.2.rst +++ b/Doc/whatsnew/3.2.rst @@ -522,7 +522,7 @@ (Proposed and implemented by Mark Dickinson; :issue:`9337`.) * :class:`memoryview` objects now have a :meth:`~memoryview.release()` method - and they also now support the context manager protocol. This allows timely + and they also now support the context management protocol. This allows timely release of any resources that were acquired when requesting a buffer from the original object. @@ -1315,7 +1315,7 @@ ftp --- -The :class:`ftplib.FTP` class now supports the context manager protocol to +The :class:`ftplib.FTP` class now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the FTP connection when done:: @@ -1595,7 +1595,7 @@ descriptor. The latter can then be reused for other purposes. (Added by Antoine Pitrou; :issue:`8524`.) 
-* :func:`socket.create_connection` now supports the context manager protocol +* :func:`socket.create_connection` now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the socket when done. (Contributed by Giampaolo Rodol?; :issue:`9794`.) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -1556,7 +1556,7 @@ nntplib ------- -The :class:`nntplib.NNTP` class now supports the context manager protocol to +The :class:`nntplib.NNTP` class now supports the context management protocol to unconditionally consume :exc:`socket.error` exceptions and to close the NNTP connection when done:: @@ -1861,7 +1861,7 @@ when creating the outgoing socket. (Contributed by Paulo Scardine in :issue:`11281`.) -:class:`~smtplib.SMTP` now supports the context manager protocol, allowing an +:class:`~smtplib.SMTP` now supports the context management protocol, allowing an ``SMTP`` instance to be used in a ``with`` statement. (Contributed by Giampaolo Rodol? in :issue:`11289`.) diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst --- a/Doc/whatsnew/3.4.rst +++ b/Doc/whatsnew/3.4.rst @@ -608,7 +608,7 @@ The :meth:`~aifc.aifc.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`17818`.) -:func:`aifc.open` now supports the context manager protocol: when used in a +:func:`aifc.open` now supports the context management protocol: when used in a :keyword:`with` block, the :meth:`~aifc.aifc.close` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchacha in :issue:`16486`.) @@ -1521,7 +1521,7 @@ The :meth:`~sunau.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`18901`.) -:meth:`sunau.open` now supports the context manager protocol: when used in a +:meth:`sunau.open` now supports the context management protocol: when used in a :keyword:`with` block, the ``close`` method of the returned object will be called automatically at the end of the block. (Contributed by Serhiy Storchaka in :issue:`18878`.) @@ -1723,7 +1723,7 @@ The :meth:`~wave.getparams` method now returns a namedtuple rather than a plain tuple. (Contributed by Claudiu Popa in :issue:`17487`.) -:meth:`wave.open` now supports the context manager protocol. (Contributed +:meth:`wave.open` now supports the context management protocol. (Contributed by Claudiu Popa in :issue:`17616`.) :mod:`wave` can now :ref:`write output to unseekable files diff --git a/Lib/asyncio/locks.py b/Lib/asyncio/locks.py --- a/Lib/asyncio/locks.py +++ b/Lib/asyncio/locks.py @@ -63,7 +63,7 @@ acquire() is a coroutine and should be called with 'yield from'. - Locks also support the context manager protocol. '(yield from lock)' + Locks also support the context management protocol. '(yield from lock)' should be used as context manager expression. Usage: @@ -376,7 +376,7 @@ can never go below zero; when acquire() finds that it is zero, it blocks, waiting until some other thread calls release(). - Semaphores also support the context manager protocol. + Semaphores also support the context management protocol. The optional argument gives the initial value for the internal counter; it defaults to 1. 
If the value given is less than 0, diff --git a/Lib/mailbox.py b/Lib/mailbox.py --- a/Lib/mailbox.py +++ b/Lib/mailbox.py @@ -1980,7 +1980,7 @@ return result def __enter__(self): - """Context manager protocol support.""" + """Context management protocol support.""" return self def __exit__(self, *exc): diff --git a/Misc/HISTORY b/Misc/HISTORY --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -806,7 +806,7 @@ - Issue #14772: Return destination values from some shutil functions. -- Issue #15064: Implement context manager protocol for multiprocessing types +- Issue #15064: Implement context management protocol for multiprocessing types - Issue #15101: Make pool finalizer avoid joining current thread. @@ -3527,7 +3527,7 @@ ``mmap.PROT_READ|mmap.PROT_EXEC`` would segfault instead of raising a TypeError. Patch by Charles-Fran?ois Natali. -- Issue #9795: add context manager protocol support for nntplib.NNTP class. +- Issue #9795: add context management protocol support for nntplib.NNTP class. - Issue #11306: mailbox in certain cases adapts to an inability to open certain files in read-write mode. Previously it detected this by @@ -4837,7 +4837,7 @@ - Issue #1486713: HTMLParser now has an optional tolerant mode where it tries to guess at the correct parsing of invalid html. -- Issue #10554: Add context manager support to subprocess.Popen objects. +- Issue #10554: Add context management protocol support to subprocess.Popen objects. - Issue #8989: email.utils.make_msgid now has a domain parameter that can override the domain name used in the generated msgid. @@ -5199,7 +5199,7 @@ - Issue #10253: FileIO leaks a file descriptor when trying to open a file for append that isn't seekable. Patch by Brian Brazil. -- Support context manager protocol for file-like objects returned by mailbox +- Support context management protocol for file-like objects returned by mailbox ``get_file()`` methods. - Issue #10246: uu.encode didn't close file objects explicitly when filenames @@ -5333,7 +5333,7 @@ - Issue #10143: Update "os.pathconf" values. -- Issue #6518: Support context manager protcol for ossaudiodev types. +- Issue #6518: Support context management protocol for ossaudiodev types. - Issue #678250: Make mmap flush a noop on ACCESS_READ and ACCESS_COPY. @@ -5942,7 +5942,7 @@ - Issue #8105: Validate file descriptor passed to mmap.mmap on Windows. -- Issue #8046: Add context manager protocol support and .closed property to mmap +- Issue #8046: Add context management protocol support and .closed property to mmap objects. Library @@ -6949,7 +6949,7 @@ - The audioop module now supports sound fragments of length greater than 2**31 bytes on 64-bit machines, and is PY_SSIZE_T_CLEAN. -- Issue #4972: Add support for the context manager protocol to the ftplib.FTP +- Issue #4972: Add support for the context management protocol to the ftplib.FTP class. - Issue #8664: In py_compile, create __pycache__ when the compiled path is @@ -7237,7 +7237,7 @@ - Issue #7494: fix a crash in _lsprof (cProfile) after clearing the profiler, reset also the pointer to the current pointer context. -- Issue #7232: Add support for the context manager protocol to the TarFile +- Issue #7232: Add support for the context management protocol to the TarFile class. - Issue #7250: Fix info leak of os.environ across multi-run uses of @@ -9275,7 +9275,7 @@ - Issue #1696199: Add collections.Counter() for rapid and convenient counting. -- Issue #3860: GzipFile and BZ2File now support the context manager protocol. 
+- Issue #3860: GzipFile and BZ2File now support the context management protocol. - Issue #4867: Fixed a crash in ctypes when passing a string to a function without defining argtypes. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,7 +132,7 @@ Library ------- -- Issue #12410: imaplib.IMAP4 now supports the context manager protocol. +- Issue #12410: imaplib.IMAP4 now supports the context management protocol. Original patch by Tarek Ziad?. - Issue #16662: load_tests() is now unconditionally run when it is present in @@ -2286,7 +2286,7 @@ - Issue #19448: Add private API to SSL module to lookup ASN.1 objects by OID, NID, short name and long name. -- Issue #19282: dbm.open now supports the context manager protocol. (Inital +- Issue #19282: dbm.open now supports the context management protocol. (Inital patch by Claudiu Popa) - Issue #8311: Added support for writing any bytes-like objects in the aifc, @@ -2995,7 +2995,7 @@ - Issue #18830: inspect.getclasstree() no longer produces duplicate entries even when input list contains duplicates. -- Issue #18878: sunau.open now supports the context manager protocol. Based on +- Issue #18878: sunau.open now supports the context management protocol. Based on patches by Claudiu Popa and R. David Murray. - Issue #18909: Fix _tkinter.tkapp.interpaddr() on Windows 64-bit, don't cast @@ -3635,7 +3635,7 @@ initialization, so as to reclaim allocated resources (Python callbacks) at shutdown. Original patch by Robin Schreiber. -- Issue #17616: wave.open now supports the context manager protocol. +- Issue #17616: wave.open now supports the context management protocol. - Issue #18599: Fix name attribute of _sha1.sha1() object. It now returns 'SHA1' instead of 'SHA'. diff --git a/Modules/ossaudiodev.c b/Modules/ossaudiodev.c --- a/Modules/ossaudiodev.c +++ b/Modules/ossaudiodev.c @@ -894,7 +894,7 @@ /* Aliases for backwards compatibility */ { "flush", (PyCFunction)oss_sync, METH_VARARGS }, - /* Support for the context manager protocol */ + /* Support for the context management protocol */ { "__enter__", oss_self, METH_NOARGS }, { "__exit__", oss_exit, METH_VARARGS }, @@ -906,7 +906,7 @@ { "close", (PyCFunction)oss_mixer_close, METH_NOARGS }, { "fileno", (PyCFunction)oss_mixer_fileno, METH_NOARGS }, - /* Support for the context manager protocol */ + /* Support for the context management protocol */ { "__enter__", oss_self, METH_NOARGS }, { "__exit__", oss_exit, METH_VARARGS }, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 22:59:53 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 10 Sep 2014 22:59:53 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMzY5?= =?utf-8?q?=3A_Change_=22context_manager_protocol=22_to_=22context_managem?= =?utf-8?q?ent?= Message-ID: <3htbG106tvz7Lkk@mail.python.org> http://hg.python.org/cpython/rev/e62082a70b6b changeset: 92386:e62082a70b6b branch: 2.7 parent: 92372:7b0fdc1e917a user: Serhiy Storchaka date: Wed Sep 10 23:46:14 2014 +0300 summary: Issue #22369: Change "context manager protocol" to "context management protocol". files: Doc/library/tarfile.rst | 2 +- Doc/whatsnew/2.7.rst | 2 +- Misc/NEWS | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst --- a/Doc/library/tarfile.rst +++ b/Doc/library/tarfile.rst @@ -247,7 +247,7 @@ :ref:`tar-examples` section for a use case. .. versionadded:: 2.7 - Added support for the context manager protocol. 
+ Added support for the context management protocol. .. class:: TarFile(name=None, mode='r', fileobj=None, format=DEFAULT_FORMAT, tarinfo=TarInfo, dereference=False, ignore_zeros=False, encoding=ENCODING, errors=None, pax_headers=None, debug=0, errorlevel=0) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -1612,7 +1612,7 @@ resulting archive. This is more powerful than the existing *exclude* argument, which has therefore been deprecated. (Added by Lars Gust?bel; :issue:`6856`.) - The :class:`~tarfile.TarFile` class also now supports the context manager protocol. + The :class:`~tarfile.TarFile` class also now supports the context management protocol. (Added by Lars Gust?bel; :issue:`7232`.) * The :meth:`~threading.Event.wait` method of the :class:`threading.Event` class diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5120,7 +5120,7 @@ - Issue #7494: Fix a crash in ``_lsprof`` (cProfile) after clearing the profiler, reset also the pointer to the current pointer context. -- Issue #7232: Add support for the context manager protocol to the +- Issue #7232: Add support for the context management protocol to the ``tarfile.TarFile`` class. - Issue #7250: Fix info leak of os.environ across multi-run uses of @@ -6870,7 +6870,7 @@ - Issue #1696199: Add collections.Counter() for rapid and convenient counting. -- Issue #3860: GzipFile and BZ2File now support the context manager protocol. +- Issue #3860: GzipFile and BZ2File now support the context management protocol. - Issue #4272: Add an optional argument to the GzipFile constructor to override the timestamp in the gzip stream. The default value remains the current time. @@ -10039,7 +10039,7 @@ parameter. - _winreg's HKEY object has gained __enter__ and __exit__ methods to - support the context manager protocol. The _winreg module also + support the context management protocol. The _winreg module also gained a new function ``ExpandEnvironmentStrings`` to expand REG_EXPAND_SZ keys. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 23:13:51 2014 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 10 Sep 2014 23:13:51 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_y2k_compliance?= =?utf-8?b?LCBsb2wgKGNsb3NlcyAjMjIzODAp?= Message-ID: <3htbZ70NC0z7Lk5@mail.python.org> http://hg.python.org/cpython/rev/071a2620917f changeset: 92387:071a2620917f branch: 3.4 parent: 92384:cc5b183a2ad4 user: Benjamin Peterson date: Wed Sep 10 17:13:06 2014 -0400 summary: y2k compliance, lol (closes #22380) files: Doc/faq/general.rst | 37 --------------------------------- 1 files changed, 0 insertions(+), 37 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -377,43 +377,6 @@ changes while minimizing disruption for users. -Is Python Y2K (Year 2000) Compliant? ------------------------------------- - -.. remove this question? - -As of August, 2003 no major problems have been reported and Y2K compliance seems -to be a non-issue. - -Python does very few date calculations and for those it does perform relies on -the C library functions. Python generally represents times either as seconds -since 1970 or as a ``(year, month, day, ...)`` tuple where the year is expressed -with four digits, which makes Y2K bugs unlikely. So as long as your C library -is okay, Python should be okay. 
Of course, it's possible that a particular -application written in Python makes assumptions about 2-digit years. - -Because Python is available free of charge, there are no absolute guarantees. -If there *are* unforeseen problems, liability is the user's problem rather than -the developers', and there is nobody you can sue for damages. The Python -copyright notice contains the following disclaimer: - - 4. PSF is making Python 2.3 available to Licensee on an "AS IS" - basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY - WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY - REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR - PURPOSE OR THAT THE USE OF PYTHON 2.3 WILL NOT INFRINGE ANY THIRD PARTY - RIGHTS. - - 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON - 2.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS - A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.3, - OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -The good news is that *if* you encounter a problem, you have full source -available to track it down and fix it. This is one advantage of an open source -programming environment. - - Is Python a good language for beginning programmers? ---------------------------------------------------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 23:13:52 2014 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 10 Sep 2014 23:13:52 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_y2k_compliance?= =?utf-8?b?LCBsb2wgKGNsb3NlcyAjMjIzODAp?= Message-ID: <3htbZ82NFLz7Lkq@mail.python.org> http://hg.python.org/cpython/rev/02c94b9451f8 changeset: 92388:02c94b9451f8 branch: 2.7 parent: 92386:e62082a70b6b user: Benjamin Peterson date: Wed Sep 10 17:13:06 2014 -0400 summary: y2k compliance, lol (closes #22380) files: Doc/faq/general.rst | 37 --------------------------------- 1 files changed, 0 insertions(+), 37 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -377,43 +377,6 @@ changes while minimizing disruption for users. -Is Python Y2K (Year 2000) Compliant? ------------------------------------- - -.. remove this question? - -As of August, 2003 no major problems have been reported and Y2K compliance seems -to be a non-issue. - -Python does very few date calculations and for those it does perform relies on -the C library functions. Python generally represents times either as seconds -since 1970 or as a ``(year, month, day, ...)`` tuple where the year is expressed -with four digits, which makes Y2K bugs unlikely. So as long as your C library -is okay, Python should be okay. Of course, it's possible that a particular -application written in Python makes assumptions about 2-digit years. - -Because Python is available free of charge, there are no absolute guarantees. -If there *are* unforeseen problems, liability is the user's problem rather than -the developers', and there is nobody you can sue for damages. The Python -copyright notice contains the following disclaimer: - - 4. PSF is making Python 2.3 available to Licensee on an "AS IS" - basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. 
BY - WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY - REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR - PURPOSE OR THAT THE USE OF PYTHON 2.3 WILL NOT INFRINGE ANY THIRD PARTY - RIGHTS. - - 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON - 2.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS - A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.3, - OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -The good news is that *if* you encounter a problem, you have full source -available to track it down and fix it. This is one advantage of an open source -programming environment. - - Is Python a good language for beginning programmers? ---------------------------------------------------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 23:13:53 2014 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 10 Sep 2014 23:13:53 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjIzODAp?= Message-ID: <3htbZ94DMSz7Lkn@mail.python.org> http://hg.python.org/cpython/rev/e3a3c8809d09 changeset: 92389:e3a3c8809d09 parent: 92385:ad568d52af4b parent: 92387:071a2620917f user: Benjamin Peterson date: Wed Sep 10 17:13:36 2014 -0400 summary: merge 3.4 (#22380) files: Doc/faq/general.rst | 37 --------------------------------- 1 files changed, 0 insertions(+), 37 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -377,43 +377,6 @@ changes while minimizing disruption for users. -Is Python Y2K (Year 2000) Compliant? ------------------------------------- - -.. remove this question? - -As of August, 2003 no major problems have been reported and Y2K compliance seems -to be a non-issue. - -Python does very few date calculations and for those it does perform relies on -the C library functions. Python generally represents times either as seconds -since 1970 or as a ``(year, month, day, ...)`` tuple where the year is expressed -with four digits, which makes Y2K bugs unlikely. So as long as your C library -is okay, Python should be okay. Of course, it's possible that a particular -application written in Python makes assumptions about 2-digit years. - -Because Python is available free of charge, there are no absolute guarantees. -If there *are* unforeseen problems, liability is the user's problem rather than -the developers', and there is nobody you can sue for damages. The Python -copyright notice contains the following disclaimer: - - 4. PSF is making Python 2.3 available to Licensee on an "AS IS" - basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY - WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY - REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR - PURPOSE OR THAT THE USE OF PYTHON 2.3 WILL NOT INFRINGE ANY THIRD PARTY - RIGHTS. - - 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON - 2.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS - A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.3, - OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -The good news is that *if* you encounter a problem, you have full source -available to track it down and fix it. This is one advantage of an open source -programming environment. 
- - Is Python a good language for beginning programmers? ---------------------------------------------------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 23:34:54 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 10 Sep 2014 23:34:54 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzM4?= =?utf-8?q?=3A_Fix_a_crash_in_the_json_module_on_memory_allocation_failure?= =?utf-8?q?=2E?= Message-ID: <3htc2Q3JChz7Lk6@mail.python.org> http://hg.python.org/cpython/rev/3ac9f9576ce6 changeset: 92390:3ac9f9576ce6 branch: 3.4 parent: 92387:071a2620917f user: Victor Stinner date: Wed Sep 10 23:31:42 2014 +0200 summary: Issue #22338: Fix a crash in the json module on memory allocation failure. files: Misc/NEWS | 2 ++ Modules/_json.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,8 @@ Library ------- +- Issue #22338: Fix a crash in the json module on memory allocation failure. + - Issue #22226: First letter no longer is stripped from the "status" key in the result of Treeview.heading(). diff --git a/Modules/_json.c b/Modules/_json.c --- a/Modules/_json.c +++ b/Modules/_json.c @@ -287,7 +287,7 @@ } \ } \ if (PyList_Append(chunks, chunk)) { \ - Py_DECREF(chunk); \ + Py_CLEAR(chunk); \ goto bail; \ } \ Py_CLEAR(chunk); \ @@ -1555,6 +1555,7 @@ if (item == NULL) goto bail; PyList_SET_ITEM(items, i, item); + item = NULL; Py_DECREF(key); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 10 23:34:55 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 10 Sep 2014 23:34:55 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2322338=3A_Fix_a_crash_in_the_j?= =?utf-8?q?son_module_on_memory_allocation?= Message-ID: <3htc2R5Tkzz7Lkq@mail.python.org> http://hg.python.org/cpython/rev/135fc23e475c changeset: 92391:135fc23e475c parent: 92389:e3a3c8809d09 parent: 92390:3ac9f9576ce6 user: Victor Stinner date: Wed Sep 10 23:32:36 2014 +0200 summary: (Merge 3.4) Issue #22338: Fix a crash in the json module on memory allocation failure. files: Misc/NEWS | 2 ++ Modules/_json.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,8 @@ Library ------- +- Issue #22338: Fix a crash in the json module on memory allocation failure. + - Issue #12410: imaplib.IMAP4 now supports the context management protocol. Original patch by Tarek Ziad?. 
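[Illustrative aside, not part of the quoted changeset.] A hedged sketch of the NEWS entry quoted just above (issue #12410, imaplib.IMAP4 now supports the context management protocol), not of the _json.c hunk that follows: IMAP4 objects provide __enter__/__exit__, so a with statement logs the connection out automatically. The host name below is a placeholder and assumes a reachable IMAP server; it is not taken from the changeset.

    import imaplib

    # IMAP4.__enter__ returns the connection; __exit__ calls logout().
    # "imap.example.org" is a stand-in host, not a real server.
    with imaplib.IMAP4("imap.example.org") as client:
        client.noop()
    # The connection has been logged out here by IMAP4.__exit__.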
diff --git a/Modules/_json.c b/Modules/_json.c --- a/Modules/_json.c +++ b/Modules/_json.c @@ -287,7 +287,7 @@ } \ } \ if (PyList_Append(chunks, chunk)) { \ - Py_DECREF(chunk); \ + Py_CLEAR(chunk); \ goto bail; \ } \ Py_CLEAR(chunk); \ @@ -1555,6 +1555,7 @@ if (item == NULL) goto bail; PyList_SET_ITEM(items, i, item); + item = NULL; Py_DECREF(key); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 08:33:15 2014 From: python-checkins at python.org (matthias.klose) Date: Thu, 11 Sep 2014 08:33:15 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogLSBJc3N1ZSAjMjIz?= =?utf-8?q?81=3A_Update_zlib_to_1=2E2=2E8=2E?= Message-ID: <3htqzb1gdmz7LkK@mail.python.org> http://hg.python.org/cpython/rev/3c343588f6a6 changeset: 92392:3c343588f6a6 branch: 2.7 parent: 92388:02c94b9451f8 user: doko at ubuntu.com date: Thu Sep 11 08:32:46 2014 +0200 summary: - Issue #22381: Update zlib to 1.2.8. files: Misc/NEWS | 2 + Modules/zlib/ChangeLog | 619 ++++++++++- Modules/zlib/FAQ | 267 ++- Modules/zlib/INDEX | 41 +- Modules/zlib/Makefile | 157 +-- Modules/zlib/Makefile.in | 260 +++- Modules/zlib/README | 94 +- Modules/zlib/adler32.c | 120 +- Modules/zlib/algorithm.txt | 4 +- Modules/zlib/compress.c | 7 +- Modules/zlib/configure | 786 +++++++++--- Modules/zlib/crc32.c | 112 +- Modules/zlib/crc32.h | 2 +- Modules/zlib/deflate.c | 521 ++++++-- Modules/zlib/deflate.h | 45 +- Modules/zlib/example.c | 92 +- Modules/zlib/gzclose.c | 25 + Modules/zlib/gzguts.h | 209 +++ Modules/zlib/gzio.c | 1026 ---------------- Modules/zlib/gzlib.c | 634 ++++++++++ Modules/zlib/gzread.c | 594 +++++++++ Modules/zlib/gzwrite.c | 577 +++++++++ Modules/zlib/infback.c | 105 +- Modules/zlib/inffast.c | 84 +- Modules/zlib/inffast.h | 4 +- Modules/zlib/inffixed.h | 6 +- Modules/zlib/inflate.c | 482 +++++-- Modules/zlib/inflate.h | 31 +- Modules/zlib/inftrees.c | 93 +- Modules/zlib/inftrees.h | 27 +- Modules/zlib/make_vms.com | 750 +++++++++-- Modules/zlib/minigzip.c | 345 +++++- Modules/zlib/trees.c | 145 +- Modules/zlib/trees.h | 4 +- Modules/zlib/uncompr.c | 6 +- Modules/zlib/zconf.h | 297 +++- Modules/zlib/zconf.h.cmakein | 513 ++++++++ Modules/zlib/zconf.h.in | 511 ++++++++ Modules/zlib/zconf.in.h | 332 ----- Modules/zlib/zlib.3 | 70 +- Modules/zlib/zlib.h | 1359 ++++++++++++++------- Modules/zlib/zlib.map | 83 + Modules/zlib/zlib.pc.in | 13 + Modules/zlib/zutil.c | 56 +- Modules/zlib/zutil.h | 142 +- 45 files changed, 8133 insertions(+), 3519 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -142,6 +142,8 @@ Extension Modules ----------------- +- Issue #22381: Update zlib to 1.2.8. + - Issue #22176: Update the ctypes module's libffi to v3.1. This release adds support for the Linux AArch64 and POWERPC ELF ABIv2 little endian architectures. diff --git a/Modules/zlib/ChangeLog b/Modules/zlib/ChangeLog --- a/Modules/zlib/ChangeLog +++ b/Modules/zlib/ChangeLog @@ -1,6 +1,623 @@ ChangeLog file for zlib +Changes in 1.2.8 (28 Apr 2013) +- Update contrib/minizip/iowin32.c for Windows RT [Vollant] +- Do not force Z_CONST for C++ +- Clean up contrib/vstudio [Ro?] 
+- Correct spelling error in zlib.h +- Fix mixed line endings in contrib/vstudio + +Changes in 1.2.7.3 (13 Apr 2013) +- Fix version numbers and DLL names in contrib/vstudio/*/zlib.rc + +Changes in 1.2.7.2 (13 Apr 2013) +- Change check for a four-byte type back to hexadecimal +- Fix typo in win32/Makefile.msc +- Add casts in gzwrite.c for pointer differences + +Changes in 1.2.7.1 (24 Mar 2013) +- Replace use of unsafe string functions with snprintf if available +- Avoid including stddef.h on Windows for Z_SOLO compile [Niessink] +- Fix gzgetc undefine when Z_PREFIX set [Turk] +- Eliminate use of mktemp in Makefile (not always available) +- Fix bug in 'F' mode for gzopen() +- Add inflateGetDictionary() function +- Correct comment in deflate.h +- Use _snprintf for snprintf in Microsoft C +- On Darwin, only use /usr/bin/libtool if libtool is not Apple +- Delete "--version" file if created by "ar --version" [Richard G.] +- Fix configure check for veracity of compiler error return codes +- Fix CMake compilation of static lib for MSVC2010 x64 +- Remove unused variable in infback9.c +- Fix argument checks in gzlog_compress() and gzlog_write() +- Clean up the usage of z_const and respect const usage within zlib +- Clean up examples/gzlog.[ch] comparisons of different types +- Avoid shift equal to bits in type (caused endless loop) +- Fix unintialized value bug in gzputc() introduced by const patches +- Fix memory allocation error in examples/zran.c [Nor] +- Fix bug where gzopen(), gzclose() would write an empty file +- Fix bug in gzclose() when gzwrite() runs out of memory +- Check for input buffer malloc failure in examples/gzappend.c +- Add note to contrib/blast to use binary mode in stdio +- Fix comparisons of differently signed integers in contrib/blast +- Check for invalid code length codes in contrib/puff +- Fix serious but very rare decompression bug in inftrees.c +- Update inflateBack() comments, since inflate() can be faster +- Use underscored I/O function names for WINAPI_FAMILY +- Add _tr_flush_bits to the external symbols prefixed by --zprefix +- Add contrib/vstudio/vc10 pre-build step for static only +- Quote --version-script argument in CMakeLists.txt +- Don't specify --version-script on Apple platforms in CMakeLists.txt +- Fix casting error in contrib/testzlib/testzlib.c +- Fix types in contrib/minizip to match result of get_crc_table() +- Simplify contrib/vstudio/vc10 with 'd' suffix +- Add TOP support to win32/Makefile.msc +- Suport i686 and amd64 assembler builds in CMakeLists.txt +- Fix typos in the use of _LARGEFILE64_SOURCE in zconf.h +- Add vc11 and vc12 build files to contrib/vstudio +- Add gzvprintf() as an undocumented function in zlib +- Fix configure for Sun shell +- Remove runtime check in configure for four-byte integer type +- Add casts and consts to ease user conversion to C++ +- Add man pages for minizip and miniunzip +- In Makefile uninstall, don't rm if preceding cd fails +- Do not return Z_BUF_ERROR if deflateParam() has nothing to write + +Changes in 1.2.7 (2 May 2012) +- Replace use of memmove() with a simple copy for portability +- Test for existence of strerror +- Restore gzgetc_ for backward compatibility with 1.2.6 +- Fix build with non-GNU make on Solaris +- Require gcc 4.0 or later on Mac OS X to use the hidden attribute +- Include unistd.h for Watcom C +- Use __WATCOMC__ instead of __WATCOM__ +- Do not use the visibility attribute if NO_VIZ defined +- Improve the detection of no hidden visibility attribute +- Avoid using __int64 for gcc or solo 
compilation +- Cast to char * in gzprintf to avoid warnings [Zinser] +- Fix make_vms.com for VAX [Zinser] +- Don't use library or built-in byte swaps +- Simplify test and use of gcc hidden attribute +- Fix bug in gzclose_w() when gzwrite() fails to allocate memory +- Add "x" (O_EXCL) and "e" (O_CLOEXEC) modes support to gzopen() +- Fix bug in test/minigzip.c for configure --solo +- Fix contrib/vstudio project link errors [Mohanathas] +- Add ability to choose the builder in make_vms.com [Schweda] +- Add DESTDIR support to mingw32 win32/Makefile.gcc +- Fix comments in win32/Makefile.gcc for proper usage +- Allow overriding the default install locations for cmake +- Generate and install the pkg-config file with cmake +- Build both a static and a shared version of zlib with cmake +- Include version symbols for cmake builds +- If using cmake with MSVC, add the source directory to the includes +- Remove unneeded EXTRA_CFLAGS from win32/Makefile.gcc [Truta] +- Move obsolete emx makefile to old [Truta] +- Allow the use of -Wundef when compiling or using zlib +- Avoid the use of the -u option with mktemp +- Improve inflate() documentation on the use of Z_FINISH +- Recognize clang as gcc +- Add gzopen_w() in Windows for wide character path names +- Rename zconf.h in CMakeLists.txt to move it out of the way +- Add source directory in CMakeLists.txt for building examples +- Look in build directory for zlib.pc in CMakeLists.txt +- Remove gzflags from zlibvc.def in vc9 and vc10 +- Fix contrib/minizip compilation in the MinGW environment +- Update ./configure for Solaris, support --64 [Mooney] +- Remove -R. from Solaris shared build (possible security issue) +- Avoid race condition for parallel make (-j) running example +- Fix type mismatch between get_crc_table() and crc_table +- Fix parsing of version with "-" in CMakeLists.txt [Snider, Ziegler] +- Fix the path to zlib.map in CMakeLists.txt +- Force the native libtool in Mac OS X to avoid GNU libtool [Beebe] +- Add instructions to win32/Makefile.gcc for shared install [Torri] + +Changes in 1.2.6.1 (12 Feb 2012) +- Avoid the use of the Objective-C reserved name "id" +- Include io.h in gzguts.h for Microsoft compilers +- Fix problem with ./configure --prefix and gzgetc macro +- Include gz_header definition when compiling zlib solo +- Put gzflags() functionality back in zutil.c +- Avoid library header include in crc32.c for Z_SOLO +- Use name in GCC_CLASSIC as C compiler for coverage testing, if set +- Minor cleanup in contrib/minizip/zip.c [Vollant] +- Update make_vms.com [Zinser] +- Remove unnecessary gzgetc_ function +- Use optimized byte swap operations for Microsoft and GNU [Snyder] +- Fix minor typo in zlib.h comments [Rzesniowiecki] + +Changes in 1.2.6 (29 Jan 2012) +- Update the Pascal interface in contrib/pascal +- Fix function numbers for gzgetc_ in zlibvc.def files +- Fix configure.ac for contrib/minizip [Schiffer] +- Fix large-entry detection in minizip on 64-bit systems [Schiffer] +- Have ./configure use the compiler return code for error indication +- Fix CMakeLists.txt for cross compilation [McClure] +- Fix contrib/minizip/zip.c for 64-bit architectures [Dalsnes] +- Fix compilation of contrib/minizip on FreeBSD [Marquez] +- Correct suggested usages in win32/Makefile.msc [Shachar, Horvath] +- Include io.h for Turbo C / Borland C on all platforms [Truta] +- Make version explicit in contrib/minizip/configure.ac [Bosmans] +- Avoid warning for no encryption in contrib/minizip/zip.c [Vollant] +- Minor cleanup up contrib/minizip/unzip.c [Vollant] 
+- Fix bug when compiling minizip with C++ [Vollant] +- Protect for long name and extra fields in contrib/minizip [Vollant] +- Avoid some warnings in contrib/minizip [Vollant] +- Add -I../.. -L../.. to CFLAGS for minizip and miniunzip +- Add missing libs to minizip linker command +- Add support for VPATH builds in contrib/minizip +- Add an --enable-demos option to contrib/minizip/configure +- Add the generation of configure.log by ./configure +- Exit when required parameters not provided to win32/Makefile.gcc +- Have gzputc return the character written instead of the argument +- Use the -m option on ldconfig for BSD systems [Tobias] +- Correct in zlib.map when deflateResetKeep was added + +Changes in 1.2.5.3 (15 Jan 2012) +- Restore gzgetc function for binary compatibility +- Do not use _lseeki64 under Borland C++ [Truta] +- Update win32/Makefile.msc to build test/*.c [Truta] +- Remove old/visualc6 given CMakefile and other alternatives +- Update AS400 build files and documentation [Monnerat] +- Update win32/Makefile.gcc to build test/*.c [Truta] +- Permit stronger flushes after Z_BLOCK flushes +- Avoid extraneous empty blocks when doing empty flushes +- Permit Z_NULL arguments to deflatePending +- Allow deflatePrime() to insert bits in the middle of a stream +- Remove second empty static block for Z_PARTIAL_FLUSH +- Write out all of the available bits when using Z_BLOCK +- Insert the first two strings in the hash table after a flush + +Changes in 1.2.5.2 (17 Dec 2011) +- fix ld error: unable to find version dependency 'ZLIB_1.2.5' +- use relative symlinks for shared libs +- Avoid searching past window for Z_RLE strategy +- Assure that high-water mark initialization is always applied in deflate +- Add assertions to fill_window() in deflate.c to match comments +- Update python link in README +- Correct spelling error in gzread.c +- Fix bug in gzgets() for a concatenated empty gzip stream +- Correct error in comment for gz_make() +- Change gzread() and related to ignore junk after gzip streams +- Allow gzread() and related to continue after gzclearerr() +- Allow gzrewind() and gzseek() after a premature end-of-file +- Simplify gzseek() now that raw after gzip is ignored +- Change gzgetc() to a macro for speed (~40% speedup in testing) +- Fix gzclose() to return the actual error last encountered +- Always add large file support for windows +- Include zconf.h for windows large file support +- Include zconf.h.cmakein for windows large file support +- Update zconf.h.cmakein on make distclean +- Merge vestigial vsnprintf determination from zutil.h to gzguts.h +- Clarify how gzopen() appends in zlib.h comments +- Correct documentation of gzdirect() since junk at end now ignored +- Add a transparent write mode to gzopen() when 'T' is in the mode +- Update python link in zlib man page +- Get inffixed.h and MAKEFIXED result to match +- Add a ./config --solo option to make zlib subset with no libary use +- Add undocumented inflateResetKeep() function for CAB file decoding +- Add --cover option to ./configure for gcc coverage testing +- Add #define ZLIB_CONST option to use const in the z_stream interface +- Add comment to gzdopen() in zlib.h to use dup() when using fileno() +- Note behavior of uncompress() to provide as much data as it can +- Add files in contrib/minizip to aid in building libminizip +- Split off AR options in Makefile.in and configure +- Change ON macro to Z_ARG to avoid application conflicts +- Facilitate compilation with Borland C++ for pragmas and vsnprintf +- Include io.h for 
Turbo C / Borland C++ +- Move example.c and minigzip.c to test/ +- Simplify incomplete code table filling in inflate_table() +- Remove code from inflate.c and infback.c that is impossible to execute +- Test the inflate code with full coverage +- Allow deflateSetDictionary, inflateSetDictionary at any time (in raw) +- Add deflateResetKeep and fix inflateResetKeep to retain dictionary +- Fix gzwrite.c to accommodate reduced memory zlib compilation +- Have inflate() with Z_FINISH avoid the allocation of a window +- Do not set strm->adler when doing raw inflate +- Fix gzeof() to behave just like feof() when read is not past end of file +- Fix bug in gzread.c when end-of-file is reached +- Avoid use of Z_BUF_ERROR in gz* functions except for premature EOF +- Document gzread() capability to read concurrently written files +- Remove hard-coding of resource compiler in CMakeLists.txt [Blammo] + +Changes in 1.2.5.1 (10 Sep 2011) +- Update FAQ entry on shared builds (#13) +- Avoid symbolic argument to chmod in Makefile.in +- Fix bug and add consts in contrib/puff [Oberhumer] +- Update contrib/puff/zeros.raw test file to have all block types +- Add full coverage test for puff in contrib/puff/Makefile +- Fix static-only-build install in Makefile.in +- Fix bug in unzGetCurrentFileInfo() in contrib/minizip [Kuno] +- Add libz.a dependency to shared in Makefile.in for parallel builds +- Spell out "number" (instead of "nb") in zlib.h for total_in, total_out +- Replace $(...) with `...` in configure for non-bash sh [Bowler] +- Add darwin* to Darwin* and solaris* to SunOS\ 5* in configure [Groffen] +- Add solaris* to Linux* in configure to allow gcc use [Groffen] +- Add *bsd* to Linux* case in configure [Bar-Lev] +- Add inffast.obj to dependencies in win32/Makefile.msc +- Correct spelling error in deflate.h [Kohler] +- Change libzdll.a again to libz.dll.a (!) in win32/Makefile.gcc +- Add test to configure for GNU C looking for gcc in output of $cc -v +- Add zlib.pc generation to win32/Makefile.gcc [Weigelt] +- Fix bug in zlib.h for _FILE_OFFSET_BITS set and _LARGEFILE64_SOURCE not +- Add comment in zlib.h that adler32_combine with len2 < 0 makes no sense +- Make NO_DIVIDE option in adler32.c much faster (thanks to John Reiser) +- Make stronger test in zconf.h to include unistd.h for LFS +- Apply Darwin patches for 64-bit file offsets to contrib/minizip [Slack] +- Fix zlib.h LFS support when Z_PREFIX used +- Add updated as400 support (removed from old) [Monnerat] +- Avoid deflate sensitivity to volatile input data +- Avoid division in adler32_combine for NO_DIVIDE +- Clarify the use of Z_FINISH with deflateBound() amount of space +- Set binary for output file in puff.c +- Use u4 type for crc_table to avoid conversion warnings +- Apply casts in zlib.h to avoid conversion warnings +- Add OF to prototypes for adler32_combine_ and crc32_combine_ [Miller] +- Improve inflateSync() documentation to note indeterminancy +- Add deflatePending() function to return the amount of pending output +- Correct the spelling of "specification" in FAQ [Randers-Pehrson] +- Add a check in configure for stdarg.h, use for gzprintf() +- Check that pointers fit in ints when gzprint() compiled old style +- Add dummy name before $(SHAREDLIBV) in Makefile [Bar-Lev, Bowler] +- Delete line in configure that adds -L. libz.a to LDFLAGS [Weigelt] +- Add debug records in assmebler code [Londer] +- Update RFC references to use http://tools.ietf.org/html/... 
[Li] +- Add --archs option, use of libtool to configure for Mac OS X [Borstel] + +Changes in 1.2.5 (19 Apr 2010) +- Disable visibility attribute in win32/Makefile.gcc [Bar-Lev] +- Default to libdir as sharedlibdir in configure [Nieder] +- Update copyright dates on modified source files +- Update trees.c to be able to generate modified trees.h +- Exit configure for MinGW, suggesting win32/Makefile.gcc +- Check for NULL path in gz_open [Homurlu] + +Changes in 1.2.4.5 (18 Apr 2010) +- Set sharedlibdir in configure [Torok] +- Set LDFLAGS in Makefile.in [Bar-Lev] +- Avoid mkdir objs race condition in Makefile.in [Bowler] +- Add ZLIB_INTERNAL in front of internal inter-module functions and arrays +- Define ZLIB_INTERNAL to hide internal functions and arrays for GNU C +- Don't use hidden attribute when it is a warning generator (e.g. Solaris) + +Changes in 1.2.4.4 (18 Apr 2010) +- Fix CROSS_PREFIX executable testing, CHOST extract, mingw* [Torok] +- Undefine _LARGEFILE64_SOURCE in zconf.h if it is zero, but not if empty +- Try to use bash or ksh regardless of functionality of /bin/sh +- Fix configure incompatibility with NetBSD sh +- Remove attempt to run under bash or ksh since have better NetBSD fix +- Fix win32/Makefile.gcc for MinGW [Bar-Lev] +- Add diagnostic messages when using CROSS_PREFIX in configure +- Added --sharedlibdir option to configure [Weigelt] +- Use hidden visibility attribute when available [Frysinger] + +Changes in 1.2.4.3 (10 Apr 2010) +- Only use CROSS_PREFIX in configure for ar and ranlib if they exist +- Use CROSS_PREFIX for nm [Bar-Lev] +- Assume _LARGEFILE64_SOURCE defined is equivalent to true +- Avoid use of undefined symbols in #if with && and || +- Make *64 prototypes in gzguts.h consistent with functions +- Add -shared load option for MinGW in configure [Bowler] +- Move z_off64_t to public interface, use instead of off64_t +- Remove ! 
from shell test in configure (not portable to Solaris) +- Change +0 macro tests to -0 for possibly increased portability + +Changes in 1.2.4.2 (9 Apr 2010) +- Add consistent carriage returns to readme.txt's in masmx86 and masmx64 +- Really provide prototypes for *64 functions when building without LFS +- Only define unlink() in minigzip.c if unistd.h not included +- Update README to point to contrib/vstudio project files +- Move projects/vc6 to old/ and remove projects/ +- Include stdlib.h in minigzip.c for setmode() definition under WinCE +- Clean up assembler builds in win32/Makefile.msc [Rowe] +- Include sys/types.h for Microsoft for off_t definition +- Fix memory leak on error in gz_open() +- Symbolize nm as $NM in configure [Weigelt] +- Use TEST_LDSHARED instead of LDSHARED to link test programs [Weigelt] +- Add +0 to _FILE_OFFSET_BITS and _LFS64_LARGEFILE in case not defined +- Fix bug in gzeof() to take into account unused input data +- Avoid initialization of structures with variables in puff.c +- Updated win32/README-WIN32.txt [Rowe] + +Changes in 1.2.4.1 (28 Mar 2010) +- Remove the use of [a-z] constructs for sed in configure [gentoo 310225] +- Remove $(SHAREDLIB) from LIBS in Makefile.in [Creech] +- Restore "for debugging" comment on sprintf() in gzlib.c +- Remove fdopen for MVS from gzguts.h +- Put new README-WIN32.txt in win32 [Rowe] +- Add check for shell to configure and invoke another shell if needed +- Fix big fat stinking bug in gzseek() on uncompressed files +- Remove vestigial F_OPEN64 define in zutil.h +- Set and check the value of _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE +- Avoid errors on non-LFS systems when applications define LFS macros +- Set EXE to ".exe" in configure for MINGW [Kahle] +- Match crc32() in crc32.c exactly to the prototype in zlib.h [Sherrill] +- Add prefix for cross-compilation in win32/makefile.gcc [Bar-Lev] +- Add DLL install in win32/makefile.gcc [Bar-Lev] +- Allow Linux* or linux* from uname in configure [Bar-Lev] +- Allow ldconfig to be redefined in configure and Makefile.in [Bar-Lev] +- Add cross-compilation prefixes to configure [Bar-Lev] +- Match type exactly in gz_load() invocation in gzread.c +- Match type exactly of zcalloc() in zutil.c to zlib.h alloc_func +- Provide prototypes for *64 functions when building zlib without LFS +- Don't use -lc when linking shared library on MinGW +- Remove errno.h check in configure and vestigial errno code in zutil.h + +Changes in 1.2.4 (14 Mar 2010) +- Fix VER3 extraction in configure for no fourth subversion +- Update zlib.3, add docs to Makefile.in to make .pdf out of it +- Add zlib.3.pdf to distribution +- Don't set error code in gzerror() if passed pointer is NULL +- Apply destination directory fixes to CMakeLists.txt [Lowman] +- Move #cmakedefine's to a new zconf.in.cmakein +- Restore zconf.h for builds that don't use configure or cmake +- Add distclean to dummy Makefile for convenience +- Update and improve INDEX, README, and FAQ +- Update CMakeLists.txt for the return of zconf.h [Lowman] +- Update contrib/vstudio/vc9 and vc10 [Vollant] +- Change libz.dll.a back to libzdll.a in win32/Makefile.gcc +- Apply license and readme changes to contrib/asm686 [Raiter] +- Check file name lengths and add -c option in minigzip.c [Li] +- Update contrib/amd64 and contrib/masmx86/ [Vollant] +- Avoid use of "eof" parameter in trees.c to not shadow library variable +- Update make_vms.com for removal of zlibdefs.h [Zinser] +- Update assembler code and vstudio projects in contrib [Vollant] +- Remove outdated 
assembler code contrib/masm686 and contrib/asm586 +- Remove old vc7 and vc8 from contrib/vstudio +- Update win32/Makefile.msc, add ZLIB_VER_SUBREVISION [Rowe] +- Fix memory leaks in gzclose_r() and gzclose_w(), file leak in gz_open() +- Add contrib/gcc_gvmat64 for longest_match and inflate_fast [Vollant] +- Remove *64 functions from win32/zlib.def (they're not 64-bit yet) +- Fix bug in void-returning vsprintf() case in gzwrite.c +- Fix name change from inflate.h in contrib/inflate86/inffas86.c +- Check if temporary file exists before removing in make_vms.com [Zinser] +- Fix make install and uninstall for --static option +- Fix usage of _MSC_VER in gzguts.h and zutil.h [Truta] +- Update readme.txt in contrib/masmx64 and masmx86 to assemble + +Changes in 1.2.3.9 (21 Feb 2010) +- Expunge gzio.c +- Move as400 build information to old +- Fix updates in contrib/minizip and contrib/vstudio +- Add const to vsnprintf test in configure to avoid warnings [Weigelt] +- Delete zconf.h (made by configure) [Weigelt] +- Change zconf.in.h to zconf.h.in per convention [Weigelt] +- Check for NULL buf in gzgets() +- Return empty string for gzgets() with len == 1 (like fgets()) +- Fix description of gzgets() in zlib.h for end-of-file, NULL return +- Update minizip to 1.1 [Vollant] +- Avoid MSVC loss of data warnings in gzread.c, gzwrite.c +- Note in zlib.h that gzerror() should be used to distinguish from EOF +- Remove use of snprintf() from gzlib.c +- Fix bug in gzseek() +- Update contrib/vstudio, adding vc9 and vc10 [Kuno, Vollant] +- Fix zconf.h generation in CMakeLists.txt [Lowman] +- Improve comments in zconf.h where modified by configure + +Changes in 1.2.3.8 (13 Feb 2010) +- Clean up text files (tabs, trailing whitespace, etc.) [Oberhumer] +- Use z_off64_t in gz_zero() and gz_skip() to match state->skip +- Avoid comparison problem when sizeof(int) == sizeof(z_off64_t) +- Revert to Makefile.in from 1.2.3.6 (live with the clutter) +- Fix missing error return in gzflush(), add zlib.h note +- Add *64 functions to zlib.map [Levin] +- Fix signed/unsigned comparison in gz_comp() +- Use SFLAGS when testing shared linking in configure +- Add --64 option to ./configure to use -m64 with gcc +- Fix ./configure --help to correctly name options +- Have make fail if a test fails [Levin] +- Avoid buffer overrun in contrib/masmx64/gvmat64.asm [Simpson] +- Remove assembler object files from contrib + +Changes in 1.2.3.7 (24 Jan 2010) +- Always gzopen() with O_LARGEFILE if available +- Fix gzdirect() to work immediately after gzopen() or gzdopen() +- Make gzdirect() more precise when the state changes while reading +- Improve zlib.h documentation in many places +- Catch memory allocation failure in gz_open() +- Complete close operation if seek forward in gzclose_w() fails +- Return Z_ERRNO from gzclose_r() if close() fails +- Return Z_STREAM_ERROR instead of EOF for gzclose() being passed NULL +- Return zero for gzwrite() errors to match zlib.h description +- Return -1 on gzputs() error to match zlib.h description +- Add zconf.in.h to allow recovery from configure modification [Weigelt] +- Fix static library permissions in Makefile.in [Weigelt] +- Avoid warnings in configure tests that hide functionality [Weigelt] +- Add *BSD and DragonFly to Linux case in configure [gentoo 123571] +- Change libzdll.a to libz.dll.a in win32/Makefile.gcc [gentoo 288212] +- Avoid access of uninitialized data for first inflateReset2 call [Gomes] +- Keep object files in subdirectories to reduce the clutter somewhat +- Remove default Makefile 
and zlibdefs.h, add dummy Makefile +- Add new external functions to Z_PREFIX, remove duplicates, z_z_ -> z_ +- Remove zlibdefs.h completely -- modify zconf.h instead + +Changes in 1.2.3.6 (17 Jan 2010) +- Avoid void * arithmetic in gzread.c and gzwrite.c +- Make compilers happier with const char * for gz_error message +- Avoid unused parameter warning in inflate.c +- Avoid signed-unsigned comparison warning in inflate.c +- Indent #pragma's for traditional C +- Fix usage of strwinerror() in glib.c, change to gz_strwinerror() +- Correct email address in configure for system options +- Update make_vms.com and add make_vms.com to contrib/minizip [Zinser] +- Update zlib.map [Brown] +- Fix Makefile.in for Solaris 10 make of example64 and minizip64 [Torok] +- Apply various fixes to CMakeLists.txt [Lowman] +- Add checks on len in gzread() and gzwrite() +- Add error message for no more room for gzungetc() +- Remove zlib version check in gzwrite() +- Defer compression of gzprintf() result until need to +- Use snprintf() in gzdopen() if available +- Remove USE_MMAP configuration determination (only used by minigzip) +- Remove examples/pigz.c (available separately) +- Update examples/gun.c to 1.6 + +Changes in 1.2.3.5 (8 Jan 2010) +- Add space after #if in zutil.h for some compilers +- Fix relatively harmless bug in deflate_fast() [Exarevsky] +- Fix same problem in deflate_slow() +- Add $(SHAREDLIBV) to LIBS in Makefile.in [Brown] +- Add deflate_rle() for faster Z_RLE strategy run-length encoding +- Add deflate_huff() for faster Z_HUFFMAN_ONLY encoding +- Change name of "write" variable in inffast.c to avoid library collisions +- Fix premature EOF from gzread() in gzio.c [Brown] +- Use zlib header window size if windowBits is 0 in inflateInit2() +- Remove compressBound() call in deflate.c to avoid linking compress.o +- Replace use of errno in gz* with functions, support WinCE [Alves] +- Provide alternative to perror() in minigzip.c for WinCE [Alves] +- Don't use _vsnprintf on later versions of MSVC [Lowman] +- Add CMake build script and input file [Lowman] +- Update contrib/minizip to 1.1 [Svensson, Vollant] +- Moved nintendods directory from contrib to . +- Replace gzio.c with a new set of routines with the same functionality +- Add gzbuffer(), gzoffset(), gzclose_r(), gzclose_w() as part of above +- Update contrib/minizip to 1.1b +- Change gzeof() to return 0 on error instead of -1 to agree with zlib.h + +Changes in 1.2.3.4 (21 Dec 2009) +- Use old school .SUFFIXES in Makefile.in for FreeBSD compatibility +- Update comments in configure and Makefile.in for default --shared +- Fix test -z's in configure [Marquess] +- Build examplesh and minigzipsh when not testing +- Change NULL's to Z_NULL's in deflate.c and in comments in zlib.h +- Import LDFLAGS from the environment in configure +- Fix configure to populate SFLAGS with discovered CFLAGS options +- Adapt make_vms.com to the new Makefile.in [Zinser] +- Add zlib2ansi script for C++ compilation [Marquess] +- Add _FILE_OFFSET_BITS=64 test to make test (when applicable) +- Add AMD64 assembler code for longest match to contrib [Teterin] +- Include options from $SFLAGS when doing $LDSHARED +- Simplify 64-bit file support by introducing z_off64_t type +- Make shared object files in objs directory to work around old Sun cc +- Use only three-part version number for Darwin shared compiles +- Add rc option to ar in Makefile.in for when ./configure not run +- Add -WI,-rpath,. 
to LDFLAGS for OSF 1 V4* +- Set LD_LIBRARYN32_PATH for SGI IRIX shared compile +- Protect against _FILE_OFFSET_BITS being defined when compiling zlib +- Rename Makefile.in targets allstatic to static and allshared to shared +- Fix static and shared Makefile.in targets to be independent +- Correct error return bug in gz_open() by setting state [Brown] +- Put spaces before ;;'s in configure for better sh compatibility +- Add pigz.c (parallel implementation of gzip) to examples/ +- Correct constant in crc32.c to UL [Leventhal] +- Reject negative lengths in crc32_combine() +- Add inflateReset2() function to work like inflateEnd()/inflateInit2() +- Include sys/types.h for _LARGEFILE64_SOURCE [Brown] +- Correct typo in doc/algorithm.txt [Janik] +- Fix bug in adler32_combine() [Zhu] +- Catch missing-end-of-block-code error in all inflates and in puff + Assures that random input to inflate eventually results in an error +- Added enough.c (calculation of ENOUGH for inftrees.h) to examples/ +- Update ENOUGH and its usage to reflect discovered bounds +- Fix gzerror() error report on empty input file [Brown] +- Add ush casts in trees.c to avoid pedantic runtime errors +- Fix typo in zlib.h uncompress() description [Reiss] +- Correct inflate() comments with regard to automatic header detection +- Remove deprecation comment on Z_PARTIAL_FLUSH (it stays) +- Put new version of gzlog (2.0) in examples with interruption recovery +- Add puff compile option to permit invalid distance-too-far streams +- Add puff TEST command options, ability to read piped input +- Prototype the *64 functions in zlib.h when _FILE_OFFSET_BITS == 64, but + _LARGEFILE64_SOURCE not defined +- Fix Z_FULL_FLUSH to truly erase the past by resetting s->strstart +- Fix deflateSetDictionary() to use all 32K for output consistency +- Remove extraneous #define MIN_LOOKAHEAD in deflate.c (in deflate.h) +- Clear bytes after deflate lookahead to avoid use of uninitialized data +- Change a limit in inftrees.c to be more transparent to Coverity Prevent +- Update win32/zlib.def with exported symbols from zlib.h +- Correct spelling errors in zlib.h [Willem, Sobrado] +- Allow Z_BLOCK for deflate() to force a new block +- Allow negative bits in inflatePrime() to delete existing bit buffer +- Add Z_TREES flush option to inflate() to return at end of trees +- Add inflateMark() to return current state information for random access +- Add Makefile for NintendoDS to contrib [Costa] +- Add -w in configure compile tests to avoid spurious warnings [Beucler] +- Fix typos in zlib.h comments for deflateSetDictionary() +- Fix EOF detection in transparent gzread() [Maier] + +Changes in 1.2.3.3 (2 October 2006) +- Make --shared the default for configure, add a --static option +- Add compile option to permit invalid distance-too-far streams +- Add inflateUndermine() function which is required to enable above +- Remove use of "this" variable name for C++ compatibility [Marquess] +- Add testing of shared library in make test, if shared library built +- Use ftello() and fseeko() if available instead of ftell() and fseek() +- Provide two versions of all functions that use the z_off_t type for + binary compatibility -- a normal version and a 64-bit offset version, + per the Large File Support Extension when _LARGEFILE64_SOURCE is + defined; use the 64-bit versions by default when _FILE_OFFSET_BITS + is defined to be 64 +- Add a --uname= option to configure to perhaps help with cross-compiling + +Changes in 1.2.3.2 (3 September 2006) +- Turn off silly Borland warnings 
[Hay] +- Use off64_t and define _LARGEFILE64_SOURCE when present +- Fix missing dependency on inffixed.h in Makefile.in +- Rig configure --shared to build both shared and static [Teredesai, Truta] +- Remove zconf.in.h and instead create a new zlibdefs.h file +- Fix contrib/minizip/unzip.c non-encrypted after encrypted [Vollant] +- Add treebuild.xml (see http://treebuild.metux.de/) [Weigelt] + +Changes in 1.2.3.1 (16 August 2006) +- Add watcom directory with OpenWatcom make files [Daniel] +- Remove #undef of FAR in zconf.in.h for MVS [Fedtke] +- Update make_vms.com [Zinser] +- Use -fPIC for shared build in configure [Teredesai, Nicholson] +- Use only major version number for libz.so on IRIX and OSF1 [Reinholdtsen] +- Use fdopen() (not _fdopen()) for Interix in zutil.h [B?ck] +- Add some FAQ entries about the contrib directory +- Update the MVS question in the FAQ +- Avoid extraneous reads after EOF in gzio.c [Brown] +- Correct spelling of "successfully" in gzio.c [Randers-Pehrson] +- Add comments to zlib.h about gzerror() usage [Brown] +- Set extra flags in gzip header in gzopen() like deflate() does +- Make configure options more compatible with double-dash conventions + [Weigelt] +- Clean up compilation under Solaris SunStudio cc [Rowe, Reinholdtsen] +- Fix uninstall target in Makefile.in [Truta] +- Add pkgconfig support [Weigelt] +- Use $(DESTDIR) macro in Makefile.in [Reinholdtsen, Weigelt] +- Replace set_data_type() with a more accurate detect_data_type() in + trees.c, according to the txtvsbin.txt document [Truta] +- Swap the order of #include and #include "zlib.h" in + gzio.c, example.c and minigzip.c [Truta] +- Shut up annoying VS2005 warnings about standard C deprecation [Rowe, + Truta] (where?) +- Fix target "clean" from win32/Makefile.bor [Truta] +- Create .pdb and .manifest files in win32/makefile.msc [Ziegler, Rowe] +- Update zlib www home address in win32/DLL_FAQ.txt [Truta] +- Update contrib/masmx86/inffas32.asm for VS2005 [Vollant, Van Wassenhove] +- Enable browse info in the "Debug" and "ASM Debug" configurations in + the Visual C++ 6 project, and set (non-ASM) "Debug" as default [Truta] +- Add pkgconfig support [Weigelt] +- Add ZLIB_VER_MAJOR, ZLIB_VER_MINOR and ZLIB_VER_REVISION in zlib.h, + for use in win32/zlib1.rc [Polushin, Rowe, Truta] +- Add a document that explains the new text detection scheme to + doc/txtvsbin.txt [Truta] +- Add rfc1950.txt, rfc1951.txt and rfc1952.txt to doc/ [Truta] +- Move algorithm.txt into doc/ [Truta] +- Synchronize FAQ with website +- Fix compressBound(), was low for some pathological cases [Fearnley] +- Take into account wrapper variations in deflateBound() +- Set examples/zpipe.c input and output to binary mode for Windows +- Update examples/zlib_how.html with new zpipe.c (also web site) +- Fix some warnings in examples/gzlog.c and examples/zran.c (it seems + that gcc became pickier in 4.0) +- Add zlib.map for Linux: "All symbols from zlib-1.1.4 remain + un-versioned, the patch adds versioning only for symbols introduced in + zlib-1.2.0 or later. It also declares as local those symbols which are + not designed to be exported." 
[Levin] +- Update Z_PREFIX list in zconf.in.h, add --zprefix option to configure +- Do not initialize global static by default in trees.c, add a response + NO_INIT_GLOBAL_POINTERS to initialize them if needed [Marquess] +- Don't use strerror() in gzio.c under WinCE [Yakimov] +- Don't use errno.h in zutil.h under WinCE [Yakimov] +- Move arguments for AR to its usage to allow replacing ar [Marot] +- Add HAVE_VISIBILITY_PRAGMA in zconf.in.h for Mozilla [Randers-Pehrson] +- Improve inflateInit() and inflateInit2() documentation +- Fix structure size comment in inflate.h +- Change configure help option from --h* to --help [Santos] + Changes in 1.2.3 (18 July 2005) - Apply security vulnerability fixes to contrib/infback9 as well - Clean up some text files (carriage returns, trailing space) @@ -13,7 +630,7 @@ compile - Fix some spelling errors in comments [Betts] - Correct inflateInit2() error return documentation in zlib.h -- Added zran.c example of compressed data random access to examples +- Add zran.c example of compressed data random access to examples directory, shows use of inflatePrime() - Fix cast for assignments to strm->state in inflate.c and infback.c - Fix zlibCompileFlags() in zutil.c to use 1L for long shifts [Oberhumer] diff --git a/Modules/zlib/FAQ b/Modules/zlib/FAQ --- a/Modules/zlib/FAQ +++ b/Modules/zlib/FAQ @@ -3,8 +3,8 @@ If your question is not there, please check the zlib home page -http://www.zlib.org which may have more recent information. -The lastest zlib FAQ is at http://www.gzip.org/zlib/zlib_faq.html +http://zlib.net/ which may have more recent information. +The lastest zlib FAQ is at http://zlib.net/zlib_faq.html 1. Is zlib Y2K-compliant? @@ -13,54 +13,51 @@ 2. Where can I get a Windows DLL version? - The zlib sources can be compiled without change to produce a DLL. - See the file win32/DLL_FAQ.txt in the zlib distribution. - Pointers to the precompiled DLL are found in the zlib web site at - http://www.zlib.org. + The zlib sources can be compiled without change to produce a DLL. See the + file win32/DLL_FAQ.txt in the zlib distribution. Pointers to the + precompiled DLL are found in the zlib web site at http://zlib.net/ . 3. Where can I get a Visual Basic interface to zlib? See - * http://www.dogma.net/markn/articles/zlibtool/zlibtool.htm - * contrib/visual-basic.txt in the zlib distribution + * http://marknelson.us/1997/01/01/zlib-engine/ * win32/DLL_FAQ.txt in the zlib distribution 4. compress() returns Z_BUF_ERROR. - Make sure that before the call of compress, the length of the compressed - buffer is equal to the total size of the compressed buffer and not - zero. For Visual Basic, check that this parameter is passed by reference + Make sure that before the call of compress(), the length of the compressed + buffer is equal to the available size of the compressed buffer and not + zero. For Visual Basic, check that this parameter is passed by reference ("as any"), not by value ("as long"). 5. deflate() or inflate() returns Z_BUF_ERROR. - Before making the call, make sure that avail_in and avail_out are not - zero. When setting the parameter flush equal to Z_FINISH, also make sure - that avail_out is big enough to allow processing all pending input. - Note that a Z_BUF_ERROR is not fatal--another call to deflate() or - inflate() can be made with more input or output space. 
A Z_BUF_ERROR - may in fact be unavoidable depending on how the functions are used, since - it is not possible to tell whether or not there is more output pending - when strm.avail_out returns with zero. + Before making the call, make sure that avail_in and avail_out are not zero. + When setting the parameter flush equal to Z_FINISH, also make sure that + avail_out is big enough to allow processing all pending input. Note that a + Z_BUF_ERROR is not fatal--another call to deflate() or inflate() can be + made with more input or output space. A Z_BUF_ERROR may in fact be + unavoidable depending on how the functions are used, since it is not + possible to tell whether or not there is more output pending when + strm.avail_out returns with zero. See http://zlib.net/zlib_how.html for a + heavily annotated example. 6. Where's the zlib documentation (man pages, etc.)? - It's in zlib.h for the moment, and Francis S. Lin has converted it to a - web page zlib.html. Volunteers to transform this to Unix-style man pages, - please contact us (zlib at gzip.org). Examples of zlib usage are in the files - example.c and minigzip.c. + It's in zlib.h . Examples of zlib usage are in the files test/example.c + and test/minigzip.c, with more in examples/ . 7. Why don't you use GNU autoconf or libtool or ...? - Because we would like to keep zlib as a very small and simple - package. zlib is rather portable and doesn't need much configuration. + Because we would like to keep zlib as a very small and simple package. + zlib is rather portable and doesn't need much configuration. 8. I found a bug in zlib. - Most of the time, such problems are due to an incorrect usage of - zlib. Please try to reproduce the problem with a small program and send - the corresponding source to us at zlib at gzip.org . Do not send - multi-megabyte data files without prior agreement. + Most of the time, such problems are due to an incorrect usage of zlib. + Please try to reproduce the problem with a small program and send the + corresponding source to us at zlib at gzip.org . Do not send multi-megabyte + data files without prior agreement. 9. Why do I get "undefined reference to gzputc"? @@ -82,13 +79,15 @@ 12. Can zlib handle .Z files? - No, sorry. You have to spawn an uncompress or gunzip subprocess, or adapt + No, sorry. You have to spawn an uncompress or gunzip subprocess, or adapt the code of uncompress on your own. 13. How can I make a Unix shared library? - make clean - ./configure -s + By default a shared (and a static) library is built for Unix. So: + + make distclean + ./configure make 14. How do I install a shared zlib library on Unix? @@ -99,8 +98,10 @@ However, many flavors of Unix come with a shared zlib already installed. Before going to the trouble of compiling a shared version of zlib and - trying to install it, you may want to check if it's already there! If you - can #include , it's there. The -lz option will probably link to it. + trying to install it, you may want to check if it's already there! If you + can #include , it's there. The -lz option will probably link to + it. You can check the version at the top of zlib.h or with the + ZLIB_VERSION symbol defined in zlib.h . 15. I have a question about OttoPDF. @@ -109,8 +110,8 @@ 16. Can zlib decode Flate data in an Adobe PDF file? - Yes. See http://www.fastio.com/ (ClibPDF), or http://www.pdflib.com/ . - To modify PDF forms, see http://sourceforge.net/projects/acroformtool/ . + Yes. See http://www.pdflib.com/ . 
To modify PDF forms, see + http://sourceforge.net/projects/acroformtool/ . 17. Why am I getting this "register_frame_info not found" error on Solaris? @@ -121,67 +122,67 @@ symbol __register_frame_info: referenced symbol not found The symbol __register_frame_info is not part of zlib, it is generated by - the C compiler (cc or gcc). You must recompile applications using zlib - which have this problem. This problem is specific to Solaris. See + the C compiler (cc or gcc). You must recompile applications using zlib + which have this problem. This problem is specific to Solaris. See http://www.sunfreeware.com for Solaris versions of zlib and applications using zlib. 18. Why does gzip give an error on a file I make with compress/deflate? The compress and deflate functions produce data in the zlib format, which - is different and incompatible with the gzip format. The gz* functions in - zlib on the other hand use the gzip format. Both the zlib and gzip - formats use the same compressed data format internally, but have different - headers and trailers around the compressed data. + is different and incompatible with the gzip format. The gz* functions in + zlib on the other hand use the gzip format. Both the zlib and gzip formats + use the same compressed data format internally, but have different headers + and trailers around the compressed data. 19. Ok, so why are there two different formats? - The gzip format was designed to retain the directory information about - a single file, such as the name and last modification date. The zlib - format on the other hand was designed for in-memory and communication - channel applications, and has a much more compact header and trailer and - uses a faster integrity check than gzip. + The gzip format was designed to retain the directory information about a + single file, such as the name and last modification date. The zlib format + on the other hand was designed for in-memory and communication channel + applications, and has a much more compact header and trailer and uses a + faster integrity check than gzip. 20. Well that's nice, but how do I make a gzip file in memory? You can request that deflate write the gzip format instead of the zlib - format using deflateInit2(). You can also request that inflate decode - the gzip format using inflateInit2(). Read zlib.h for more details. + format using deflateInit2(). You can also request that inflate decode the + gzip format using inflateInit2(). Read zlib.h for more details. 21. Is zlib thread-safe? - Yes. However any library routines that zlib uses and any application- - provided memory allocation routines must also be thread-safe. zlib's gz* + Yes. However any library routines that zlib uses and any application- + provided memory allocation routines must also be thread-safe. zlib's gz* functions use stdio library routines, and most of zlib's functions use the - library memory allocation routines by default. zlib's Init functions allow - for the application to provide custom memory allocation routines. + library memory allocation routines by default. zlib's *Init* functions + allow for the application to provide custom memory allocation routines. Of course, you should only operate on any given zlib or gzip stream from a single thread at a time. 22. Can I use zlib in my commercial application? - Yes. Please read the license in zlib.h. + Yes. Please read the license in zlib.h. 23. Is zlib under the GNU license? - No. Please read the license in zlib.h. + No. Please read the license in zlib.h. 24. 
The license says that altered source versions must be "plainly marked". So what exactly do I need to do to meet that requirement? - You need to change the ZLIB_VERSION and ZLIB_VERNUM #defines in zlib.h. In + You need to change the ZLIB_VERSION and ZLIB_VERNUM #defines in zlib.h. In particular, the final version number needs to be changed to "f", and an - identification string should be appended to ZLIB_VERSION. Version numbers + identification string should be appended to ZLIB_VERSION. Version numbers x.x.x.f are reserved for modifications to zlib by others than the zlib - maintainers. For example, if the version of the base zlib you are altering + maintainers. For example, if the version of the base zlib you are altering is "1.2.3.4", then in zlib.h you should change ZLIB_VERNUM to 0x123f, and - ZLIB_VERSION to something like "1.2.3.f-zachary-mods-v3". You can also + ZLIB_VERSION to something like "1.2.3.f-zachary-mods-v3". You can also update the version strings in deflate.c and inftrees.c. For altered source distributions, you should also note the origin and nature of the changes in zlib.h, as well as in ChangeLog and README, along - with the dates of the alterations. The origin should include at least your + with the dates of the alterations. The origin should include at least your name (or your company's name), and an email address to contact for help or issues with the library. @@ -197,105 +198,112 @@ 26. Will zlib work on a 64-bit machine? - It should. It has been tested on 64-bit machines, and has no dependence - on any data types being limited to 32-bits in length. If you have any + Yes. It has been tested on 64-bit machines, and has no dependence on any + data types being limited to 32-bits in length. If you have any difficulties, please provide a complete problem report to zlib at gzip.org 27. Will zlib decompress data from the PKWare Data Compression Library? - No. The PKWare DCL uses a completely different compressed data format - than does PKZIP and zlib. However, you can look in zlib's contrib/blast + No. The PKWare DCL uses a completely different compressed data format than + does PKZIP and zlib. However, you can look in zlib's contrib/blast directory for a possible solution to your problem. 28. Can I access data randomly in a compressed stream? - No, not without some preparation. If when compressing you periodically - use Z_FULL_FLUSH, carefully write all the pending data at those points, - and keep an index of those locations, then you can start decompression - at those points. You have to be careful to not use Z_FULL_FLUSH too - often, since it can significantly degrade compression. + No, not without some preparation. If when compressing you periodically use + Z_FULL_FLUSH, carefully write all the pending data at those points, and + keep an index of those locations, then you can start decompression at those + points. You have to be careful to not use Z_FULL_FLUSH too often, since it + can significantly degrade compression. Alternatively, you can scan a + deflate stream once to generate an index, and then use that index for + random access. See examples/zran.c . 29. Does zlib work on MVS, OS/390, CICS, etc.? - We don't know for sure. We have heard occasional reports of success on - these systems. If you do use it on one of these, please provide us with - a report, instructions, and patches that we can reference when we get - these questions. Thanks. + It has in the past, but we have not heard of any recent evidence. 
There + were working ports of zlib 1.1.4 to MVS, but those links no longer work. + If you know of recent, successful applications of zlib on these operating + systems, please let us know. Thanks. -30. Is there some simpler, easier to read version of inflate I can look at - to understand the deflate format? +30. Is there some simpler, easier to read version of inflate I can look at to + understand the deflate format? - First off, you should read RFC 1951. Second, yes. Look in zlib's + First off, you should read RFC 1951. Second, yes. Look in zlib's contrib/puff directory. 31. Does zlib infringe on any patents? - As far as we know, no. In fact, that was originally the whole point behind - zlib. Look here for some more information: + As far as we know, no. In fact, that was originally the whole point behind + zlib. Look here for some more information: http://www.gzip.org/#faq11 32. Can zlib work with greater than 4 GB of data? - Yes. inflate() and deflate() will process any amount of data correctly. + Yes. inflate() and deflate() will process any amount of data correctly. Each call of inflate() or deflate() is limited to input and output chunks of the maximum value that can be stored in the compiler's "unsigned int" - type, but there is no limit to the number of chunks. Note however that the - strm.total_in and strm_total_out counters may be limited to 4 GB. These + type, but there is no limit to the number of chunks. Note however that the + strm.total_in and strm_total_out counters may be limited to 4 GB. These counters are provided as a convenience and are not used internally by - inflate() or deflate(). The application can easily set up its own counters + inflate() or deflate(). The application can easily set up its own counters updated after each call of inflate() or deflate() to count beyond 4 GB. compress() and uncompress() may be limited to 4 GB, since they operate in a - single call. gzseek() and gztell() may be limited to 4 GB depending on how - zlib is compiled. See the zlibCompileFlags() function in zlib.h. + single call. gzseek() and gztell() may be limited to 4 GB depending on how + zlib is compiled. See the zlibCompileFlags() function in zlib.h. - The word "may" appears several times above since there is a 4 GB limit - only if the compiler's "long" type is 32 bits. If the compiler's "long" - type is 64 bits, then the limit is 16 exabytes. + The word "may" appears several times above since there is a 4 GB limit only + if the compiler's "long" type is 32 bits. If the compiler's "long" type is + 64 bits, then the limit is 16 exabytes. 33. Does zlib have any security vulnerabilities? - The only one that we are aware of is potentially in gzprintf(). If zlib - is compiled to use sprintf() or vsprintf(), then there is no protection - against a buffer overflow of a 4K string space, other than the caller of - gzprintf() assuring that the output will not exceed 4K. On the other - hand, if zlib is compiled to use snprintf() or vsnprintf(), which should - normally be the case, then there is no vulnerability. The ./configure - script will display warnings if an insecure variation of sprintf() will - be used by gzprintf(). Also the zlibCompileFlags() function will return - information on what variant of sprintf() is used by gzprintf(). + The only one that we are aware of is potentially in gzprintf(). 
If zlib is + compiled to use sprintf() or vsprintf(), then there is no protection + against a buffer overflow of an 8K string space (or other value as set by + gzbuffer()), other than the caller of gzprintf() assuring that the output + will not exceed 8K. On the other hand, if zlib is compiled to use + snprintf() or vsnprintf(), which should normally be the case, then there is + no vulnerability. The ./configure script will display warnings if an + insecure variation of sprintf() will be used by gzprintf(). Also the + zlibCompileFlags() function will return information on what variant of + sprintf() is used by gzprintf(). If you don't have snprintf() or vsnprintf() and would like one, you can find a portable implementation here: http://www.ijs.si/software/snprintf/ - Note that you should be using the most recent version of zlib. Versions - 1.1.3 and before were subject to a double-free vulnerability. + Note that you should be using the most recent version of zlib. Versions + 1.1.3 and before were subject to a double-free vulnerability, and versions + 1.2.1 and 1.2.2 were subject to an access exception when decompressing + invalid compressed data. 34. Is there a Java version of zlib? Probably what you want is to use zlib in Java. zlib is already included as part of the Java SDK in the java.util.zip package. If you really want a version of zlib written in the Java language, look on the zlib home - page for links: http://www.zlib.org/ + page for links: http://zlib.net/ . 35. I get this or that compiler or source-code scanner warning when I crank it up to maximally-pedantic. Can't you guys write proper code? Many years ago, we gave up attempting to avoid warnings on every compiler - in the universe. It just got to be a waste of time, and some compilers - were downright silly. So now, we simply make sure that the code always - works. + in the universe. It just got to be a waste of time, and some compilers + were downright silly as well as contradicted each other. So now, we simply + make sure that the code always works. 36. Valgrind (or some similar memory access checker) says that deflate is performing a conditional jump that depends on an uninitialized value. Isn't that a bug? - No. That is intentional for performance reasons, and the output of - deflate is not affected. This only started showing up recently since - zlib 1.2.x uses malloc() by default for allocations, whereas earlier - versions used calloc(), which zeros out the allocated memory. + No. That is intentional for performance reasons, and the output of deflate + is not affected. This only started showing up recently since zlib 1.2.x + uses malloc() by default for allocations, whereas earlier versions used + calloc(), which zeros out the allocated memory. Even though the code was + correct, versions 1.2.4 and later was changed to not stimulate these + checkers. 37. Will zlib read the (insert any ancient or arcane format here) compressed data format? @@ -305,20 +313,21 @@ 38. How can I encrypt/decrypt zip files with zlib? - zlib doesn't support encryption. The original PKZIP encryption is very weak - and can be broken with freely available programs. To get strong encryption, - use GnuPG, http://www.gnupg.org/ , which already includes zlib compression. - For PKZIP compatible "encryption", look at http://www.info-zip.org/ + zlib doesn't support encryption. The original PKZIP encryption is very + weak and can be broken with freely available programs. 
To get strong + encryption, use GnuPG, http://www.gnupg.org/ , which already includes zlib + compression. For PKZIP compatible "encryption", look at + http://www.info-zip.org/ 39. What's the difference between the "gzip" and "deflate" HTTP 1.1 encodings? - "gzip" is the gzip format, and "deflate" is the zlib format. They should - probably have called the second one "zlib" instead to avoid confusion - with the raw deflate compressed data format. While the HTTP 1.1 RFC 2616 + "gzip" is the gzip format, and "deflate" is the zlib format. They should + probably have called the second one "zlib" instead to avoid confusion with + the raw deflate compressed data format. While the HTTP 1.1 RFC 2616 correctly points to the zlib specification in RFC 1950 for the "deflate" transfer encoding, there have been reports of servers and browsers that incorrectly produce or expect raw deflate data per the deflate - specficiation in RFC 1951, most notably Microsoft. So even though the + specification in RFC 1951, most notably Microsoft. So even though the "deflate" transfer encoding using the zlib format would be the more efficient approach (and in fact exactly what the zlib format was designed for), using the "gzip" transfer encoding is probably more reliable due to @@ -328,12 +337,32 @@ 40. Does zlib support the new "Deflate64" format introduced by PKWare? - No. PKWare has apparently decided to keep that format proprietary, since - they have not documented it as they have previous compression formats. - In any case, the compression improvements are so modest compared to other - more modern approaches, that it's not worth the effort to implement. + No. PKWare has apparently decided to keep that format proprietary, since + they have not documented it as they have previous compression formats. In + any case, the compression improvements are so modest compared to other more + modern approaches, that it's not worth the effort to implement. -41. Can you please sign these lengthy legal documents and fax them back to us +41. I'm having a problem with the zip functions in zlib, can you help? + + There are no zip functions in zlib. You are probably using minizip by + Giles Vollant, which is found in the contrib directory of zlib. It is not + part of zlib. In fact none of the stuff in contrib is part of zlib. The + files in there are not supported by the zlib authors. You need to contact + the authors of the respective contribution for help. + +42. The match.asm code in contrib is under the GNU General Public License. + Since it's part of zlib, doesn't that mean that all of zlib falls under the + GNU GPL? + + No. The files in contrib are not part of zlib. They were contributed by + other authors and are provided as a convenience to the user within the zlib + distribution. Each item in contrib has its own license. + +43. Is zlib subject to export controls? What is its ECCN? + + zlib is not subject to export controls, and so is classified as EAR99. + +44. Can you please sign these lengthy legal documents and fax them back to us so that we can use your software in our product? No. Go away. Shoo. 
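The FAQ answers above (items 4, 5 and 20) can be made concrete with a short sketch. The code below is not part of the zlib sources or of this changeset; the function name gzip_in_memory and the abbreviated error handling are assumptions of the example. It compresses a buffer into the gzip format in memory by passing windowBits = 15 + 16 to deflateInit2(), and checks the output capacity against deflateBound() so that a single deflate(..., Z_FINISH) call cannot run out of room.

    #include "zlib.h"

    /* Compress in[0..in_len-1] into the gzip format in memory.  On entry,
     * *out_len holds the capacity of out; on success it is replaced by the
     * number of gzip bytes actually produced. */
    int gzip_in_memory(unsigned char *in, uLong in_len,
                       unsigned char *out, uLong *out_len)
    {
        z_stream strm;
        int ret;

        strm.zalloc = Z_NULL;             /* use zlib's default allocator */
        strm.zfree  = Z_NULL;
        strm.opaque = Z_NULL;
        ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                           15 + 16,       /* 15 window bits + 16 selects the gzip wrapper */
                           8, Z_DEFAULT_STRATEGY);
        if (ret != Z_OK)
            return ret;
        if (*out_len < deflateBound(&strm, in_len)) {
            deflateEnd(&strm);
            return Z_BUF_ERROR;           /* FAQ #4: output buffer too small */
        }

        strm.next_in   = in;
        strm.avail_in  = (uInt)in_len;
        strm.next_out  = out;
        strm.avail_out = (uInt)*out_len;  /* FAQ #5: avail_out must not be zero */

        ret = deflate(&strm, Z_FINISH);   /* one shot; room was guaranteed above */
        *out_len = strm.total_out;
        deflateEnd(&strm);
        return ret == Z_STREAM_END ? Z_OK : ret;
    }

Because this is a single call, the amount of input is limited to what fits in an unsigned int; see item 32 above for processing larger data in chunks.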
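For the chunked, streaming case described in items 5 and 32, a sketch in the spirit of the heavily annotated example cited in item 5 (http://zlib.net/zlib_how.html) might look as follows. It is not part of this changeset; inflate_to_file and CHUNK are names invented for the example, and error checks on the stdio calls are omitted. inflateInit2() is called with windowBits = 15 + 32, which zlib.h documents as enabling automatic detection of a zlib or gzip header, and Z_BUF_ERROR is treated as a request for more input or output space rather than as a fatal error.

    #include <stdio.h>
    #include "zlib.h"

    #define CHUNK 16384                   /* arbitrary buffer size for this sketch */

    /* Decompress a zlib or gzip stream read from src, writing the raw data
     * to dst.  Returns Z_OK on success or a zlib error code. */
    int inflate_to_file(FILE *src, FILE *dst)
    {
        unsigned char in[CHUNK], out[CHUNK];
        z_stream strm;
        int ret;

        strm.zalloc = Z_NULL;
        strm.zfree  = Z_NULL;
        strm.opaque = Z_NULL;
        strm.next_in = Z_NULL;
        strm.avail_in = 0;
        ret = inflateInit2(&strm, 15 + 32);   /* + 32: auto-detect zlib or gzip header */
        if (ret != Z_OK)
            return ret;

        do {
            strm.avail_in = (uInt)fread(in, 1, CHUNK, src);
            if (strm.avail_in == 0)
                break;                        /* input ended before Z_STREAM_END */
            strm.next_in = in;
            do {                              /* drain all output for this input chunk */
                strm.next_out = out;
                strm.avail_out = CHUNK;
                ret = inflate(&strm, Z_NO_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END && ret != Z_BUF_ERROR) {
                    inflateEnd(&strm);        /* Z_DATA_ERROR, Z_MEM_ERROR, ... */
                    return ret;
                }
                fwrite(out, 1, CHUNK - strm.avail_out, dst);
            } while (strm.avail_out == 0);
        } while (ret != Z_STREAM_END);        /* Z_BUF_ERROR here just means "feed more" */

        inflateEnd(&strm);
        return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
    }

deflate() can be driven with the same avail_in/avail_out pattern in the other direction; the annotated example referenced in item 5 covers both.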
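Item 21 notes that the *Init* functions let the application supply its own memory allocation routines. A minimal sketch of that hook follows, again not taken from the zlib sources; app_alloc, app_free and init_with_app_allocators are hypothetical names, and the callbacks simply wrap calloc()/free(), so they are only as thread-safe as the C library makes them.

    #include <stdlib.h>
    #include "zlib.h"

    /* Hypothetical application callbacks matching zlib's alloc_func/free_func. */
    static voidpf app_alloc(voidpf opaque, uInt items, uInt size)
    {
        (void)opaque;                 /* no per-stream context in this sketch */
        return calloc(items, size);   /* must be thread-safe in the application */
    }

    static void app_free(voidpf opaque, voidpf address)
    {
        (void)opaque;
        free(address);
    }

    /* Prepare a z_stream so that inflateInit() uses the callbacks above
     * instead of zlib's default malloc()-based routines. */
    int init_with_app_allocators(z_stream *strm)
    {
        strm->zalloc = app_alloc;
        strm->zfree  = app_free;
        strm->opaque = Z_NULL;
        strm->next_in = Z_NULL;       /* no input yet; inflateInit() allows this */
        strm->avail_in = 0;
        return inflateInit(strm);
    }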
diff --git a/Modules/zlib/INDEX b/Modules/zlib/INDEX --- a/Modules/zlib/INDEX +++ b/Modules/zlib/INDEX @@ -1,23 +1,37 @@ +CMakeLists.txt cmake build file ChangeLog history of changes FAQ Frequently Asked Questions about zlib INDEX this file -Makefile makefile for Unix (generated by configure) -Makefile.in makefile for Unix (template for configure) +Makefile dummy Makefile that tells you to ./configure +Makefile.in template for Unix Makefile README guess what -algorithm.txt description of the (de)compression algorithm configure configure script for Unix -zconf.in.h template for zconf.h (used by configure) +make_vms.com makefile for VMS +test/example.c zlib usages examples for build testing +test/minigzip.c minimal gzip-like functionality for build testing +test/infcover.c inf*.c code coverage for build coverage testing +treebuild.xml XML description of source file dependencies +zconf.h.cmakein zconf.h template for cmake +zconf.h.in zconf.h template for configure +zlib.3 Man page for zlib +zlib.3.pdf Man page in PDF format +zlib.map Linux symbol information +zlib.pc.in Template for pkg-config descriptor +zlib.pc.cmakein zlib.pc template for cmake +zlib2ansi perl script to convert source files for C++ compilation amiga/ makefiles for Amiga SAS C -as400/ makefiles for IBM AS/400 +as400/ makefiles for AS/400 +doc/ documentation for formats and algorithms msdos/ makefiles for MSDOS +nintendods/ makefile for Nintendo DS old/ makefiles for various architectures and zlib documentation files that have not yet been updated for zlib 1.2.x -projects/ projects for various Integrated Development Environments qnx/ makefiles for QNX +watcom/ makefiles for OpenWatcom win32/ makefiles for Windows - zlib public header files (must be kept): + zlib public header files (required for library use): zconf.h zlib.h @@ -28,7 +42,11 @@ crc32.h deflate.c deflate.h -gzio.c +gzclose.c +gzguts.h +gzlib.c +gzread.c +gzwrite.c infback.c inffast.c inffast.h @@ -43,9 +61,8 @@ zutil.c zutil.h - source files for sample programs: -example.c -minigzip.c + source files for sample programs +See examples/README.examples - unsupported contribution by third parties + unsupported contributions by third parties See contrib/README.contrib diff --git a/Modules/zlib/Makefile b/Modules/zlib/Makefile --- a/Modules/zlib/Makefile +++ b/Modules/zlib/Makefile @@ -1,154 +1,5 @@ -# Makefile for zlib -# Copyright (C) 1995-2005 Jean-loup Gailly. -# For conditions of distribution and use, see copyright notice in zlib.h +all: + - at echo "Please use ./configure first. Thank you." 
-# To compile and test, type: -# ./configure; make test -# The call of configure is optional if you don't have special requirements -# If you wish to build zlib as a shared library, use: ./configure -s - -# To use the asm code, type: -# cp contrib/asm?86/match.S ./match.S -# make LOC=-DASMV OBJA=match.o - -# To install /usr/local/lib/libz.* and /usr/local/include/zlib.h, type: -# make install -# To install in $HOME instead of /usr/local, use: -# make install prefix=$HOME - -CC=cc - -CFLAGS=-O -#CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 -#CFLAGS=-g -DDEBUG -#CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ -# -Wstrict-prototypes -Wmissing-prototypes - -LDFLAGS=libz.a -LDSHARED=$(CC) -CPP=$(CC) -E - -LIBS=libz.a -SHAREDLIB=libz.so -SHAREDLIBV=libz.so.1.2.3 -SHAREDLIBM=libz.so.1 - -AR=ar rc -RANLIB=ranlib -TAR=tar -SHELL=/bin/sh -EXE= - -prefix = /usr/local -exec_prefix = ${prefix} -libdir = ${exec_prefix}/lib -includedir = ${prefix}/include -mandir = ${prefix}/share/man -man3dir = ${mandir}/man3 - -OBJS = adler32.o compress.o crc32.o gzio.o uncompr.o deflate.o trees.o \ - zutil.o inflate.o infback.o inftrees.o inffast.o - -OBJA = -# to use the asm code: make OBJA=match.o - -TEST_OBJS = example.o minigzip.o - -all: example$(EXE) minigzip$(EXE) - -check: test -test: all - @LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ - echo hello world | ./minigzip | ./minigzip -d || \ - echo ' *** minigzip test FAILED ***' ; \ - if ./example; then \ - echo ' *** zlib test OK ***'; \ - else \ - echo ' *** zlib test FAILED ***'; \ - fi - -libz.a: $(OBJS) $(OBJA) - $(AR) $@ $(OBJS) $(OBJA) - -@ ($(RANLIB) $@ || true) >/dev/null 2>&1 - -match.o: match.S - $(CPP) match.S > _match.s - $(CC) -c _match.s - mv _match.o match.o - rm -f _match.s - -$(SHAREDLIBV): $(OBJS) - $(LDSHARED) -o $@ $(OBJS) - rm -f $(SHAREDLIB) $(SHAREDLIBM) - ln -s $@ $(SHAREDLIB) - ln -s $@ $(SHAREDLIBM) - -example$(EXE): example.o $(LIBS) - $(CC) $(CFLAGS) -o $@ example.o $(LDFLAGS) - -minigzip$(EXE): minigzip.o $(LIBS) - $(CC) $(CFLAGS) -o $@ minigzip.o $(LDFLAGS) - -install: $(LIBS) - - at if [ ! -d $(exec_prefix) ]; then mkdir -p $(exec_prefix); fi - - at if [ ! -d $(includedir) ]; then mkdir -p $(includedir); fi - - at if [ ! -d $(libdir) ]; then mkdir -p $(libdir); fi - - at if [ ! -d $(man3dir) ]; then mkdir -p $(man3dir); fi - cp zlib.h zconf.h $(includedir) - chmod 644 $(includedir)/zlib.h $(includedir)/zconf.h - cp $(LIBS) $(libdir) - cd $(libdir); chmod 755 $(LIBS) - -@(cd $(libdir); $(RANLIB) libz.a || true) >/dev/null 2>&1 - cd $(libdir); if test -f $(SHAREDLIBV); then \ - rm -f $(SHAREDLIB) $(SHAREDLIBM); \ - ln -s $(SHAREDLIBV) $(SHAREDLIB); \ - ln -s $(SHAREDLIBV) $(SHAREDLIBM); \ - (ldconfig || true) >/dev/null 2>&1; \ - fi - cp zlib.3 $(man3dir) - chmod 644 $(man3dir)/zlib.3 -# The ranlib in install is needed on NeXTSTEP which checks file times -# ldconfig is for Linux - -uninstall: - cd $(includedir); \ - cd $(libdir); rm -f libz.a; \ - if test -f $(SHAREDLIBV); then \ - rm -f $(SHAREDLIBV) $(SHAREDLIB) $(SHAREDLIBM); \ - fi - cd $(man3dir); rm -f zlib.3 - -mostlyclean: clean -clean: - rm -f *.o *~ example$(EXE) minigzip$(EXE) \ - libz.* foo.gz so_locations \ - _match.s maketree contrib/infback9/*.o - -maintainer-clean: distclean -distclean: clean - cp -p Makefile.in Makefile - cp -p zconf.in.h zconf.h - rm -f .DS_Store - -tags: - etags *.[ch] - -depend: - makedepend -- $(CFLAGS) -- *.[ch] - -# DO NOT DELETE THIS LINE -- make depend depends on it. 
- -adler32.o: zlib.h zconf.h -compress.o: zlib.h zconf.h -crc32.o: crc32.h zlib.h zconf.h -deflate.o: deflate.h zutil.h zlib.h zconf.h -example.o: zlib.h zconf.h -gzio.o: zutil.h zlib.h zconf.h -inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h -inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h -infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h -inftrees.o: zutil.h zlib.h zconf.h inftrees.h -minigzip.o: zlib.h zconf.h -trees.o: deflate.h zutil.h zlib.h zconf.h trees.h -uncompr.o: zlib.h zconf.h -zutil.o: zutil.h zlib.h zconf.h +distclean: + make -f Makefile.in distclean diff --git a/Modules/zlib/Makefile.in b/Modules/zlib/Makefile.in --- a/Modules/zlib/Makefile.in +++ b/Modules/zlib/Makefile.in @@ -1,11 +1,11 @@ # Makefile for zlib -# Copyright (C) 1995-2005 Jean-loup Gailly. +# Copyright (C) 1995-2013 Jean-loup Gailly, Mark Adler # For conditions of distribution and use, see copyright notice in zlib.h # To compile and test, type: # ./configure; make test -# The call of configure is optional if you don't have special requirements -# If you wish to build zlib as a shared library, use: ./configure -s +# Normally configure builds both a static and a shared library. +# If you want to build just a static library, use: ./configure --static # To use the asm code, type: # cp contrib/asm?86/match.S ./match.S @@ -24,17 +24,23 @@ #CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \ # -Wstrict-prototypes -Wmissing-prototypes -LDFLAGS=libz.a +SFLAGS=-O +LDFLAGS= +TEST_LDFLAGS=-L. libz.a LDSHARED=$(CC) CPP=$(CC) -E -LIBS=libz.a +STATICLIB=libz.a SHAREDLIB=libz.so -SHAREDLIBV=libz.so.1.2.3 +SHAREDLIBV=libz.so.1.2.8 SHAREDLIBM=libz.so.1 +LIBS=$(STATICLIB) $(SHAREDLIBV) -AR=ar rc +AR=ar +ARFLAGS=rc RANLIB=ranlib +LDCONFIG=ldconfig +LDSHAREDLIBC=-lc TAR=tar SHELL=/bin/sh EXE= @@ -42,33 +48,84 @@ prefix = /usr/local exec_prefix = ${prefix} libdir = ${exec_prefix}/lib +sharedlibdir = ${libdir} includedir = ${prefix}/include mandir = ${prefix}/share/man man3dir = ${mandir}/man3 +pkgconfigdir = ${libdir}/pkgconfig -OBJS = adler32.o compress.o crc32.o gzio.o uncompr.o deflate.o trees.o \ - zutil.o inflate.o infback.o inftrees.o inffast.o +OBJZ = adler32.o crc32.o deflate.o infback.o inffast.o inflate.o inftrees.o trees.o zutil.o +OBJG = compress.o uncompr.o gzclose.o gzlib.o gzread.o gzwrite.o +OBJC = $(OBJZ) $(OBJG) +PIC_OBJZ = adler32.lo crc32.lo deflate.lo infback.lo inffast.lo inflate.lo inftrees.lo trees.lo zutil.lo +PIC_OBJG = compress.lo uncompr.lo gzclose.lo gzlib.lo gzread.lo gzwrite.lo +PIC_OBJC = $(PIC_OBJZ) $(PIC_OBJG) + +# to use the asm code: make OBJA=match.o, PIC_OBJA=match.lo OBJA = -# to use the asm code: make OBJA=match.o +PIC_OBJA = -TEST_OBJS = example.o minigzip.o +OBJS = $(OBJC) $(OBJA) -all: example$(EXE) minigzip$(EXE) +PIC_OBJS = $(PIC_OBJC) $(PIC_OBJA) + +all: static shared + +static: example$(EXE) minigzip$(EXE) + +shared: examplesh$(EXE) minigzipsh$(EXE) + +all64: example64$(EXE) minigzip64$(EXE) check: test -test: all - @LD_LIBRARY_PATH=.:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ - echo hello world | ./minigzip | ./minigzip -d || \ - echo ' *** minigzip test FAILED ***' ; \ - if ./example; then \ + +test: all teststatic testshared + +teststatic: static + @TMPST=tmpst_$$; \ + if echo hello world | ./minigzip | ./minigzip -d && ./example $$TMPST ; then \ echo ' *** zlib test OK ***'; \ else \ - echo ' *** zlib test FAILED ***'; \ - fi + echo ' *** zlib test FAILED ***'; false; \ + fi; \ + rm -f $$TMPST -libz.a: $(OBJS) 
$(OBJA) - $(AR) $@ $(OBJS) $(OBJA) +testshared: shared + @LD_LIBRARY_PATH=`pwd`:$(LD_LIBRARY_PATH) ; export LD_LIBRARY_PATH; \ + LD_LIBRARYN32_PATH=`pwd`:$(LD_LIBRARYN32_PATH) ; export LD_LIBRARYN32_PATH; \ + DYLD_LIBRARY_PATH=`pwd`:$(DYLD_LIBRARY_PATH) ; export DYLD_LIBRARY_PATH; \ + SHLIB_PATH=`pwd`:$(SHLIB_PATH) ; export SHLIB_PATH; \ + TMPSH=tmpsh_$$; \ + if echo hello world | ./minigzipsh | ./minigzipsh -d && ./examplesh $$TMPSH; then \ + echo ' *** zlib shared test OK ***'; \ + else \ + echo ' *** zlib shared test FAILED ***'; false; \ + fi; \ + rm -f $$TMPSH + +test64: all64 + @TMP64=tmp64_$$; \ + if echo hello world | ./minigzip64 | ./minigzip64 -d && ./example64 $$TMP64; then \ + echo ' *** zlib 64-bit test OK ***'; \ + else \ + echo ' *** zlib 64-bit test FAILED ***'; false; \ + fi; \ + rm -f $$TMP64 + +infcover.o: test/infcover.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/infcover.c + +infcover: infcover.o libz.a + $(CC) $(CFLAGS) -o $@ infcover.o libz.a + +cover: infcover + rm -f *.gcda + ./infcover + gcov inf*.c + +libz.a: $(OBJS) + $(AR) $(ARFLAGS) $@ $(OBJS) -@ ($(RANLIB) $@ || true) >/dev/null 2>&1 match.o: match.S @@ -77,58 +134,130 @@ mv _match.o match.o rm -f _match.s -$(SHAREDLIBV): $(OBJS) - $(LDSHARED) -o $@ $(OBJS) +match.lo: match.S + $(CPP) match.S > _match.s + $(CC) -c -fPIC _match.s + mv _match.o match.lo + rm -f _match.s + +example.o: test/example.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/example.c + +minigzip.o: test/minigzip.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -c -o $@ test/minigzip.c + +example64.o: test/example.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -D_FILE_OFFSET_BITS=64 -c -o $@ test/example.c + +minigzip64.o: test/minigzip.c zlib.h zconf.h + $(CC) $(CFLAGS) -I. -D_FILE_OFFSET_BITS=64 -c -o $@ test/minigzip.c + +.SUFFIXES: .lo + +.c.lo: + - at mkdir objs 2>/dev/null || test -d objs + $(CC) $(SFLAGS) -DPIC -c -o objs/$*.o $< + - at mv objs/$*.o $@ + +placebo $(SHAREDLIBV): $(PIC_OBJS) libz.a + $(LDSHARED) $(SFLAGS) -o $@ $(PIC_OBJS) $(LDSHAREDLIBC) $(LDFLAGS) rm -f $(SHAREDLIB) $(SHAREDLIBM) ln -s $@ $(SHAREDLIB) ln -s $@ $(SHAREDLIBM) + - at rmdir objs -example$(EXE): example.o $(LIBS) - $(CC) $(CFLAGS) -o $@ example.o $(LDFLAGS) +example$(EXE): example.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example.o $(TEST_LDFLAGS) -minigzip$(EXE): minigzip.o $(LIBS) - $(CC) $(CFLAGS) -o $@ minigzip.o $(LDFLAGS) +minigzip$(EXE): minigzip.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip.o $(TEST_LDFLAGS) -install: $(LIBS) - - at if [ ! -d $(exec_prefix) ]; then mkdir -p $(exec_prefix); fi - - at if [ ! -d $(includedir) ]; then mkdir -p $(includedir); fi - - at if [ ! -d $(libdir) ]; then mkdir -p $(libdir); fi - - at if [ ! -d $(man3dir) ]; then mkdir -p $(man3dir); fi - cp zlib.h zconf.h $(includedir) - chmod 644 $(includedir)/zlib.h $(includedir)/zconf.h - cp $(LIBS) $(libdir) - cd $(libdir); chmod 755 $(LIBS) - -@(cd $(libdir); $(RANLIB) libz.a || true) >/dev/null 2>&1 - cd $(libdir); if test -f $(SHAREDLIBV); then \ - rm -f $(SHAREDLIB) $(SHAREDLIBM); \ - ln -s $(SHAREDLIBV) $(SHAREDLIB); \ - ln -s $(SHAREDLIBV) $(SHAREDLIBM); \ - (ldconfig || true) >/dev/null 2>&1; \ +examplesh$(EXE): example.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ example.o -L. $(SHAREDLIBV) + +minigzipsh$(EXE): minigzip.o $(SHAREDLIBV) + $(CC) $(CFLAGS) -o $@ minigzip.o -L. 
$(SHAREDLIBV) + +example64$(EXE): example64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ example64.o $(TEST_LDFLAGS) + +minigzip64$(EXE): minigzip64.o $(STATICLIB) + $(CC) $(CFLAGS) -o $@ minigzip64.o $(TEST_LDFLAGS) + +install-libs: $(LIBS) + - at if [ ! -d $(DESTDIR)$(exec_prefix) ]; then mkdir -p $(DESTDIR)$(exec_prefix); fi + - at if [ ! -d $(DESTDIR)$(libdir) ]; then mkdir -p $(DESTDIR)$(libdir); fi + - at if [ ! -d $(DESTDIR)$(sharedlibdir) ]; then mkdir -p $(DESTDIR)$(sharedlibdir); fi + - at if [ ! -d $(DESTDIR)$(man3dir) ]; then mkdir -p $(DESTDIR)$(man3dir); fi + - at if [ ! -d $(DESTDIR)$(pkgconfigdir) ]; then mkdir -p $(DESTDIR)$(pkgconfigdir); fi + cp $(STATICLIB) $(DESTDIR)$(libdir) + chmod 644 $(DESTDIR)$(libdir)/$(STATICLIB) + -@($(RANLIB) $(DESTDIR)$(libdir)/libz.a || true) >/dev/null 2>&1 + - at if test -n "$(SHAREDLIBV)"; then \ + cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir); \ + echo "cp $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)"; \ + chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV); \ + echo "chmod 755 $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBV)"; \ + rm -f $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIB); \ + ln -s $(SHAREDLIBV) $(DESTDIR)$(sharedlibdir)/$(SHAREDLIBM); \ + ($(LDCONFIG) || true) >/dev/null 2>&1; \ fi - cp zlib.3 $(man3dir) - chmod 644 $(man3dir)/zlib.3 + cp zlib.3 $(DESTDIR)$(man3dir) + chmod 644 $(DESTDIR)$(man3dir)/zlib.3 + cp zlib.pc $(DESTDIR)$(pkgconfigdir) + chmod 644 $(DESTDIR)$(pkgconfigdir)/zlib.pc # The ranlib in install is needed on NeXTSTEP which checks file times # ldconfig is for Linux +install: install-libs + - at if [ ! -d $(DESTDIR)$(includedir) ]; then mkdir -p $(DESTDIR)$(includedir); fi + cp zlib.h zconf.h $(DESTDIR)$(includedir) + chmod 644 $(DESTDIR)$(includedir)/zlib.h $(DESTDIR)$(includedir)/zconf.h + uninstall: - cd $(includedir); \ - cd $(libdir); rm -f libz.a; \ - if test -f $(SHAREDLIBV); then \ + cd $(DESTDIR)$(includedir) && rm -f zlib.h zconf.h + cd $(DESTDIR)$(libdir) && rm -f libz.a; \ + if test -n "$(SHAREDLIBV)" -a -f $(SHAREDLIBV); then \ rm -f $(SHAREDLIBV) $(SHAREDLIB) $(SHAREDLIBM); \ fi - cd $(man3dir); rm -f zlib.3 + cd $(DESTDIR)$(man3dir) && rm -f zlib.3 + cd $(DESTDIR)$(pkgconfigdir) && rm -f zlib.pc + +docs: zlib.3.pdf + +zlib.3.pdf: zlib.3 + groff -mandoc -f H -T ps zlib.3 | ps2pdf - zlib.3.pdf + +zconf.h.cmakein: zconf.h.in + -@ TEMPFILE=zconfh_$$; \ + echo "/#define ZCONF_H/ a\\\\\n#cmakedefine Z_PREFIX\\\\\n#cmakedefine Z_HAVE_UNISTD_H\n" >> $$TEMPFILE &&\ + sed -f $$TEMPFILE zconf.h.in > zconf.h.cmakein &&\ + touch -r zconf.h.in zconf.h.cmakein &&\ + rm $$TEMPFILE + +zconf: zconf.h.in + cp -p zconf.h.in zconf.h mostlyclean: clean clean: - rm -f *.o *~ example$(EXE) minigzip$(EXE) \ + rm -f *.o *.lo *~ \ + example$(EXE) minigzip$(EXE) examplesh$(EXE) minigzipsh$(EXE) \ + example64$(EXE) minigzip64$(EXE) \ + infcover \ libz.* foo.gz so_locations \ _match.s maketree contrib/infback9/*.o + rm -rf objs + rm -f *.gcda *.gcno *.gcov + rm -f contrib/infback9/*.gcda contrib/infback9/*.gcno contrib/infback9/*.gcov maintainer-clean: distclean -distclean: clean - cp -p Makefile.in Makefile - cp -p zconf.in.h zconf.h - rm -f .DS_Store +distclean: clean zconf zconf.h.cmakein docs + rm -f Makefile zlib.pc configure.log + - at rm -f .DS_Store + - at printf 'all:\n\t- at echo "Please use ./configure first. 
Thank you."\n' > Makefile + - at printf '\ndistclean:\n\tmake -f Makefile.in distclean\n' >> Makefile + - at touch -r Makefile.in Makefile tags: etags *.[ch] @@ -138,17 +267,22 @@ # DO NOT DELETE THIS LINE -- make depend depends on it. -adler32.o: zlib.h zconf.h -compress.o: zlib.h zconf.h -crc32.o: crc32.h zlib.h zconf.h +adler32.o zutil.o: zutil.h zlib.h zconf.h +gzclose.o gzlib.o gzread.o gzwrite.o: zlib.h zconf.h gzguts.h +compress.o example.o minigzip.o uncompr.o: zlib.h zconf.h +crc32.o: zutil.h zlib.h zconf.h crc32.h deflate.o: deflate.h zutil.h zlib.h zconf.h -example.o: zlib.h zconf.h -gzio.o: zutil.h zlib.h zconf.h +infback.o inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h inffixed.h inffast.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h -inflate.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h -infback.o: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h inftrees.o: zutil.h zlib.h zconf.h inftrees.h -minigzip.o: zlib.h zconf.h trees.o: deflate.h zutil.h zlib.h zconf.h trees.h -uncompr.o: zlib.h zconf.h -zutil.o: zutil.h zlib.h zconf.h + +adler32.lo zutil.lo: zutil.h zlib.h zconf.h +gzclose.lo gzlib.lo gzread.lo gzwrite.lo: zlib.h zconf.h gzguts.h +compress.lo example.lo minigzip.lo uncompr.lo: zlib.h zconf.h +crc32.lo: zutil.h zlib.h zconf.h crc32.h +deflate.lo: deflate.h zutil.h zlib.h zconf.h +infback.lo inflate.lo: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h inffixed.h +inffast.lo: zutil.h zlib.h zconf.h inftrees.h inflate.h inffast.h +inftrees.lo: zutil.h zlib.h zconf.h inftrees.h +trees.lo: deflate.h zutil.h zlib.h zconf.h trees.h diff --git a/Modules/zlib/README b/Modules/zlib/README --- a/Modules/zlib/README +++ b/Modules/zlib/README @@ -1,56 +1,52 @@ ZLIB DATA COMPRESSION LIBRARY -zlib 1.2.3 is a general purpose data compression library. All the code is +zlib 1.2.8 is a general purpose data compression library. All the code is thread safe. The data format used by the zlib library is described by RFCs (Request for Comments) 1950 to 1952 in the files -http://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) -and rfc1952.txt (gzip format). These documents are also available in other -formats from ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html +http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and +rfc1952 (gzip format). All functions of the compression library are documented in the file zlib.h -(volunteer to write man pages welcome, contact zlib at gzip.org). A usage example -of the library is given in the file example.c which also tests that the library -is working correctly. Another example is given in the file minigzip.c. The -compression library itself is composed of all source files except example.c and -minigzip.c. +(volunteer to write man pages welcome, contact zlib at gzip.org). A usage example +of the library is given in the file test/example.c which also tests that +the library is working correctly. Another example is given in the file +test/minigzip.c. The compression library itself is composed of all source +files in the root directory. To compile all files and run the test program, follow the instructions given at -the top of Makefile. In short "make test; make install" should work for most -machines. For Unix: "./configure; make test; make install". For MSDOS, use one -of the special makefiles such as Makefile.msc. For VMS, use make_vms.com. +the top of Makefile.in. 
In short "./configure; make test", and if that goes +well, "make install" should work for most flavors of Unix. For Windows, use +one of the special makefiles in win32/ or contrib/vstudio/ . For VMS, use +make_vms.com. Questions about zlib should be sent to , or to Gilles Vollant - for the Windows DLL version. The zlib home page is -http://www.zlib.org or http://www.gzip.org/zlib/ Before reporting a problem, -please check this site to verify that you have the latest version of zlib; -otherwise get the latest version and check whether the problem still exists or -not. + for the Windows DLL version. The zlib home page is +http://zlib.net/ . Before reporting a problem, please check this site to +verify that you have the latest version of zlib; otherwise get the latest +version and check whether the problem still exists or not. -PLEASE read the zlib FAQ http://www.gzip.org/zlib/zlib_faq.html before asking -for help. +PLEASE read the zlib FAQ http://zlib.net/zlib_faq.html before asking for help. -Mark Nelson wrote an article about zlib for the Jan. 1997 -issue of Dr. Dobb's Journal; a copy of the article is available in -http://dogma.net/markn/articles/zlibtool/zlibtool.htm +Mark Nelson wrote an article about zlib for the Jan. 1997 +issue of Dr. Dobb's Journal; a copy of the article is available at +http://marknelson.us/1997/01/01/zlib-engine/ . -The changes made in version 1.2.3 are documented in the file ChangeLog. +The changes made in version 1.2.8 are documented in the file ChangeLog. -Unsupported third party contributions are provided in directory "contrib". +Unsupported third party contributions are provided in directory contrib/ . -A Java implementation of zlib is available in the Java Development Kit -http://java.sun.com/j2se/1.4.2/docs/api/java/util/zip/package-summary.html -See the zlib home page http://www.zlib.org for details. +zlib is available in Java using the java.util.zip package, documented at +http://java.sun.com/developer/technicalArticles/Programming/compression/ . -A Perl interface to zlib written by Paul Marquess is in the -CPAN (Comprehensive Perl Archive Network) sites -http://www.cpan.org/modules/by-module/Compress/ +A Perl interface to zlib written by Paul Marquess is available +at CPAN (Comprehensive Perl Archive Network) sites, including +http://search.cpan.org/~pmqs/IO-Compress-Zlib/ . A Python interface to zlib written by A.M. Kuchling is available in Python 1.5 and later versions, see -http://www.python.org/doc/lib/module-zlib.html +http://docs.python.org/library/zlib.html . -A zlib binding for TCL written by Andreas Kupries is -availlable at http://www.oche.de/~akupries/soft/trf/trf_zip.html +zlib is built into tcl: http://wiki.tcl.tk/4610 . An experimental package to read and write files in .zip format, written on top of zlib by Gilles Vollant , is available in the @@ -74,25 +70,21 @@ - zlib doesn't work on HP-UX 9.05 with some versions of /bin/cc. It works with other compilers. Use "make test" to check your compiler. -- gzdopen is not supported on RISCOS, BEOS and by some Mac compilers. +- gzdopen is not supported on RISCOS or BEOS. - For PalmOs, see http://palmzlib.sourceforge.net/ -- When building a shared, i.e. dynamic library on Mac OS X, the library must be - installed before testing (do "make install" before "make test"), since the - library location is specified in the library. - Acknowledgments: - The deflate format used by zlib was defined by Phil Katz. The deflate - and zlib specifications were written by L. Peter Deutsch. 
Thanks to all the - people who reported problems and suggested various improvements in zlib; - they are too numerous to cite here. + The deflate format used by zlib was defined by Phil Katz. The deflate and + zlib specifications were written by L. Peter Deutsch. Thanks to all the + people who reported problems and suggested various improvements in zlib; they + are too numerous to cite here. Copyright notice: - (C) 1995-2004 Jean-loup Gailly and Mark Adler + (C) 1995-2013 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages @@ -113,13 +105,11 @@ Jean-loup Gailly Mark Adler jloup at gzip.org madler at alumni.caltech.edu -If you use the zlib library in a product, we would appreciate *not* -receiving lengthy legal documents to sign. The sources are provided -for free but without warranty of any kind. The library has been -entirely written by Jean-loup Gailly and Mark Adler; it does not -include third-party code. +If you use the zlib library in a product, we would appreciate *not* receiving +lengthy legal documents to sign. The sources are provided for free but without +warranty of any kind. The library has been entirely written by Jean-loup +Gailly and Mark Adler; it does not include third-party code. -If you redistribute modified sources, we would appreciate that you include -in the file ChangeLog history information documenting your changes. Please -read the FAQ for more information on the distribution of modified source -versions. +If you redistribute modified sources, we would appreciate that you include in +the file ChangeLog history information documenting your changes. Please read +the FAQ for more information on the distribution of modified source versions. 
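(The README above points at test/example.c for a complete usage example. As a minimal sketch that is not part of this changeset, the one-shot helpers declared in zlib.h can be exercised as below; the fixed 128-byte buffers and the file name are assumptions made only for the illustration.)

    /* zlib_roundtrip_sketch.c -- minimal illustration, not part of the patch */
    #include <stdio.h>
    #include <string.h>
    #include "zlib.h"

    int main(void)
    {
        const Bytef src[] = "hello world";
        uLong src_len = (uLong)strlen((const char *)src) + 1;  /* keep the NUL */
        Bytef comp[128], plain[128];               /* assumed large enough here */
        uLongf comp_len = sizeof comp, plain_len = sizeof plain;

        if (compress(comp, &comp_len, src, src_len) != Z_OK)
            return 1;                              /* compression failed */
        if (uncompress(plain, &plain_len, comp, comp_len) != Z_OK)
            return 1;                              /* decompression failed */
        printf("round trip ok: %s (%lu -> %lu bytes)\n",
               (char *)plain, src_len, comp_len);
        return 0;
    }

(Build with something like "cc zlib_roundtrip_sketch.c -lz". A real caller would size the destination with compressBound(src_len) rather than a fixed array; test/example.c shipped in this patch covers the same ground far more thoroughly.)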
diff --git a/Modules/zlib/adler32.c b/Modules/zlib/adler32.c --- a/Modules/zlib/adler32.c +++ b/Modules/zlib/adler32.c @@ -1,14 +1,17 @@ /* adler32.c -- compute the Adler-32 checksum of a data stream - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2011 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ -#define ZLIB_INTERNAL -#include "zlib.h" +#include "zutil.h" -#define BASE 65521UL /* largest prime smaller than 65536 */ +#define local static + +local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); + +#define BASE 65521 /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ @@ -18,39 +21,44 @@ #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); -/* use NO_DIVIDE if your processor does not do division in hardware */ +/* use NO_DIVIDE if your processor does not do division in hardware -- + try it both ways to see which is faster */ #ifdef NO_DIVIDE +/* note that this assumes BASE is 65521, where 65536 % 65521 == 15 + (thank you to John Reiser for pointing this out) */ +# define CHOP(a) \ + do { \ + unsigned long tmp = a >> 16; \ + a &= 0xffffUL; \ + a += (tmp << 4) - tmp; \ + } while (0) +# define MOD28(a) \ + do { \ + CHOP(a); \ + if (a >= BASE) a -= BASE; \ + } while (0) # define MOD(a) \ do { \ - if (a >= (BASE << 16)) a -= (BASE << 16); \ - if (a >= (BASE << 15)) a -= (BASE << 15); \ - if (a >= (BASE << 14)) a -= (BASE << 14); \ - if (a >= (BASE << 13)) a -= (BASE << 13); \ - if (a >= (BASE << 12)) a -= (BASE << 12); \ - if (a >= (BASE << 11)) a -= (BASE << 11); \ - if (a >= (BASE << 10)) a -= (BASE << 10); \ - if (a >= (BASE << 9)) a -= (BASE << 9); \ - if (a >= (BASE << 8)) a -= (BASE << 8); \ - if (a >= (BASE << 7)) a -= (BASE << 7); \ - if (a >= (BASE << 6)) a -= (BASE << 6); \ - if (a >= (BASE << 5)) a -= (BASE << 5); \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ - if (a >= BASE) a -= BASE; \ + CHOP(a); \ + MOD28(a); \ } while (0) -# define MOD4(a) \ - do { \ - if (a >= (BASE << 4)) a -= (BASE << 4); \ - if (a >= (BASE << 3)) a -= (BASE << 3); \ - if (a >= (BASE << 2)) a -= (BASE << 2); \ - if (a >= (BASE << 1)) a -= (BASE << 1); \ +# define MOD63(a) \ + do { /* this assumes a is not negative */ \ + z_off64_t tmp = a >> 32; \ + a &= 0xffffffffL; \ + a += (tmp << 8) - (tmp << 5) + tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ + tmp = a >> 16; \ + a &= 0xffffL; \ + a += (tmp << 4) - tmp; \ if (a >= BASE) a -= BASE; \ } while (0) #else # define MOD(a) a %= BASE -# define MOD4(a) a %= BASE +# define MOD28(a) a %= BASE +# define MOD63(a) a %= BASE #endif /* ========================================================================= */ @@ -89,7 +97,7 @@ } if (adler >= BASE) adler -= BASE; - MOD4(sum2); /* only added so many BASE's */ + MOD28(sum2); /* only added so many BASE's */ return adler | (sum2 << 16); } @@ -125,25 +133,47 @@ } /* ========================================================================= */ +local uLong adler32_combine_(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + unsigned long sum1; + unsigned long sum2; + unsigned rem; + + /* for negative len, return invalid adler32 as a clue for debugging */ + if (len2 < 0) + return 0xffffffffUL; + + /* the derivation of this formula is left as 
an exercise for the reader */ + MOD63(len2); /* assumes len2 >= 0 */ + rem = (unsigned)len2; + sum1 = adler1 & 0xffff; + sum2 = rem * sum1; + MOD(sum2); + sum1 += (adler2 & 0xffff) + BASE - 1; + sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; + if (sum1 >= BASE) sum1 -= BASE; + if (sum1 >= BASE) sum1 -= BASE; + if (sum2 >= (BASE << 1)) sum2 -= (BASE << 1); + if (sum2 >= BASE) sum2 -= BASE; + return sum1 | (sum2 << 16); +} + +/* ========================================================================= */ uLong ZEXPORT adler32_combine(adler1, adler2, len2) uLong adler1; uLong adler2; z_off_t len2; { - unsigned long sum1; - unsigned long sum2; - unsigned rem; + return adler32_combine_(adler1, adler2, len2); +} - /* the derivation of this formula is left as an exercise for the reader */ - rem = (unsigned)(len2 % BASE); - sum1 = adler1 & 0xffff; - sum2 = rem * sum1; - MOD(sum2); - sum1 += (adler2 & 0xffff) + BASE - 1; - sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; - if (sum1 > BASE) sum1 -= BASE; - if (sum1 > BASE) sum1 -= BASE; - if (sum2 > (BASE << 1)) sum2 -= (BASE << 1); - if (sum2 > BASE) sum2 -= BASE; - return sum1 | (sum2 << 16); +uLong ZEXPORT adler32_combine64(adler1, adler2, len2) + uLong adler1; + uLong adler2; + z_off64_t len2; +{ + return adler32_combine_(adler1, adler2, len2); } diff --git a/Modules/zlib/algorithm.txt b/Modules/zlib/algorithm.txt --- a/Modules/zlib/algorithm.txt +++ b/Modules/zlib/algorithm.txt @@ -121,7 +121,7 @@ kbytes. You can imagine that filling in a 2^15 entry table for a 15-bit code would take too long if you're only decoding several thousand symbols. At the other extreme, you could make a new table for every bit in the code. In fact, -that's essentially a Huffman tree. But then you spend two much time +that's essentially a Huffman tree. But then you spend too much time traversing the tree while decoding, even for short symbols. So the number of bits for the first lookup table is a trade of the time to @@ -206,4 +206,4 @@ pp. 337-343. ``DEFLATE Compressed Data Format Specification'' available in -http://www.ietf.org/rfc/rfc1951.txt +http://tools.ietf.org/html/rfc1951 diff --git a/Modules/zlib/compress.c b/Modules/zlib/compress.c --- a/Modules/zlib/compress.c +++ b/Modules/zlib/compress.c @@ -1,5 +1,5 @@ /* compress.c -- compress a memory buffer - * Copyright (C) 1995-2003 Jean-loup Gailly. + * Copyright (C) 1995-2005 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -29,7 +29,7 @@ z_stream stream; int err; - stream.next_in = (Bytef*)source; + stream.next_in = (z_const Bytef *)source; stream.avail_in = (uInt)sourceLen; #ifdef MAXSEG_64K /* Check for source > 64K on 16-bit machine: */ @@ -75,5 +75,6 @@ uLong ZEXPORT compressBound (sourceLen) uLong sourceLen; { - return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + 11; + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13; } diff --git a/Modules/zlib/configure b/Modules/zlib/configure --- a/Modules/zlib/configure +++ b/Modules/zlib/configure @@ -1,108 +1,239 @@ #!/bin/sh -# configure script for zlib. This script is needed only if -# you wish to build a shared library and your system supports them, -# of if you need special compiler, flags or install directory. -# Otherwise, you can just use directly "make test; make install" +# configure script for zlib. # -# To create a shared library, use "configure --shared"; by default a static -# library is created. 
If the primitive shared library support provided here -# does not work, use ftp://prep.ai.mit.edu/pub/gnu/libtool-*.tar.gz +# Normally configure builds both a static and a shared library. +# If you want to build just a static library, use: ./configure --static # # To impose specific compiler or flags or install directory, use for example: # prefix=$HOME CC=cc CFLAGS="-O4" ./configure # or for csh/tcsh users: # (setenv prefix $HOME; setenv CC cc; setenv CFLAGS "-O4"; ./configure) -# LDSHARED is the command to be used to create a shared library # Incorrect settings of CC or CFLAGS may prevent creating a shared library. # If you have problems, try without defining CC and CFLAGS before reporting # an error. -LIBS=libz.a -LDFLAGS="-L. ${LIBS}" +# start off configure.log +echo -------------------- >> configure.log +echo $0 $* >> configure.log +date >> configure.log + +# set command prefix for cross-compilation +if [ -n "${CHOST}" ]; then + uname="`echo "${CHOST}" | sed -e 's/^[^-]*-\([^-]*\)$/\1/' -e 's/^[^-]*-[^-]*-\([^-]*\)$/\1/' -e 's/^[^-]*-[^-]*-\([^-]*\)-.*$/\1/'`" + CROSS_PREFIX="${CHOST}-" +fi + +# destination name for static library +STATICLIB=libz.a + +# extract zlib version numbers from zlib.h VER=`sed -n -e '/VERSION "/s/.*"\(.*\)".*/\1/p' < zlib.h` +VER3=`sed -n -e '/VERSION "/s/.*"\([0-9]*\\.[0-9]*\\.[0-9]*\).*/\1/p' < zlib.h` VER2=`sed -n -e '/VERSION "/s/.*"\([0-9]*\\.[0-9]*\)\\..*/\1/p' < zlib.h` VER1=`sed -n -e '/VERSION "/s/.*"\([0-9]*\)\\..*/\1/p' < zlib.h` -AR=${AR-"ar rc"} -RANLIB=${RANLIB-"ranlib"} + +# establish commands for library building +if "${CROSS_PREFIX}ar" --version >/dev/null 2>/dev/null || test $? -lt 126; then + AR=${AR-"${CROSS_PREFIX}ar"} + test -n "${CROSS_PREFIX}" && echo Using ${AR} | tee -a configure.log +else + AR=${AR-"ar"} + test -n "${CROSS_PREFIX}" && echo Using ${AR} | tee -a configure.log +fi +ARFLAGS=${ARFLAGS-"rc"} +if "${CROSS_PREFIX}ranlib" --version >/dev/null 2>/dev/null || test $? -lt 126; then + RANLIB=${RANLIB-"${CROSS_PREFIX}ranlib"} + test -n "${CROSS_PREFIX}" && echo Using ${RANLIB} | tee -a configure.log +else + RANLIB=${RANLIB-"ranlib"} +fi +if "${CROSS_PREFIX}nm" --version >/dev/null 2>/dev/null || test $? -lt 126; then + NM=${NM-"${CROSS_PREFIX}nm"} + test -n "${CROSS_PREFIX}" && echo Using ${NM} | tee -a configure.log +else + NM=${NM-"nm"} +fi + +# set defaults before processing command line options +LDCONFIG=${LDCONFIG-"ldconfig"} +LDSHAREDLIBC="${LDSHAREDLIBC--lc}" +ARCHS= prefix=${prefix-/usr/local} exec_prefix=${exec_prefix-'${prefix}'} libdir=${libdir-'${exec_prefix}/lib'} +sharedlibdir=${sharedlibdir-'${libdir}'} includedir=${includedir-'${prefix}/include'} mandir=${mandir-'${prefix}/share/man'} shared_ext='.so' -shared=0 +shared=1 +solo=0 +cover=0 +zprefix=0 +zconst=0 +build64=0 gcc=0 old_cc="$CC" old_cflags="$CFLAGS" +OBJC='$(OBJZ) $(OBJG)' +PIC_OBJC='$(PIC_OBJZ) $(PIC_OBJG)' +# leave this script, optionally in a bad way +leave() +{ + if test "$*" != "0"; then + echo "** $0 aborting." 
| tee -a configure.log + fi + rm -f $test.[co] $test $test$shared_ext $test.gcno ./--version + echo -------------------- >> configure.log + echo >> configure.log + echo >> configure.log + exit $1 +} + +# process command line options while test $# -ge 1 do case "$1" in - -h* | --h*) - echo 'usage:' - echo ' configure [--shared] [--prefix=PREFIX] [--exec_prefix=EXPREFIX]' - echo ' [--libdir=LIBDIR] [--includedir=INCLUDEDIR]' - exit 0;; - -p*=* | --p*=*) prefix=`echo $1 | sed 's/[-a-z_]*=//'`; shift;; - -e*=* | --e*=*) exec_prefix=`echo $1 | sed 's/[-a-z_]*=//'`; shift;; - -l*=* | --libdir=*) libdir=`echo $1 | sed 's/[-a-z_]*=//'`; shift;; - -i*=* | --includedir=*) includedir=`echo $1 | sed 's/[-a-z_]*=//'`;shift;; - -p* | --p*) prefix="$2"; shift; shift;; - -e* | --e*) exec_prefix="$2"; shift; shift;; - -l* | --l*) libdir="$2"; shift; shift;; - -i* | --i*) includedir="$2"; shift; shift;; - -s* | --s*) shared=1; shift;; - *) echo "unknown option: $1"; echo "$0 --help for help"; exit 1;; + -h* | --help) + echo 'usage:' | tee -a configure.log + echo ' configure [--const] [--zprefix] [--prefix=PREFIX] [--eprefix=EXPREFIX]' | tee -a configure.log + echo ' [--static] [--64] [--libdir=LIBDIR] [--sharedlibdir=LIBDIR]' | tee -a configure.log + echo ' [--includedir=INCLUDEDIR] [--archs="-arch i386 -arch x86_64"]' | tee -a configure.log + exit 0 ;; + -p*=* | --prefix=*) prefix=`echo $1 | sed 's/.*=//'`; shift ;; + -e*=* | --eprefix=*) exec_prefix=`echo $1 | sed 's/.*=//'`; shift ;; + -l*=* | --libdir=*) libdir=`echo $1 | sed 's/.*=//'`; shift ;; + --sharedlibdir=*) sharedlibdir=`echo $1 | sed 's/.*=//'`; shift ;; + -i*=* | --includedir=*) includedir=`echo $1 | sed 's/.*=//'`;shift ;; + -u*=* | --uname=*) uname=`echo $1 | sed 's/.*=//'`;shift ;; + -p* | --prefix) prefix="$2"; shift; shift ;; + -e* | --eprefix) exec_prefix="$2"; shift; shift ;; + -l* | --libdir) libdir="$2"; shift; shift ;; + -i* | --includedir) includedir="$2"; shift; shift ;; + -s* | --shared | --enable-shared) shared=1; shift ;; + -t | --static) shared=0; shift ;; + --solo) solo=1; shift ;; + --cover) cover=1; shift ;; + -z* | --zprefix) zprefix=1; shift ;; + -6* | --64) build64=1; shift ;; + -a*=* | --archs=*) ARCHS=`echo $1 | sed 's/.*=//'`; shift ;; + --sysconfdir=*) echo "ignored option: --sysconfdir" | tee -a configure.log; shift ;; + --localstatedir=*) echo "ignored option: --localstatedir" | tee -a configure.log; shift ;; + -c* | --const) zconst=1; shift ;; + *) + echo "unknown option: $1" | tee -a configure.log + echo "$0 --help for help" | tee -a configure.log + leave 1;; esac done +# temporary file name test=ztest$$ + +# put arguments in log, also put test file in log if used in arguments +show() +{ + case "$*" in + *$test.c*) + echo === $test.c === >> configure.log + cat $test.c >> configure.log + echo === >> configure.log;; + esac + echo $* >> configure.log +} + +# check for gcc vs. cc and set compile and link flags based on the system identified by uname cat > $test.c <&1` in + *gcc*) gcc=1 ;; esac -if test "$gcc" -eq 1 && ($cc -c $cflags $test.c) 2>/dev/null; then +show $cc -c $test.c +if test "$gcc" -eq 1 && ($cc -c $test.c) >> configure.log 2>&1; then + echo ... 
using gcc >> configure.log CC="$cc" - SFLAGS=${CFLAGS-"-fPIC -O3"} - CFLAGS="$cflags" - case `(uname -s || echo unknown) 2>/dev/null` in - Linux | linux | GNU | GNU/*) LDSHARED=${LDSHARED-"$cc -shared -Wl,-soname,libz.so.1"};; - CYGWIN* | Cygwin* | cygwin* | OS/2* ) - EXE='.exe';; + CFLAGS="${CFLAGS--O3} ${ARCHS}" + SFLAGS="${CFLAGS--O3} -fPIC" + LDFLAGS="${LDFLAGS} ${ARCHS}" + if test $build64 -eq 1; then + CFLAGS="${CFLAGS} -m64" + SFLAGS="${SFLAGS} -m64" + fi + if test "${ZLIBGCCWARN}" = "YES"; then + if test "$zconst" -eq 1; then + CFLAGS="${CFLAGS} -Wall -Wextra -Wcast-qual -pedantic -DZLIB_CONST" + else + CFLAGS="${CFLAGS} -Wall -Wextra -pedantic" + fi + fi + if test -z "$uname"; then + uname=`(uname -s || echo unknown) 2>/dev/null` + fi + case "$uname" in + Linux* | linux* | GNU | GNU/* | solaris*) + LDSHARED=${LDSHARED-"$cc -shared -Wl,-soname,libz.so.1,--version-script,zlib.map"} ;; + *BSD | *bsd* | DragonFly) + LDSHARED=${LDSHARED-"$cc -shared -Wl,-soname,libz.so.1,--version-script,zlib.map"} + LDCONFIG="ldconfig -m" ;; + CYGWIN* | Cygwin* | cygwin* | OS/2*) + EXE='.exe' ;; + MINGW* | mingw*) +# temporary bypass + rm -f $test.[co] $test $test$shared_ext + echo "Please use win32/Makefile.gcc instead." | tee -a configure.log + leave 1 + LDSHARED=${LDSHARED-"$cc -shared"} + LDSHAREDLIBC="" + EXE='.exe' ;; QNX*) # This is for QNX6. I suppose that the QNX rule below is for QNX2,QNX4 # (alain.bonnefoy at icbt.com) - LDSHARED=${LDSHARED-"$cc -shared -Wl,-hlibz.so.1"};; + LDSHARED=${LDSHARED-"$cc -shared -Wl,-hlibz.so.1"} ;; HP-UX*) LDSHARED=${LDSHARED-"$cc -shared $SFLAGS"} case `(uname -m || echo unknown) 2>/dev/null` in ia64) shared_ext='.so' - SHAREDLIB='libz.so';; + SHAREDLIB='libz.so' ;; *) shared_ext='.sl' - SHAREDLIB='libz.sl';; - esac;; - Darwin*) shared_ext='.dylib' + SHAREDLIB='libz.sl' ;; + esac ;; + Darwin* | darwin*) + shared_ext='.dylib' SHAREDLIB=libz$shared_ext SHAREDLIBV=libz.$VER$shared_ext SHAREDLIBM=libz.$VER1$shared_ext - LDSHARED=${LDSHARED-"$cc -dynamiclib -install_name $libdir/$SHAREDLIBM -compatibility_version $VER1 -current_version $VER"};; - *) LDSHARED=${LDSHARED-"$cc -shared"};; + LDSHARED=${LDSHARED-"$cc -dynamiclib -install_name $libdir/$SHAREDLIBM -compatibility_version $VER1 -current_version $VER3"} + if libtool -V 2>&1 | grep Apple > /dev/null; then + AR="libtool" + else + AR="/usr/bin/libtool" + fi + ARFLAGS="-o" ;; + *) LDSHARED=${LDSHARED-"$cc -shared"} ;; esac else # find system name and corresponding cc options CC=${CC-cc} - case `(uname -sr || echo unknown) 2>/dev/null` in + gcc=0 + echo ... using $CC >> configure.log + if test -z "$uname"; then + uname=`(uname -sr || echo unknown) 2>/dev/null` + fi + case "$uname" in HP-UX*) SFLAGS=${CFLAGS-"-O +z"} CFLAGS=${CFLAGS-"-O"} # LDSHARED=${LDSHARED-"ld -b +vnocompatwarnings"} @@ -110,350 +241,591 @@ case `(uname -m || echo unknown) 2>/dev/null` in ia64) shared_ext='.so' - SHAREDLIB='libz.so';; + SHAREDLIB='libz.so' ;; *) shared_ext='.sl' - SHAREDLIB='libz.sl';; - esac;; + SHAREDLIB='libz.sl' ;; + esac ;; IRIX*) SFLAGS=${CFLAGS-"-ansi -O2 -rpath ."} CFLAGS=${CFLAGS-"-ansi -O2"} - LDSHARED=${LDSHARED-"cc -shared"};; + LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so.1"} ;; OSF1\ V4*) SFLAGS=${CFLAGS-"-O -std1"} CFLAGS=${CFLAGS-"-O -std1"} - LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so -Wl,-msym -Wl,-rpath,$(libdir) -Wl,-set_version,${VER}:1.0"};; + LDFLAGS="${LDFLAGS} -Wl,-rpath,." 
+ LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so -Wl,-msym -Wl,-rpath,$(libdir) -Wl,-set_version,${VER}:1.0"} ;; OSF1*) SFLAGS=${CFLAGS-"-O -std1"} CFLAGS=${CFLAGS-"-O -std1"} - LDSHARED=${LDSHARED-"cc -shared"};; + LDSHARED=${LDSHARED-"cc -shared -Wl,-soname,libz.so.1"} ;; QNX*) SFLAGS=${CFLAGS-"-4 -O"} CFLAGS=${CFLAGS-"-4 -O"} LDSHARED=${LDSHARED-"cc"} RANLIB=${RANLIB-"true"} - AR="cc -A";; + AR="cc" + ARFLAGS="-A" ;; SCO_SV\ 3.2*) SFLAGS=${CFLAGS-"-O3 -dy -KPIC "} CFLAGS=${CFLAGS-"-O3"} - LDSHARED=${LDSHARED-"cc -dy -KPIC -G"};; - SunOS\ 5*) SFLAGS=${CFLAGS-"-fast -xcg89 -KPIC -R."} - CFLAGS=${CFLAGS-"-fast -xcg89"} - LDSHARED=${LDSHARED-"cc -G"};; + LDSHARED=${LDSHARED-"cc -dy -KPIC -G"} ;; + SunOS\ 5* | solaris*) + LDSHARED=${LDSHARED-"cc -G -h libz$shared_ext.$VER1"} + SFLAGS=${CFLAGS-"-fast -KPIC"} + CFLAGS=${CFLAGS-"-fast"} + if test $build64 -eq 1; then + # old versions of SunPRO/Workshop/Studio don't support -m64, + # but newer ones do. Check for it. + flag64=`$CC -flags | egrep -- '^-m64'` + if test x"$flag64" != x"" ; then + CFLAGS="${CFLAGS} -m64" + SFLAGS="${SFLAGS} -m64" + else + case `(uname -m || echo unknown) 2>/dev/null` in + i86*) + SFLAGS="$SFLAGS -xarch=amd64" + CFLAGS="$CFLAGS -xarch=amd64" ;; + *) + SFLAGS="$SFLAGS -xarch=v9" + CFLAGS="$CFLAGS -xarch=v9" ;; + esac + fi + fi + ;; SunOS\ 4*) SFLAGS=${CFLAGS-"-O2 -PIC"} CFLAGS=${CFLAGS-"-O2"} - LDSHARED=${LDSHARED-"ld"};; - SunStudio\ 9*) SFLAGS=${CFLAGS-"-DUSE_MMAP -fast -xcode=pic32 -xtarget=ultra3 -xarch=v9b"} - CFLAGS=${CFLAGS-"-DUSE_MMAP -fast -xtarget=ultra3 -xarch=v9b"} - LDSHARED=${LDSHARED-"cc -xarch=v9b"};; + LDSHARED=${LDSHARED-"ld"} ;; + SunStudio\ 9*) SFLAGS=${CFLAGS-"-fast -xcode=pic32 -xtarget=ultra3 -xarch=v9b"} + CFLAGS=${CFLAGS-"-fast -xtarget=ultra3 -xarch=v9b"} + LDSHARED=${LDSHARED-"cc -xarch=v9b"} ;; UNIX_System_V\ 4.2.0) SFLAGS=${CFLAGS-"-KPIC -O"} CFLAGS=${CFLAGS-"-O"} - LDSHARED=${LDSHARED-"cc -G"};; + LDSHARED=${LDSHARED-"cc -G"} ;; UNIX_SV\ 4.2MP) SFLAGS=${CFLAGS-"-Kconform_pic -O"} CFLAGS=${CFLAGS-"-O"} - LDSHARED=${LDSHARED-"cc -G"};; + LDSHARED=${LDSHARED-"cc -G"} ;; OpenUNIX\ 5) SFLAGS=${CFLAGS-"-KPIC -O"} CFLAGS=${CFLAGS-"-O"} - LDSHARED=${LDSHARED-"cc -G"};; + LDSHARED=${LDSHARED-"cc -G"} ;; AIX*) # Courtesy of dbakker at arrayasolutions.com SFLAGS=${CFLAGS-"-O -qmaxmem=8192"} CFLAGS=${CFLAGS-"-O -qmaxmem=8192"} - LDSHARED=${LDSHARED-"xlc -G"};; - # send working options for other systems to support at gzip.org + LDSHARED=${LDSHARED-"xlc -G"} ;; + # send working options for other systems to zlib at gzip.org *) SFLAGS=${CFLAGS-"-O"} CFLAGS=${CFLAGS-"-O"} - LDSHARED=${LDSHARED-"cc -shared"};; + LDSHARED=${LDSHARED-"cc -shared"} ;; esac fi +# destination names for shared library if not defined above SHAREDLIB=${SHAREDLIB-"libz$shared_ext"} SHAREDLIBV=${SHAREDLIBV-"libz$shared_ext.$VER"} SHAREDLIBM=${SHAREDLIBM-"libz$shared_ext.$VER1"} +echo >> configure.log + +# define functions for testing compiler and library characteristics and logging the results + +cat > $test.c </dev/null; then + try() + { + show $* + test "`( $* ) 2>&1 | tee -a configure.log`" = "" + } + echo - using any output from compiler to indicate an error >> configure.log +else +try() +{ + show $* + ( $* ) >> configure.log 2>&1 + ret=$? + if test $ret -ne 0; then + echo "(exit code "$ret")" >> configure.log + fi + return $ret +} +fi + +tryboth() +{ + show $* + got=`( $* ) 2>&1` + ret=$? 
+ printf %s "$got" >> configure.log + if test $ret -ne 0; then + return $ret + fi + test "$got" = "" +} + +cat > $test.c << EOF +int foo() { return 0; } +EOF +echo "Checking for obsessive-compulsive compiler options..." >> configure.log +if try $CC -c $CFLAGS $test.c; then + : +else + echo "Compiler error reporting is too harsh for $0 (perhaps remove -Werror)." | tee -a configure.log + leave 1 +fi + +echo >> configure.log + +# see if shared library build supported +cat > $test.c <&1`" = "" && - test "`($LDSHARED -o $test$shared_ext $test.o) 2>&1`" = ""; then - CFLAGS="$SFLAGS" - LIBS="$SHAREDLIBV" - echo Building shared library $SHAREDLIBV with $CC. + if try $CC -w -c $SFLAGS $test.c && + try $LDSHARED $SFLAGS -o $test$shared_ext $test.o; then + echo Building shared library $SHAREDLIBV with $CC. | tee -a configure.log elif test -z "$old_cc" -a -z "$old_cflags"; then - echo No shared library support. + echo No shared library support. | tee -a configure.log shared=0; else - echo 'No shared library support; try without defining CC and CFLAGS' + echo 'No shared library support; try without defining CC and CFLAGS' | tee -a configure.log shared=0; fi fi if test $shared -eq 0; then LDSHARED="$CC" - echo Building static library $LIBS version $VER with $CC. + ALL="static" + TEST="all teststatic" + SHAREDLIB="" + SHAREDLIBV="" + SHAREDLIBM="" + echo Building static library $STATICLIB version $VER with $CC. | tee -a configure.log else - LDFLAGS="-L. ${SHAREDLIBV}" + ALL="static shared" + TEST="all teststatic testshared" fi +# check for underscores in external names for use by assembler code +CPP=${CPP-"$CC -E"} +case $CFLAGS in + *ASMV*) + echo >> configure.log + show "$NM $test.o | grep _hello" + if test "`$NM $test.o | grep _hello | tee -a configure.log`" = ""; then + CPP="$CPP -DNO_UNDERLINE" + echo Checking for underline in external names... No. | tee -a configure.log + else + echo Checking for underline in external names... Yes. | tee -a configure.log + fi ;; +esac + +echo >> configure.log + +# check for large file support, and if none, check for fseeko() +cat > $test.c < +off64_t dummy = 0; +EOF +if try $CC -c $CFLAGS -D_LARGEFILE64_SOURCE=1 $test.c; then + CFLAGS="${CFLAGS} -D_LARGEFILE64_SOURCE=1" + SFLAGS="${SFLAGS} -D_LARGEFILE64_SOURCE=1" + ALL="${ALL} all64" + TEST="${TEST} test64" + echo "Checking for off64_t... Yes." | tee -a configure.log + echo "Checking for fseeko... Yes." | tee -a configure.log +else + echo "Checking for off64_t... No." | tee -a configure.log + echo >> configure.log + cat > $test.c < +int main(void) { + fseeko(NULL, 0, 0); + return 0; +} +EOF + if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for fseeko... Yes." | tee -a configure.log + else + CFLAGS="${CFLAGS} -DNO_FSEEKO" + SFLAGS="${SFLAGS} -DNO_FSEEKO" + echo "Checking for fseeko... No." | tee -a configure.log + fi +fi + +echo >> configure.log + +# check for strerror() for use by gz* functions +cat > $test.c < +#include +int main() { return strlen(strerror(errno)); } +EOF +if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for strerror... Yes." | tee -a configure.log +else + CFLAGS="${CFLAGS} -DNO_STRERROR" + SFLAGS="${SFLAGS} -DNO_STRERROR" + echo "Checking for strerror... No." 
| tee -a configure.log +fi + +# copy clean zconf.h for subsequent edits +cp -p zconf.h.in zconf.h + +echo >> configure.log + +# check for unistd.h and save result in zconf.h cat > $test.c < int main() { return 0; } EOF -if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - sed < zconf.in.h "/HAVE_UNISTD_H/s%0%1%" > zconf.h - echo "Checking for unistd.h... Yes." +if try $CC -c $CFLAGS $test.c; then + sed < zconf.h "/^#ifdef HAVE_UNISTD_H.* may be/s/def HAVE_UNISTD_H\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo "Checking for unistd.h... Yes." | tee -a configure.log else - cp -p zconf.in.h zconf.h - echo "Checking for unistd.h... No." + echo "Checking for unistd.h... No." | tee -a configure.log fi +echo >> configure.log + +# check for stdarg.h and save result in zconf.h +cat > $test.c < +int main() { return 0; } +EOF +if try $CC -c $CFLAGS $test.c; then + sed < zconf.h "/^#ifdef HAVE_STDARG_H.* may be/s/def HAVE_STDARG_H\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo "Checking for stdarg.h... Yes." | tee -a configure.log +else + echo "Checking for stdarg.h... No." | tee -a configure.log +fi + +# if the z_ prefix was requested, save that in zconf.h +if test $zprefix -eq 1; then + sed < zconf.h "/#ifdef Z_PREFIX.* may be/s/def Z_PREFIX\(.*\) may be/ 1\1 was/" > zconf.temp.h + mv zconf.temp.h zconf.h + echo >> configure.log + echo "Using z_ prefix on all symbols." | tee -a configure.log +fi + +# if --solo compilation was requested, save that in zconf.h and remove gz stuff from object lists +if test $solo -eq 1; then + sed '/#define ZCONF_H/a\ +#define Z_SOLO + +' < zconf.h > zconf.temp.h + mv zconf.temp.h zconf.h +OBJC='$(OBJZ)' +PIC_OBJC='$(PIC_OBJZ)' +fi + +# if code coverage testing was requested, use older gcc if defined, e.g. "gcc-4.2" on Mac OS X +if test $cover -eq 1; then + CFLAGS="${CFLAGS} -fprofile-arcs -ftest-coverage" + if test -n "$GCC_CLASSIC"; then + CC=$GCC_CLASSIC + fi +fi + +echo >> configure.log + +# conduct a series of tests to resolve eight possible cases of using "vs" or "s" printf functions +# (using stdarg or not), with or without "n" (proving size of buffer), and with or without a +# return value. The most secure result is vsnprintf() with a return value. snprintf() with a +# return value is secure as well, but then gzprintf() will be limited to 20 arguments. cat > $test.c < #include #include "zconf.h" - int main() { #ifndef STDC choke me #endif - return 0; } EOF +if try $CC -c $CFLAGS $test.c; then + echo "Checking whether to use vs[n]printf() or s[n]printf()... using vs[n]printf()." | tee -a configure.log -if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - echo "Checking whether to use vs[n]printf() or s[n]printf()... using vs[n]printf()" - + echo >> configure.log cat > $test.c < #include - -int mytest(char *fmt, ...) +int mytest(const char *fmt, ...) { char buf[20]; va_list ap; - va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); return 0; } +int main() +{ + return (mytest("Hello%d\n", 1)); +} +EOF + if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for vsnprintf() in stdio.h... Yes." | tee -a configure.log + echo >> configure.log + cat >$test.c < +#include +int mytest(const char *fmt, ...) +{ + int n; + char buf[20]; + va_list ap; + va_start(ap, fmt); + n = vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + return n; +} int main() { return (mytest("Hello%d\n", 1)); } EOF - if test "`($CC $CFLAGS -o $test $test.c) 2>&1`" = ""; then - echo "Checking for vsnprintf() in stdio.h... 
Yes." + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of vsnprintf()... Yes." | tee -a configure.log + else + CFLAGS="$CFLAGS -DHAS_vsnprintf_void" + SFLAGS="$SFLAGS -DHAS_vsnprintf_void" + echo "Checking for return value of vsnprintf()... No." | tee -a configure.log + echo " WARNING: apparently vsnprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + fi + else + CFLAGS="$CFLAGS -DNO_vsnprintf" + SFLAGS="$SFLAGS -DNO_vsnprintf" + echo "Checking for vsnprintf() in stdio.h... No." | tee -a configure.log + echo " WARNING: vsnprintf() not found, falling back to vsprintf(). zlib" | tee -a configure.log + echo " can build but will be open to possible buffer-overflow security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + echo >> configure.log cat >$test.c < #include - -int mytest(char *fmt, ...) +int mytest(const char *fmt, ...) { int n; char buf[20]; va_list ap; - va_start(ap, fmt); - n = vsnprintf(buf, sizeof(buf), fmt, ap); + n = vsprintf(buf, fmt, ap); va_end(ap); return n; } - int main() { return (mytest("Hello%d\n", 1)); } EOF - if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - echo "Checking for return value of vsnprintf()... Yes." - else - CFLAGS="$CFLAGS -DHAS_vsnprintf_void" - echo "Checking for return value of vsnprintf()... No." - echo " WARNING: apparently vsnprintf() does not return a value. zlib" - echo " can build but will be open to possible string-format security" - echo " vulnerabilities." - fi - else - CFLAGS="$CFLAGS -DNO_vsnprintf" - echo "Checking for vsnprintf() in stdio.h... No." - echo " WARNING: vsnprintf() not found, falling back to vsprintf(). zlib" - echo " can build but will be open to possible buffer-overflow security" - echo " vulnerabilities." - - cat >$test.c < -#include - -int mytest(char *fmt, ...) -{ - int n; - char buf[20]; - va_list ap; - - va_start(ap, fmt); - n = vsprintf(buf, fmt, ap); - va_end(ap); - return n; -} - -int main() -{ - return (mytest("Hello%d\n", 1)); -} -EOF - - if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - echo "Checking for return value of vsprintf()... Yes." + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of vsprintf()... Yes." | tee -a configure.log else CFLAGS="$CFLAGS -DHAS_vsprintf_void" - echo "Checking for return value of vsprintf()... No." - echo " WARNING: apparently vsprintf() does not return a value. zlib" - echo " can build but will be open to possible string-format security" - echo " vulnerabilities." + SFLAGS="$SFLAGS -DHAS_vsprintf_void" + echo "Checking for return value of vsprintf()... No." | tee -a configure.log + echo " WARNING: apparently vsprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log fi fi else - echo "Checking whether to use vs[n]printf() or s[n]printf()... using s[n]printf()" + echo "Checking whether to use vs[n]printf() or s[n]printf()... using s[n]printf()." | tee -a configure.log + echo >> configure.log cat >$test.c < - int mytest() { char buf[20]; - snprintf(buf, sizeof(buf), "%s", "foo"); return 0; } - int main() { return (mytest()); } EOF - if test "`($CC $CFLAGS -o $test $test.c) 2>&1`" = ""; then - echo "Checking for snprintf() in stdio.h... Yes." 
+ if try $CC $CFLAGS -o $test $test.c; then + echo "Checking for snprintf() in stdio.h... Yes." | tee -a configure.log + echo >> configure.log cat >$test.c < - int mytest() { char buf[20]; - return snprintf(buf, sizeof(buf), "%s", "foo"); } - int main() { return (mytest()); } EOF - if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - echo "Checking for return value of snprintf()... Yes." + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of snprintf()... Yes." | tee -a configure.log else CFLAGS="$CFLAGS -DHAS_snprintf_void" - echo "Checking for return value of snprintf()... No." - echo " WARNING: apparently snprintf() does not return a value. zlib" - echo " can build but will be open to possible string-format security" - echo " vulnerabilities." + SFLAGS="$SFLAGS -DHAS_snprintf_void" + echo "Checking for return value of snprintf()... No." | tee -a configure.log + echo " WARNING: apparently snprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log fi else CFLAGS="$CFLAGS -DNO_snprintf" - echo "Checking for snprintf() in stdio.h... No." - echo " WARNING: snprintf() not found, falling back to sprintf(). zlib" - echo " can build but will be open to possible buffer-overflow security" - echo " vulnerabilities." + SFLAGS="$SFLAGS -DNO_snprintf" + echo "Checking for snprintf() in stdio.h... No." | tee -a configure.log + echo " WARNING: snprintf() not found, falling back to sprintf(). zlib" | tee -a configure.log + echo " can build but will be open to possible buffer-overflow security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log + echo >> configure.log cat >$test.c < - int mytest() { char buf[20]; - return sprintf(buf, "%s", "foo"); } - int main() { return (mytest()); } EOF - if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - echo "Checking for return value of sprintf()... Yes." + if try $CC -c $CFLAGS $test.c; then + echo "Checking for return value of sprintf()... Yes." | tee -a configure.log else CFLAGS="$CFLAGS -DHAS_sprintf_void" - echo "Checking for return value of sprintf()... No." - echo " WARNING: apparently sprintf() does not return a value. zlib" - echo " can build but will be open to possible string-format security" - echo " vulnerabilities." + SFLAGS="$SFLAGS -DHAS_sprintf_void" + echo "Checking for return value of sprintf()... No." | tee -a configure.log + echo " WARNING: apparently sprintf() does not return a value. zlib" | tee -a configure.log + echo " can build but will be open to possible string-format security" | tee -a configure.log + echo " vulnerabilities." | tee -a configure.log fi fi fi -cat >$test.c < -int main() { return 0; } +# see if we can hide zlib internal symbols that are linked between separate source files +if test "$gcc" -eq 1; then + echo >> configure.log + cat > $test.c <&1`" = ""; then - echo "Checking for errno.h... Yes." -else - echo "Checking for errno.h... No." - CFLAGS="$CFLAGS -DNO_ERRNO_H" + if tryboth $CC -c $CFLAGS $test.c; then + CFLAGS="$CFLAGS -DHAVE_HIDDEN" + SFLAGS="$SFLAGS -DHAVE_HIDDEN" + echo "Checking for attribute(visibility) support... Yes." | tee -a configure.log + else + echo "Checking for attribute(visibility) support... No." 
| tee -a configure.log + fi fi -cat > $test.c < -#include -#include -caddr_t hello() { - return mmap((caddr_t)0, (off_t)0, PROT_READ, MAP_SHARED, 0, (off_t)0); -} -EOF -if test "`($CC -c $CFLAGS $test.c) 2>&1`" = ""; then - CFLAGS="$CFLAGS -DUSE_MMAP" - echo Checking for mmap support... Yes. -else - echo Checking for mmap support... No. -fi +# show the results in the log +echo >> configure.log +echo ALL = $ALL >> configure.log +echo AR = $AR >> configure.log +echo ARFLAGS = $ARFLAGS >> configure.log +echo CC = $CC >> configure.log +echo CFLAGS = $CFLAGS >> configure.log +echo CPP = $CPP >> configure.log +echo EXE = $EXE >> configure.log +echo LDCONFIG = $LDCONFIG >> configure.log +echo LDFLAGS = $LDFLAGS >> configure.log +echo LDSHARED = $LDSHARED >> configure.log +echo LDSHAREDLIBC = $LDSHAREDLIBC >> configure.log +echo OBJC = $OBJC >> configure.log +echo PIC_OBJC = $PIC_OBJC >> configure.log +echo RANLIB = $RANLIB >> configure.log +echo SFLAGS = $SFLAGS >> configure.log +echo SHAREDLIB = $SHAREDLIB >> configure.log +echo SHAREDLIBM = $SHAREDLIBM >> configure.log +echo SHAREDLIBV = $SHAREDLIBV >> configure.log +echo STATICLIB = $STATICLIB >> configure.log +echo TEST = $TEST >> configure.log +echo VER = $VER >> configure.log +echo Z_U4 = $Z_U4 >> configure.log +echo exec_prefix = $exec_prefix >> configure.log +echo includedir = $includedir >> configure.log +echo libdir = $libdir >> configure.log +echo mandir = $mandir >> configure.log +echo prefix = $prefix >> configure.log +echo sharedlibdir = $sharedlibdir >> configure.log +echo uname = $uname >> configure.log -CPP=${CPP-"$CC -E"} -case $CFLAGS in - *ASMV*) - if test "`nm $test.o | grep _hello`" = ""; then - CPP="$CPP -DNO_UNDERLINE" - echo Checking for underline in external names... No. - else - echo Checking for underline in external names... Yes. 
- fi;; -esac +# udpate Makefile with the configure results +sed < Makefile.in " +/^CC *=/s#=.*#=$CC# +/^CFLAGS *=/s#=.*#=$CFLAGS# +/^SFLAGS *=/s#=.*#=$SFLAGS# +/^LDFLAGS *=/s#=.*#=$LDFLAGS# +/^LDSHARED *=/s#=.*#=$LDSHARED# +/^CPP *=/s#=.*#=$CPP# +/^STATICLIB *=/s#=.*#=$STATICLIB# +/^SHAREDLIB *=/s#=.*#=$SHAREDLIB# +/^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV# +/^SHAREDLIBM *=/s#=.*#=$SHAREDLIBM# +/^AR *=/s#=.*#=$AR# +/^ARFLAGS *=/s#=.*#=$ARFLAGS# +/^RANLIB *=/s#=.*#=$RANLIB# +/^LDCONFIG *=/s#=.*#=$LDCONFIG# +/^LDSHAREDLIBC *=/s#=.*#=$LDSHAREDLIBC# +/^EXE *=/s#=.*#=$EXE# +/^prefix *=/s#=.*#=$prefix# +/^exec_prefix *=/s#=.*#=$exec_prefix# +/^libdir *=/s#=.*#=$libdir# +/^sharedlibdir *=/s#=.*#=$sharedlibdir# +/^includedir *=/s#=.*#=$includedir# +/^mandir *=/s#=.*#=$mandir# +/^OBJC *=/s#=.*#= $OBJC# +/^PIC_OBJC *=/s#=.*#= $PIC_OBJC# +/^all: */s#:.*#: $ALL# +/^test: */s#:.*#: $TEST# +" > Makefile -rm -f $test.[co] $test $test$shared_ext - -# udpate Makefile -sed < Makefile.in " +# create zlib.pc with the configure results +sed < zlib.pc.in " /^CC *=/s#=.*#=$CC# /^CFLAGS *=/s#=.*#=$CFLAGS# /^CPP *=/s#=.*#=$CPP# /^LDSHARED *=/s#=.*#=$LDSHARED# -/^LIBS *=/s#=.*#=$LIBS# +/^STATICLIB *=/s#=.*#=$STATICLIB# /^SHAREDLIB *=/s#=.*#=$SHAREDLIB# /^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV# /^SHAREDLIBM *=/s#=.*#=$SHAREDLIBM# /^AR *=/s#=.*#=$AR# +/^ARFLAGS *=/s#=.*#=$ARFLAGS# /^RANLIB *=/s#=.*#=$RANLIB# /^EXE *=/s#=.*#=$EXE# /^prefix *=/s#=.*#=$prefix# /^exec_prefix *=/s#=.*#=$exec_prefix# /^libdir *=/s#=.*#=$libdir# +/^sharedlibdir *=/s#=.*#=$sharedlibdir# /^includedir *=/s#=.*#=$includedir# /^mandir *=/s#=.*#=$mandir# /^LDFLAGS *=/s#=.*#=$LDFLAGS# -" > Makefile +" | sed -e " +s/\@VERSION\@/$VER/g; +" > zlib.pc + +# done +leave 0 diff --git a/Modules/zlib/crc32.c b/Modules/zlib/crc32.c --- a/Modules/zlib/crc32.c +++ b/Modules/zlib/crc32.c @@ -1,5 +1,5 @@ /* crc32.c -- compute the CRC-32 of a data stream - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2006, 2010, 2011, 2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h * * Thanks to Rodney Brown for his contribution of faster @@ -17,6 +17,8 @@ of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should first call get_crc_table() to initialize the tables before allowing more than one thread to use crc32(). + + DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. */ #ifdef MAKECRCH @@ -30,31 +32,11 @@ #define local static -/* Find a four-byte integer type for crc32_little() and crc32_big(). */ -#ifndef NOBYFOUR -# ifdef STDC /* need ANSI C limits.h to determine sizes */ -# include -# define BYFOUR -# if (UINT_MAX == 0xffffffffUL) - typedef unsigned int u4; -# else -# if (ULONG_MAX == 0xffffffffUL) - typedef unsigned long u4; -# else -# if (USHRT_MAX == 0xffffffffUL) - typedef unsigned short u4; -# else -# undef BYFOUR /* can't find a four-byte integer type! */ -# endif -# endif -# endif -# endif /* STDC */ -#endif /* !NOBYFOUR */ - /* Definitions for doing the crc four data bytes at a time. 
*/ +#if !defined(NOBYFOUR) && defined(Z_U4) +# define BYFOUR +#endif #ifdef BYFOUR -# define REV(w) (((w)>>24)+(((w)>>8)&0xff00)+ \ - (((w)&0xff00)<<8)+(((w)&0xff)<<24)) local unsigned long crc32_little OF((unsigned long, const unsigned char FAR *, unsigned)); local unsigned long crc32_big OF((unsigned long, @@ -68,14 +50,16 @@ local unsigned long gf2_matrix_times OF((unsigned long *mat, unsigned long vec)); local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); +local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2)); + #ifdef DYNAMIC_CRC_TABLE local volatile int crc_table_empty = 1; -local unsigned long FAR crc_table[TBLS][256]; +local z_crc_t FAR crc_table[TBLS][256]; local void make_crc_table OF((void)); #ifdef MAKECRCH - local void write_table OF((FILE *, const unsigned long FAR *)); + local void write_table OF((FILE *, const z_crc_t FAR *)); #endif /* MAKECRCH */ /* Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: @@ -105,9 +89,9 @@ */ local void make_crc_table() { - unsigned long c; + z_crc_t c; int n, k; - unsigned long poly; /* polynomial exclusive-or pattern */ + z_crc_t poly; /* polynomial exclusive-or pattern */ /* terms of polynomial defining this crc (except x^32): */ static volatile int first = 1; /* flag to limit concurrent making */ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; @@ -119,13 +103,13 @@ first = 0; /* make exclusive-or pattern from polynomial (0xedb88320UL) */ - poly = 0UL; - for (n = 0; n < sizeof(p)/sizeof(unsigned char); n++) - poly |= 1UL << (31 - p[n]); + poly = 0; + for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++) + poly |= (z_crc_t)1 << (31 - p[n]); /* generate a crc for every 8-bit value */ for (n = 0; n < 256; n++) { - c = (unsigned long)n; + c = (z_crc_t)n; for (k = 0; k < 8; k++) c = c & 1 ? poly ^ (c >> 1) : c >> 1; crc_table[0][n] = c; @@ -136,11 +120,11 @@ and then the byte reversal of those as well as the first table */ for (n = 0; n < 256; n++) { c = crc_table[0][n]; - crc_table[4][n] = REV(c); + crc_table[4][n] = ZSWAP32(c); for (k = 1; k < 4; k++) { c = crc_table[0][c & 0xff] ^ (c >> 8); crc_table[k][n] = c; - crc_table[k + 4][n] = REV(c); + crc_table[k + 4][n] = ZSWAP32(c); } } #endif /* BYFOUR */ @@ -162,7 +146,7 @@ if (out == NULL) return; fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); - fprintf(out, "local const unsigned long FAR "); + fprintf(out, "local const z_crc_t FAR "); fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); write_table(out, crc_table[0]); # ifdef BYFOUR @@ -182,12 +166,13 @@ #ifdef MAKECRCH local void write_table(out, table) FILE *out; - const unsigned long FAR *table; + const z_crc_t FAR *table; { int n; for (n = 0; n < 256; n++) - fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", table[n], + fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", + (unsigned long)(table[n]), n == 255 ? "\n" : (n % 5 == 4 ? 
",\n" : ", ")); } #endif /* MAKECRCH */ @@ -202,13 +187,13 @@ /* ========================================================================= * This function can be used by asm versions of crc32() */ -const unsigned long FAR * ZEXPORT get_crc_table() +const z_crc_t FAR * ZEXPORT get_crc_table() { #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ - return (const unsigned long FAR *)crc_table; + return (const z_crc_t FAR *)crc_table; } /* ========================================================================= */ @@ -219,7 +204,7 @@ unsigned long ZEXPORT crc32(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; - unsigned len; + uInt len; { if (buf == Z_NULL) return 0UL; @@ -230,7 +215,7 @@ #ifdef BYFOUR if (sizeof(void *) == sizeof(ptrdiff_t)) { - u4 endian; + z_crc_t endian; endian = 1; if (*((unsigned char *)(&endian))) @@ -264,17 +249,17 @@ const unsigned char FAR *buf; unsigned len; { - register u4 c; - register const u4 FAR *buf4; + register z_crc_t c; + register const z_crc_t FAR *buf4; - c = (u4)crc; + c = (z_crc_t)crc; c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); len--; } - buf4 = (const u4 FAR *)(const void FAR *)buf; + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOLIT32; len -= 32; @@ -304,17 +289,17 @@ const unsigned char FAR *buf; unsigned len; { - register u4 c; - register const u4 FAR *buf4; + register z_crc_t c; + register const z_crc_t FAR *buf4; - c = REV((u4)crc); + c = ZSWAP32((z_crc_t)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); len--; } - buf4 = (const u4 FAR *)(const void FAR *)buf; + buf4 = (const z_crc_t FAR *)(const void FAR *)buf; buf4--; while (len >= 32) { DOBIG32; @@ -331,7 +316,7 @@ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); } while (--len); c = ~c; - return (unsigned long)(REV(c)); + return (unsigned long)(ZSWAP32(c)); } #endif /* BYFOUR */ @@ -367,22 +352,22 @@ } /* ========================================================================= */ -uLong ZEXPORT crc32_combine(crc1, crc2, len2) +local uLong crc32_combine_(crc1, crc2, len2) uLong crc1; uLong crc2; - z_off_t len2; + z_off64_t len2; { int n; unsigned long row; unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ - /* degenerate case */ - if (len2 == 0) + /* degenerate case (also disallow negative lengths) */ + if (len2 <= 0) return crc1; /* put operator for one zero bit in odd */ - odd[0] = 0xedb88320L; /* CRC-32 polynomial */ + odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ row = 1; for (n = 1; n < GF2_DIM; n++) { odd[n] = row; @@ -421,3 +406,20 @@ crc1 ^= crc2; return crc1; } + +/* ========================================================================= */ +uLong ZEXPORT crc32_combine(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} + +uLong ZEXPORT crc32_combine64(crc1, crc2, len2) + uLong crc1; + uLong crc2; + z_off64_t len2; +{ + return crc32_combine_(crc1, crc2, len2); +} diff --git a/Modules/zlib/crc32.h b/Modules/zlib/crc32.h --- a/Modules/zlib/crc32.h +++ b/Modules/zlib/crc32.h @@ -2,7 +2,7 @@ * Generated automatically by crc32.c */ -local const unsigned long FAR crc_table[TBLS][256] = +local const z_crc_t FAR crc_table[TBLS][256] = { { 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, diff --git a/Modules/zlib/deflate.c 
b/Modules/zlib/deflate.c --- a/Modules/zlib/deflate.c +++ b/Modules/zlib/deflate.c @@ -1,5 +1,5 @@ /* deflate.c -- compress data using the deflation algorithm - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -37,7 +37,7 @@ * REFERENCES * * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". - * Available in http://www.ietf.org/rfc/rfc1951.txt + * Available in http://tools.ietf.org/html/rfc1951 * * A description of the Rabin and Karp algorithm is given in the book * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. @@ -52,7 +52,7 @@ #include "deflate.h" const char deflate_copyright[] = - " deflate 1.2.3 Copyright 1995-2005 Jean-loup Gailly "; + " deflate 1.2.8 Copyright 1995-2013 Jean-loup Gailly and Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot @@ -79,19 +79,18 @@ #ifndef FASTEST local block_state deflate_slow OF((deflate_state *s, int flush)); #endif +local block_state deflate_rle OF((deflate_state *s, int flush)); +local block_state deflate_huff OF((deflate_state *s, int flush)); local void lm_init OF((deflate_state *s)); local void putShortMSB OF((deflate_state *s, uInt b)); local void flush_pending OF((z_streamp strm)); local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); -#ifndef FASTEST #ifdef ASMV void match_init OF((void)); /* asm code initialization */ uInt longest_match OF((deflate_state *s, IPos cur_match)); #else local uInt longest_match OF((deflate_state *s, IPos cur_match)); #endif -#endif -local uInt longest_match_fast OF((deflate_state *s, IPos cur_match)); #ifdef DEBUG local void check_match OF((deflate_state *s, IPos start, IPos match, @@ -110,11 +109,6 @@ #endif /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ -#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) -/* Minimum amount of lookahead, except at the end of the input file. - * See deflate.c for comments about the MIN_MATCH+1. - */ - /* Values for max_lazy_match, good_match and max_chain_length, depending on * the desired pack level (0..9). The values given below have been tuned to * exclude worst case performance for pathological files. Better values may be @@ -161,9 +155,12 @@ struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ #endif +/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ +#define RANK(f) (((f) << 1) - ((f) > 4 ? 9 : 0)) + /* =========================================================================== * Update a hash value with the given input byte - * IN assertion: all calls to UPDATE_HASH are made with consecutive + * IN assertion: all calls to to UPDATE_HASH are made with consecutive * input characters, so that a running hash key can be computed from the * previous key instead of complete recalculation each time. */ @@ -176,7 +173,7 @@ * the previous length of the hash chain. * If this file is compiled with -DFASTEST, the compression level is forced * to 1, and no hash chains are maintained. - * IN assertion: all calls to INSERT_STRING are made with consecutive + * IN assertion: all calls to to INSERT_STRING are made with consecutive * input characters and the first MIN_MATCH bytes of str are valid * (except for the last MIN_MATCH-1 bytes of the input file). 
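 * (Editorial sketch, not part of this changeset: UPDATE_HASH maintains a
 * rolling hash of the last MIN_MATCH (3) bytes, so consecutive input costs
 * one shift/xor per byte instead of rehashing all three.  Roughly, with
 * illustrative names and the real hash_shift/hash_mask fields:
 *
 *     uInt h = 0;
 *     for (i = 0; i < MIN_MATCH; i++)        // prime on the first 3 bytes
 *         h = ((h << s->hash_shift) ^ window[i]) & s->hash_mask;
 *     // thereafter each new byte b only needs:
 *     h = ((h << s->hash_shift) ^ b) & s->hash_mask;
 *
 * which is why the IN assertion in this comment requires consecutive input
 * characters.)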
*/ @@ -241,10 +238,19 @@ strm->msg = Z_NULL; if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; +#endif } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif #ifdef FASTEST if (level != 0) level = 1; @@ -288,6 +294,8 @@ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + s->high_water = 0; /* nothing written to s->window yet */ + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); @@ -297,7 +305,7 @@ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { s->status = FINISH_STATE; - strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); + strm->msg = ERR_MSG(Z_MEM_ERROR); deflateEnd (strm); return Z_MEM_ERROR; } @@ -318,43 +326,70 @@ uInt dictLength; { deflate_state *s; - uInt length = dictLength; - uInt n; - IPos hash_head = 0; + uInt str, n; + int wrap; + unsigned avail; + z_const unsigned char *next; - if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL || - strm->state->wrap == 2 || - (strm->state->wrap == 1 && strm->state->status != INIT_STATE)) + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + return Z_STREAM_ERROR; + s = strm->state; + wrap = s->wrap; + if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) return Z_STREAM_ERROR; - s = strm->state; - if (s->wrap) + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap == 1) strm->adler = adler32(strm->adler, dictionary, dictLength); + s->wrap = 0; /* avoid computing Adler-32 in read_buf */ - if (length < MIN_MATCH) return Z_OK; - if (length > MAX_DIST(s)) { - length = MAX_DIST(s); - dictionary += dictLength - length; /* use the tail of the dictionary */ + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s->w_size) { + if (wrap == 0) { /* already empty otherwise */ + CLEAR_HASH(s); + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } + dictionary += dictLength - s->w_size; /* use the tail */ + dictLength = s->w_size; } - zmemcpy(s->window, dictionary, length); - s->strstart = length; - s->block_start = (long)length; - /* Insert all strings in the hash table (except for the last two bytes). - * s->lookahead stays null, so s->ins_h will be recomputed at the next - * call of fill_window. 
- */ - s->ins_h = s->window[0]; - UPDATE_HASH(s, s->ins_h, s->window[1]); - for (n = 0; n <= length - MIN_MATCH; n++) { - INSERT_STRING(s, n, hash_head); + /* insert dictionary into window and hash */ + avail = strm->avail_in; + next = strm->next_in; + strm->avail_in = dictLength; + strm->next_in = (z_const Bytef *)dictionary; + fill_window(s); + while (s->lookahead >= MIN_MATCH) { + str = s->strstart; + n = s->lookahead - (MIN_MATCH-1); + do { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + } while (--n); + s->strstart = str; + s->lookahead = MIN_MATCH-1; + fill_window(s); } - if (hash_head) hash_head = 0; /* to make compiler happy */ + s->strstart += s->lookahead; + s->block_start = (long)s->strstart; + s->insert = s->lookahead; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + strm->next_in = next; + strm->avail_in = avail; + s->wrap = wrap; return Z_OK; } /* ========================================================================= */ -int ZEXPORT deflateReset (strm) +int ZEXPORT deflateResetKeep (strm) z_streamp strm; { deflate_state *s; @@ -384,12 +419,23 @@ s->last_flush = Z_NO_FLUSH; _tr_init(s); - lm_init(s); return Z_OK; } /* ========================================================================= */ +int ZEXPORT deflateReset (strm) + z_streamp strm; +{ + int ret; + + ret = deflateResetKeep(strm); + if (ret == Z_OK) + lm_init(strm->state); + return ret; +} + +/* ========================================================================= */ int ZEXPORT deflateSetHeader (strm, head) z_streamp strm; gz_headerp head; @@ -401,14 +447,42 @@ } /* ========================================================================= */ +int ZEXPORT deflatePending (strm, pending, bits) + unsigned *pending; + int *bits; + z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + if (pending != Z_NULL) + *pending = strm->state->pending; + if (bits != Z_NULL) + *bits = strm->state->bi_valid; + return Z_OK; +} + +/* ========================================================================= */ int ZEXPORT deflatePrime (strm, bits, value) z_streamp strm; int bits; int value; { + deflate_state *s; + int put; + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; - strm->state->bi_valid = bits; - strm->state->bi_buf = (ush)(value & ((1 << bits) - 1)); + s = strm->state; + if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) + return Z_BUF_ERROR; + do { + put = Buf_size - s->bi_valid; + if (put > bits) + put = bits; + s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); + s->bi_valid += put; + _tr_flush_bits(s); + value >>= put; + bits -= put; + } while (bits); return Z_OK; } @@ -435,9 +509,12 @@ } func = configuration_table[s->level].func; - if (func != configuration_table[level].func && strm->total_in != 0) { + if ((strategy != s->strategy || func != configuration_table[level].func) && + strm->total_in != 0) { /* Flush the last buffer: */ - err = deflate(strm, Z_PARTIAL_FLUSH); + err = deflate(strm, Z_BLOCK); + if (err == Z_BUF_ERROR && s->pending == 0) + err = Z_OK; } if (s->level != level) { s->level = level; @@ -481,33 +558,66 @@ * resulting from using fixed blocks instead of stored blocks, which deflate * can emit on compressed data for some combinations of the parameters. 
* - * This function could be more sophisticated to provide closer upper bounds - * for every combination of windowBits and memLevel, as well as wrap. - * But even the conservative upper bound of about 14% expansion does not - * seem onerous for output buffer allocation. + * This function could be more sophisticated to provide closer upper bounds for + * every combination of windowBits and memLevel. But even the conservative + * upper bound of about 14% expansion does not seem onerous for output buffer + * allocation. */ uLong ZEXPORT deflateBound(strm, sourceLen) z_streamp strm; uLong sourceLen; { deflate_state *s; - uLong destLen; + uLong complen, wraplen; + Bytef *str; - /* conservative upper bound */ - destLen = sourceLen + - ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 11; + /* conservative upper bound for compressed data */ + complen = sourceLen + + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; - /* if can't get parameters, return conservative bound */ + /* if can't get parameters, return conservative bound plus zlib wrapper */ if (strm == Z_NULL || strm->state == Z_NULL) - return destLen; + return complen + 6; + + /* compute wrapper length */ + s = strm->state; + switch (s->wrap) { + case 0: /* raw deflate */ + wraplen = 0; + break; + case 1: /* zlib wrapper */ + wraplen = 6 + (s->strstart ? 4 : 0); + break; + case 2: /* gzip wrapper */ + wraplen = 18; + if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ + if (s->gzhead->extra != Z_NULL) + wraplen += 2 + s->gzhead->extra_len; + str = s->gzhead->name; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + str = s->gzhead->comment; + if (str != Z_NULL) + do { + wraplen++; + } while (*str++); + if (s->gzhead->hcrc) + wraplen += 2; + } + break; + default: /* for compiler happiness */ + wraplen = 6; + } /* if not default parameters, return conservative bound */ - s = strm->state; if (s->w_bits != 15 || s->hash_bits != 8 + 7) - return destLen; + return complen + wraplen; /* default settings: return tight bound for that case */ - return compressBound(sourceLen); + return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + + (sourceLen >> 25) + 13 - 6 + wraplen; } /* ========================================================================= @@ -532,19 +642,22 @@ local void flush_pending(strm) z_streamp strm; { - unsigned len = strm->state->pending; + unsigned len; + deflate_state *s = strm->state; + _tr_flush_bits(s); + len = s->pending; if (len > strm->avail_out) len = strm->avail_out; if (len == 0) return; - zmemcpy(strm->next_out, strm->state->pending_out, len); + zmemcpy(strm->next_out, s->pending_out, len); strm->next_out += len; - strm->state->pending_out += len; + s->pending_out += len; strm->total_out += len; strm->avail_out -= len; - strm->state->pending -= len; - if (strm->state->pending == 0) { - strm->state->pending_out = strm->state->pending_buf; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; } } @@ -557,7 +670,7 @@ deflate_state *s; if (strm == Z_NULL || strm->state == Z_NULL || - flush > Z_FINISH || flush < 0) { + flush > Z_BLOCK || flush < 0) { return Z_STREAM_ERROR; } s = strm->state; @@ -581,7 +694,7 @@ put_byte(s, 31); put_byte(s, 139); put_byte(s, 8); - if (s->gzhead == NULL) { + if (s->gzhead == Z_NULL) { put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); @@ -608,7 +721,7 @@ (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 
4 : 0)); put_byte(s, s->gzhead->os & 0xff); - if (s->gzhead->extra != NULL) { + if (s->gzhead->extra != Z_NULL) { put_byte(s, s->gzhead->extra_len & 0xff); put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); } @@ -650,7 +763,7 @@ } #ifdef GZIP if (s->status == EXTRA_STATE) { - if (s->gzhead->extra != NULL) { + if (s->gzhead->extra != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ while (s->gzindex < (s->gzhead->extra_len & 0xffff)) { @@ -678,7 +791,7 @@ s->status = NAME_STATE; } if (s->status == NAME_STATE) { - if (s->gzhead->name != NULL) { + if (s->gzhead->name != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ int val; @@ -709,7 +822,7 @@ s->status = COMMENT_STATE; } if (s->status == COMMENT_STATE) { - if (s->gzhead->comment != NULL) { + if (s->gzhead->comment != Z_NULL) { uInt beg = s->pending; /* start of bytes to update crc */ int val; @@ -771,7 +884,7 @@ * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. */ - } else if (strm->avail_in == 0 && flush <= old_flush && + } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && flush != Z_FINISH) { ERR_RETURN(strm, Z_BUF_ERROR); } @@ -787,7 +900,9 @@ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { block_state bstate; - bstate = (*(configuration_table[s->level].func))(s, flush); + bstate = s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + (s->strategy == Z_RLE ? deflate_rle(s, flush) : + (*(configuration_table[s->level].func))(s, flush)); if (bstate == finish_started || bstate == finish_done) { s->status = FINISH_STATE; @@ -808,13 +923,18 @@ if (bstate == block_done) { if (flush == Z_PARTIAL_FLUSH) { _tr_align(s); - } else { /* FULL_FLUSH or SYNC_FLUSH */ + } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ _tr_stored_block(s, (char*)0, 0L, 0); /* For a full flush, this empty block will be recognized * as a special marker by inflate_sync(). 
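 * (Editorial sketch, not part of this changeset: seen from the caller, the
 * flush values handled here differ roughly as follows, assuming an
 * initialized deflate stream "strm" with output space available:
 *
 *     deflate(&strm, Z_SYNC_FLUSH);   // emit the empty stored block so the
 *                                     // output is byte-aligned; keep history
 *     deflate(&strm, Z_FULL_FLUSH);   // same marker, but the CLEAR_HASH that
 *                                     // follows this comment forgets the
 *                                     // history, so decompression could be
 *                                     // restarted from this point
 *
 * Using Z_FULL_FLUSH too often can seriously degrade compression.)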
*/ if (flush == Z_FULL_FLUSH) { CLEAR_HASH(s); /* forget history */ + if (s->lookahead == 0) { + s->strstart = 0; + s->block_start = 0L; + s->insert = 0; + } } } flush_pending(strm); @@ -909,12 +1029,12 @@ ss = source->state; - zmemcpy(dest, source, sizeof(z_stream)); + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); if (ds == Z_NULL) return Z_MEM_ERROR; dest->state = (struct internal_state FAR *) ds; - zmemcpy(ds, ss, sizeof(deflate_state)); + zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); ds->strm = dest; ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); @@ -930,8 +1050,8 @@ } /* following zmemcpy do not work for 16-bit MSDOS */ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); - zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); - zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); @@ -965,15 +1085,15 @@ strm->avail_in -= len; + zmemcpy(buf, strm->next_in, len); if (strm->state->wrap == 1) { - strm->adler = adler32(strm->adler, strm->next_in, len); + strm->adler = adler32(strm->adler, buf, len); } #ifdef GZIP else if (strm->state->wrap == 2) { - strm->adler = crc32(strm->adler, strm->next_in, len); + strm->adler = crc32(strm->adler, buf, len); } #endif - zmemcpy(buf, strm->next_in, len); strm->next_in += len; strm->total_in += len; @@ -1000,6 +1120,7 @@ s->strstart = 0; s->block_start = 0L; s->lookahead = 0; + s->insert = 0; s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; s->ins_h = 0; @@ -1167,12 +1288,13 @@ return s->lookahead; } #endif /* ASMV */ -#endif /* FASTEST */ + +#else /* FASTEST */ /* --------------------------------------------------------------------------- - * Optimized version for level == 1 or strategy == Z_RLE only + * Optimized version for FASTEST only */ -local uInt longest_match_fast(s, cur_match) +local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ { @@ -1225,6 +1347,8 @@ return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; } +#endif /* FASTEST */ + #ifdef DEBUG /* =========================================================================== * Check that the match at match_start is indeed a match. @@ -1271,6 +1395,8 @@ unsigned more; /* Amount of free space at the end of the window. */ uInt wsize = s->w_size; + Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + do { more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); @@ -1303,7 +1429,6 @@ later. (Using level 0 permanently is not an optimal usage of zlib, so we don't care about this pathological case.) 
*/ - /* %%% avoid this when Z_RLE */ n = s->hash_size; p = &s->head[n]; do { @@ -1324,7 +1449,7 @@ #endif more += wsize; } - if (s->strm->avail_in == 0) return; + if (s->strm->avail_in == 0) break; /* If there was no sliding: * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && @@ -1343,39 +1468,88 @@ s->lookahead += n; /* Initialize the hash value now that we have some input: */ - if (s->lookahead >= MIN_MATCH) { - s->ins_h = s->window[s->strstart]; - UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); + if (s->lookahead + s->insert >= MIN_MATCH) { + uInt str = s->strstart - s->insert; + s->ins_h = s->window[str]; + UPDATE_HASH(s, s->ins_h, s->window[str + 1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif + while (s->insert) { + UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); +#ifndef FASTEST + s->prev[str & s->w_mask] = s->head[s->ins_h]; +#endif + s->head[s->ins_h] = (Pos)str; + str++; + s->insert--; + if (s->lookahead + s->insert < MIN_MATCH) + break; + } } /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, * but this is not important since only literal bytes will be emitted. */ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ + if (s->high_water < s->window_size) { + ulg curr = s->strstart + (ulg)(s->lookahead); + ulg init; + + if (s->high_water < curr) { + /* Previous high water mark below current data -- zero WIN_INIT + * bytes or up to end of window, whichever is less. + */ + init = s->window_size - curr; + if (init > WIN_INIT) + init = WIN_INIT; + zmemzero(s->window + curr, (unsigned)init); + s->high_water = curr + init; + } + else if (s->high_water < (ulg)curr + WIN_INIT) { + /* High water mark at or above current data, but below current data + * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up + * to end of window, whichever is less. + */ + init = (ulg)curr + WIN_INIT - s->high_water; + if (init > s->window_size - s->high_water) + init = s->window_size - s->high_water; + zmemzero(s->window + s->high_water, (unsigned)init); + s->high_water += init; + } + } + + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "not enough room for search"); } /* =========================================================================== * Flush the current block, with given end-of-file flag. * IN assertion: strstart is set to the end of the current match. */ -#define FLUSH_BLOCK_ONLY(s, eof) { \ +#define FLUSH_BLOCK_ONLY(s, last) { \ _tr_flush_block(s, (s->block_start >= 0L ? \ (charf *)&s->window[(unsigned)s->block_start] : \ (charf *)Z_NULL), \ (ulg)((long)s->strstart - s->block_start), \ - (eof)); \ + (last)); \ s->block_start = s->strstart; \ flush_pending(s->strm); \ Tracev((stderr,"[FLUSH]")); \ } /* Same but force premature exit if necessary. */ -#define FLUSH_BLOCK(s, eof) { \ - FLUSH_BLOCK_ONLY(s, eof); \ - if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \ +#define FLUSH_BLOCK(s, last) { \ + FLUSH_BLOCK_ONLY(s, last); \ + if (s->strm->avail_out == 0) return (last) ? 
finish_started : need_more; \ } /* =========================================================================== @@ -1434,8 +1608,14 @@ FLUSH_BLOCK(s, 0); } } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if ((long)s->strstart > s->block_start) + FLUSH_BLOCK(s, 0); + return block_done; } /* =========================================================================== @@ -1449,7 +1629,7 @@ deflate_state *s; int flush; { - IPos hash_head = NIL; /* head of the hash chain */ + IPos hash_head; /* head of the hash chain */ int bflush; /* set if current block must be flushed */ for (;;) { @@ -1469,6 +1649,7 @@ /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ + hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } @@ -1481,19 +1662,8 @@ * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ -#ifdef FASTEST - if ((s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) || - (s->strategy == Z_RLE && s->strstart - hash_head == 1)) { - s->match_length = longest_match_fast (s, hash_head); - } -#else - if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) { - s->match_length = longest_match (s, hash_head); - } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) { - s->match_length = longest_match_fast (s, hash_head); - } -#endif - /* longest_match() or longest_match_fast() sets match_start */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ } if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->match_start, s->match_length); @@ -1541,8 +1711,14 @@ } if (bflush) FLUSH_BLOCK(s, 0); } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } #ifndef FASTEST @@ -1555,7 +1731,7 @@ deflate_state *s; int flush; { - IPos hash_head = NIL; /* head of hash chain */ + IPos hash_head; /* head of hash chain */ int bflush; /* set if current block must be flushed */ /* Process the input block. */ @@ -1576,6 +1752,7 @@ /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ + hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } @@ -1591,12 +1768,8 @@ * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ - if (s->strategy != Z_HUFFMAN_ONLY && s->strategy != Z_RLE) { - s->match_length = longest_match (s, hash_head); - } else if (s->strategy == Z_RLE && s->strstart - hash_head == 1) { - s->match_length = longest_match_fast (s, hash_head); - } - /* longest_match() or longest_match_fast() sets match_start */ + s->match_length = longest_match (s, hash_head); + /* longest_match() sets match_start */ if (s->match_length <= 5 && (s->strategy == Z_FILTERED #if TOO_FAR <= 32767 @@ -1669,12 +1842,17 @@ _tr_tally_lit(s, s->window[s->strstart-1], bflush); s->match_available = 0; } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = s->strstart < MIN_MATCH-1 ? 
s->strstart : MIN_MATCH-1; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } #endif /* FASTEST */ -#if 0 /* =========================================================================== * For Z_RLE, simply look for runs of bytes, generate matches only of distance * one. Do not maintain a hash table. (It will be regenerated if this run of @@ -1684,43 +1862,52 @@ deflate_state *s; int flush; { - int bflush; /* set if current block must be flushed */ - uInt run; /* length of run */ - uInt max; /* maximum length of run */ - uInt prev; /* byte at distance one to match */ - Bytef *scan; /* scan for end of run */ + int bflush; /* set if current block must be flushed */ + uInt prev; /* byte at distance one to match */ + Bytef *scan, *strend; /* scan goes up to strend for length of run */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes - * for the longest encodable run. + * for the longest run, plus one for the unrolled loop. */ - if (s->lookahead < MAX_MATCH) { + if (s->lookahead <= MAX_MATCH) { fill_window(s); - if (s->lookahead < MAX_MATCH && flush == Z_NO_FLUSH) { + if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* See how many times the previous byte repeats */ - run = 0; - if (s->strstart > 0) { /* if there is a previous byte, that is */ - max = s->lookahead < MAX_MATCH ? s->lookahead : MAX_MATCH; + s->match_length = 0; + if (s->lookahead >= MIN_MATCH && s->strstart > 0) { scan = s->window + s->strstart - 1; - prev = *scan++; - do { - if (*scan++ != prev) - break; - } while (++run < max); + prev = *scan; + if (prev == *++scan && prev == *++scan && prev == *++scan) { + strend = s->window + s->strstart + MAX_MATCH; + do { + } while (prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + prev == *++scan && prev == *++scan && + scan < strend); + s->match_length = MAX_MATCH - (int)(strend - scan); + if (s->match_length > s->lookahead) + s->match_length = s->lookahead; + } + Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); } /* Emit match if have run of MIN_MATCH or longer, else emit literal */ - if (run >= MIN_MATCH) { - check_match(s, s->strstart, s->strstart - 1, run); - _tr_tally_dist(s, 1, run - MIN_MATCH, bflush); - s->lookahead -= run; - s->strstart += run; + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->strstart - 1, s->match_length); + + _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); + + s->lookahead -= s->match_length; + s->strstart += s->match_length; + s->match_length = 0; } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); @@ -1730,7 +1917,51 @@ } if (bflush) FLUSH_BLOCK(s, 0); } - FLUSH_BLOCK(s, flush == Z_FINISH); - return flush == Z_FINISH ? finish_done : block_done; + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; } -#endif + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) 
+ */ +local block_state deflate_huff(s, flush) + deflate_state *s; + int flush; +{ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. */ + if (s->lookahead == 0) { + fill_window(s); + if (s->lookahead == 0) { + if (flush == Z_NO_FLUSH) + return need_more; + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s->match_length = 0; + Tracevv((stderr,"%c", s->window[s->strstart])); + _tr_tally_lit (s, s->window[s->strstart], bflush); + s->lookahead--; + s->strstart++; + if (bflush) FLUSH_BLOCK(s, 0); + } + s->insert = 0; + if (flush == Z_FINISH) { + FLUSH_BLOCK(s, 1); + return finish_done; + } + if (s->last_lit) + FLUSH_BLOCK(s, 0); + return block_done; +} diff --git a/Modules/zlib/deflate.h b/Modules/zlib/deflate.h --- a/Modules/zlib/deflate.h +++ b/Modules/zlib/deflate.h @@ -1,5 +1,5 @@ /* deflate.h -- internal compression state - * Copyright (C) 1995-2004 Jean-loup Gailly + * Copyright (C) 1995-2012 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -48,6 +48,9 @@ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ +#define Buf_size 16 +/* size of bit buffer in bi_buf */ + #define INIT_STATE 42 #define EXTRA_STATE 69 #define NAME_STATE 73 @@ -101,7 +104,7 @@ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ uInt gzindex; /* where in extra, name, or comment */ - Byte method; /* STORED (for zip only) or DEFLATED */ + Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ @@ -188,7 +191,7 @@ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ - /* Didn't use ct_data typedef below to supress compiler warning */ + /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ @@ -244,7 +247,7 @@ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ uInt matches; /* number of string matches in current block */ - int last_eob_len; /* bit length of EOB code for last block */ + uInt insert; /* bytes at end of window left to insert */ #ifdef DEBUG ulg compressed_len; /* total bit length of compressed file mod 2^32 */ @@ -260,6 +263,13 @@ * are always zero. */ + ulg high_water; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ + } FAR deflate_state; /* Output a byte on the stream. @@ -278,14 +288,19 @@ * distances are limited to MAX_DIST instead of WSIZE. 
*/ +#define WIN_INIT MAX_MATCH +/* Number of bytes after end of data in window to initialize in order to avoid + memory checker errors from longest match routines */ + /* in trees.c */ -void _tr_init OF((deflate_state *s)); -int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); -void _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, - int eof)); -void _tr_align OF((deflate_state *s)); -void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, - int eof)); +void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); +int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); +void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); +void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, + ulg stored_len, int last)); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) @@ -298,11 +313,11 @@ /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) - extern uch _length_code[]; - extern uch _dist_code[]; + extern uch ZLIB_INTERNAL _length_code[]; + extern uch ZLIB_INTERNAL _dist_code[]; #else - extern const uch _length_code[]; - extern const uch _dist_code[]; + extern const uch ZLIB_INTERNAL _length_code[]; + extern const uch ZLIB_INTERNAL _dist_code[]; #endif # define _tr_tally_lit(s, c, flush) \ diff --git a/Modules/zlib/example.c b/Modules/zlib/example.c --- a/Modules/zlib/example.c +++ b/Modules/zlib/example.c @@ -1,12 +1,12 @@ /* example.c -- usage example of the zlib compression library - * Copyright (C) 1995-2004 Jean-loup Gailly. + * Copyright (C) 1995-2006, 2011 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ +#include "zlib.h" #include -#include "zlib.h" #ifdef STDC # include @@ -26,7 +26,7 @@ } \ } -const char hello[] = "hello, hello!"; +z_const char hello[] = "hello, hello!"; /* "hello world" would be more standard, but the repeated "hello" * stresses the compression code better, sorry... 
*/ @@ -34,10 +34,6 @@ const char dictionary[] = "hello"; uLong dictId; /* Adler32 value of the dictionary */ -void test_compress OF((Byte *compr, uLong comprLen, - Byte *uncompr, uLong uncomprLen)); -void test_gzio OF((const char *fname, - Byte *uncompr, uLong uncomprLen)); void test_deflate OF((Byte *compr, uLong comprLen)); void test_inflate OF((Byte *compr, uLong comprLen, Byte *uncompr, uLong uncomprLen)); @@ -53,6 +49,39 @@ Byte *uncompr, uLong uncomprLen)); int main OF((int argc, char *argv[])); + +#ifdef Z_SOLO + +void *myalloc OF((void *, unsigned, unsigned)); +void myfree OF((void *, void *)); + +void *myalloc(q, n, m) + void *q; + unsigned n, m; +{ + q = Z_NULL; + return calloc(n, m); +} + +void myfree(void *q, void *p) +{ + q = Z_NULL; + free(p); +} + +static alloc_func zalloc = myalloc; +static free_func zfree = myfree; + +#else /* !Z_SOLO */ + +static alloc_func zalloc = (alloc_func)0; +static free_func zfree = (free_func)0; + +void test_compress OF((Byte *compr, uLong comprLen, + Byte *uncompr, uLong uncomprLen)); +void test_gzio OF((const char *fname, + Byte *uncompr, uLong uncomprLen)); + /* =========================================================================== * Test compress() and uncompress() */ @@ -163,6 +192,8 @@ #endif } +#endif /* Z_SOLO */ + /* =========================================================================== * Test deflate() with small buffers */ @@ -174,14 +205,14 @@ int err; uLong len = (uLong)strlen(hello)+1; - c_stream.zalloc = (alloc_func)0; - c_stream.zfree = (free_func)0; + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; c_stream.opaque = (voidpf)0; err = deflateInit(&c_stream, Z_DEFAULT_COMPRESSION); CHECK_ERR(err, "deflateInit"); - c_stream.next_in = (Bytef*)hello; + c_stream.next_in = (z_const unsigned char *)hello; c_stream.next_out = compr; while (c_stream.total_in != len && c_stream.total_out < comprLen) { @@ -213,8 +244,8 @@ strcpy((char*)uncompr, "garbage"); - d_stream.zalloc = (alloc_func)0; - d_stream.zfree = (free_func)0; + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; d_stream.opaque = (voidpf)0; d_stream.next_in = compr; @@ -252,8 +283,8 @@ z_stream c_stream; /* compression stream */ int err; - c_stream.zalloc = (alloc_func)0; - c_stream.zfree = (free_func)0; + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; c_stream.opaque = (voidpf)0; err = deflateInit(&c_stream, Z_BEST_SPEED); @@ -309,8 +340,8 @@ strcpy((char*)uncompr, "garbage"); - d_stream.zalloc = (alloc_func)0; - d_stream.zfree = (free_func)0; + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; d_stream.opaque = (voidpf)0; d_stream.next_in = compr; @@ -349,14 +380,14 @@ int err; uInt len = (uInt)strlen(hello)+1; - c_stream.zalloc = (alloc_func)0; - c_stream.zfree = (free_func)0; + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; c_stream.opaque = (voidpf)0; err = deflateInit(&c_stream, Z_DEFAULT_COMPRESSION); CHECK_ERR(err, "deflateInit"); - c_stream.next_in = (Bytef*)hello; + c_stream.next_in = (z_const unsigned char *)hello; c_stream.next_out = compr; c_stream.avail_in = 3; c_stream.avail_out = (uInt)*comprLen; @@ -388,8 +419,8 @@ strcpy((char*)uncompr, "garbage"); - d_stream.zalloc = (alloc_func)0; - d_stream.zfree = (free_func)0; + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; d_stream.opaque = (voidpf)0; d_stream.next_in = compr; @@ -430,22 +461,22 @@ z_stream c_stream; /* compression stream */ int err; - c_stream.zalloc = (alloc_func)0; - c_stream.zfree = (free_func)0; + c_stream.zalloc = zalloc; + c_stream.zfree = zfree; c_stream.opaque = 
(voidpf)0; err = deflateInit(&c_stream, Z_BEST_COMPRESSION); CHECK_ERR(err, "deflateInit"); err = deflateSetDictionary(&c_stream, - (const Bytef*)dictionary, sizeof(dictionary)); + (const Bytef*)dictionary, (int)sizeof(dictionary)); CHECK_ERR(err, "deflateSetDictionary"); dictId = c_stream.adler; c_stream.next_out = compr; c_stream.avail_out = (uInt)comprLen; - c_stream.next_in = (Bytef*)hello; + c_stream.next_in = (z_const unsigned char *)hello; c_stream.avail_in = (uInt)strlen(hello)+1; err = deflate(&c_stream, Z_FINISH); @@ -469,8 +500,8 @@ strcpy((char*)uncompr, "garbage"); - d_stream.zalloc = (alloc_func)0; - d_stream.zfree = (free_func)0; + d_stream.zalloc = zalloc; + d_stream.zfree = zfree; d_stream.opaque = (voidpf)0; d_stream.next_in = compr; @@ -491,7 +522,7 @@ exit(1); } err = inflateSetDictionary(&d_stream, (const Bytef*)dictionary, - sizeof(dictionary)); + (int)sizeof(dictionary)); } CHECK_ERR(err, "inflate with dict"); } @@ -540,10 +571,15 @@ printf("out of memory\n"); exit(1); } + +#ifdef Z_SOLO + argc = strlen(argv[0]); +#else test_compress(compr, comprLen, uncompr, uncomprLen); test_gzio((argc > 1 ? argv[1] : TESTFILE), uncompr, uncomprLen); +#endif test_deflate(compr, comprLen); test_inflate(compr, comprLen, uncompr, uncomprLen); diff --git a/Modules/zlib/gzclose.c b/Modules/zlib/gzclose.c new file mode 100644 --- /dev/null +++ b/Modules/zlib/gzclose.c @@ -0,0 +1,25 @@ +/* gzclose.c -- zlib gzclose() function + * Copyright (C) 2004, 2010 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* gzclose() is in a separate file so that it is linked in only if it is used. + That way the other gzclose functions can be used instead to avoid linking in + unneeded compression or decompression routines. */ +int ZEXPORT gzclose(file) + gzFile file; +{ +#ifndef NO_GZCOMPRESS + gz_statep state; + + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + return state->mode == GZ_READ ? 
gzclose_r(file) : gzclose_w(file); +#else + return gzclose_r(file); +#endif +} diff --git a/Modules/zlib/gzguts.h b/Modules/zlib/gzguts.h new file mode 100644 --- /dev/null +++ b/Modules/zlib/gzguts.h @@ -0,0 +1,209 @@ +/* gzguts.h -- zlib internal header definitions for gz* operations + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#ifdef _LARGEFILE64_SOURCE +# ifndef _LARGEFILE_SOURCE +# define _LARGEFILE_SOURCE 1 +# endif +# ifdef _FILE_OFFSET_BITS +# undef _FILE_OFFSET_BITS +# endif +#endif + +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + +#include +#include "zlib.h" +#ifdef STDC +# include +# include +# include +#endif +#include + +#ifdef _WIN32 +# include +#endif + +#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) +# include +#endif + +#ifdef WINAPI_FAMILY +# define open _open +# define read _read +# define write _write +# define close _close +#endif + +#ifdef NO_DEFLATE /* for compatibility with old definition */ +# define NO_GZCOMPRESS +#endif + +#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(__CYGWIN__) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) +# ifndef HAVE_VSNPRINTF +# define HAVE_VSNPRINTF +# endif +#endif + +#ifndef HAVE_VSNPRINTF +# ifdef MSDOS +/* vsnprintf may exist on some MS-DOS compilers (DJGPP?), + but for now we just assume it doesn't. */ +# define NO_vsnprintf +# endif +# ifdef __TURBOC__ +# define NO_vsnprintf +# endif +# ifdef WIN32 +/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ +# if !defined(vsnprintf) && !defined(NO_vsnprintf) +# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) +# define vsnprintf _vsnprintf +# endif +# endif +# endif +# ifdef __SASC +# define NO_vsnprintf +# endif +# ifdef VMS +# define NO_vsnprintf +# endif +# ifdef __OS400__ +# define NO_vsnprintf +# endif +# ifdef __MVS__ +# define NO_vsnprintf +# endif +#endif + +/* unlike snprintf (which is required in C99, yet still not supported by + Microsoft more than a decade later!), _snprintf does not guarantee null + termination of the result -- however this is only used in gzlib.c where + the result is assured to fit in the space provided */ +#ifdef _MSC_VER +# define snprintf _snprintf +#endif + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +/* gz* functions always use library allocation functions */ +#ifndef STDC + extern voidp malloc OF((uInt size)); + extern void free OF((voidpf ptr)); +#endif + +/* get errno and strerror definition */ +#if defined UNDER_CE +# include +# define zstrerror() gz_strwinerror((DWORD)GetLastError()) +#else +# ifndef NO_STRERROR +# include +# define zstrerror() strerror(errno) +# else +# define zstrerror() "stdio error (consult errno)" +# endif +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); +#endif + +/* default memLevel */ +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif + +/* default i/o buffer size -- double this for output when reading (this and + twice this must be able to fit in an unsigned type) */ +#define GZBUFSIZE 8192 + +/* gzip modes, also provide a little integrity check on the passed structure */ +#define GZ_NONE 0 +#define GZ_READ 7247 +#define GZ_WRITE 31153 +#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ + +/* values for gz_state how */ +#define LOOK 0 /* look for a gzip header */ +#define COPY 1 /* copy input directly */ +#define GZIP 2 /* decompress a gzip stream */ + +/* internal gzip file state data structure */ +typedef struct { + /* exposed contents for gzgetc() macro */ + struct gzFile_s x; /* "x" for exposed */ + /* x.have: number of bytes available at x.next */ + /* x.next: next output data to deliver or write */ + /* x.pos: current position in uncompressed data */ + /* used for both reading and writing */ + int mode; /* see gzip modes above */ + int fd; /* file descriptor */ + char *path; /* path or fd for error messages */ + unsigned size; /* buffer size, zero if not allocated yet */ + unsigned want; /* requested buffer size, default is GZBUFSIZE */ + unsigned char *in; /* input buffer */ + unsigned char *out; /* output buffer (double-sized when reading) */ + int direct; /* 0 if processing gzip, 1 if transparent */ + /* just for reading */ + int how; /* 0: get header, 1: copy, 2: decompress */ + z_off64_t start; /* where the gzip data started, for rewinding */ + int eof; /* true if end of input file reached */ + int past; /* true if read requested past end */ + /* just for writing */ + int level; /* compression level */ + int strategy; /* compression strategy */ + /* seek request */ + z_off64_t skip; /* amount to skip (already rewound if 
backwards) */ + int seek; /* true if seek request pending */ + /* error information */ + int err; /* error code */ + char *msg; /* error message */ + /* zlib inflate or deflate stream */ + z_stream strm; /* stream structure in-place (not a pointer) */ +} gz_state; +typedef gz_state FAR *gz_statep; + +/* shared functions */ +void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); +#if defined UNDER_CE +char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); +#endif + +/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t + value -- needed when comparing unsigned to z_off64_t, which is signed + (possible z_off64_t types off_t, off64_t, and long are all signed) */ +#ifdef INT_MAX +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) +#else +unsigned ZLIB_INTERNAL gz_intmax OF((void)); +# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) +#endif diff --git a/Modules/zlib/gzio.c b/Modules/zlib/gzio.c deleted file mode 100644 --- a/Modules/zlib/gzio.c +++ /dev/null @@ -1,1026 +0,0 @@ -/* gzio.c -- IO on .gz files - * Copyright (C) 1995-2005 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - * - * Compile this file with -DNO_GZCOMPRESS to avoid the compression code. - */ - -/* @(#) $Id$ */ - -#include - -#include "zutil.h" - -#ifdef NO_DEFLATE /* for compatibility with old definition */ -# define NO_GZCOMPRESS -#endif - -#ifndef NO_DUMMY_DECL -struct internal_state {int dummy;}; /* for buggy compilers */ -#endif - -#ifndef Z_BUFSIZE -# ifdef MAXSEG_64K -# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */ -# else -# define Z_BUFSIZE 16384 -# endif -#endif -#ifndef Z_PRINTF_BUFSIZE -# define Z_PRINTF_BUFSIZE 4096 -#endif - -#ifdef __MVS__ -# pragma map (fdopen , "\174\174FDOPEN") - FILE *fdopen(int, const char *); -#endif - -#ifndef STDC -extern voidp malloc OF((uInt size)); -extern void free OF((voidpf ptr)); -#endif - -#define ALLOC(size) malloc(size) -#define TRYFREE(p) {if (p) free(p);} - -static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ -#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define RESERVED 0xE0 /* bits 5..7: reserved */ - -typedef struct gz_stream { - z_stream stream; - int z_err; /* error code for last stream operation */ - int z_eof; /* set if end of input file */ - FILE *file; /* .gz file */ - Byte *inbuf; /* input buffer */ - Byte *outbuf; /* output buffer */ - uLong crc; /* crc32 of uncompressed data */ - char *msg; /* error message */ - char *path; /* path name for debugging only */ - int transparent; /* 1 if input file is not a .gz file */ - char mode; /* 'w' or 'r' */ - z_off_t start; /* start of compressed data in file (header skipped) */ - z_off_t in; /* bytes into deflate or inflate */ - z_off_t out; /* bytes out of deflate or inflate */ - int back; /* one character push-back */ - int last; /* true if push-back is last character */ -} gz_stream; - - -local gzFile gz_open OF((const char *path, const char *mode, int fd)); -local int do_flush OF((gzFile file, int flush)); -local int get_byte OF((gz_stream *s)); -local void check_header OF((gz_stream *s)); -local int destroy OF((gz_stream *s)); -local void putLong OF((FILE *file, uLong x)); 
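/* [Editorial sketch, not part of this changeset or of the zlib sources: the
 * monolithic gzio.c removed here is superseded in zlib 1.2.8 by per-function
 * gz* sources (gzguts.h and gzclose.c appear earlier in this patch), while
 * the caller-visible API stays the same.  A minimal usage sketch, relying
 * only on the mode-string parsing shown in gz_open below -- a digit selects
 * the compression level and 'f'/'h'/'R' select Z_FILTERED, Z_HUFFMAN_ONLY or
 * Z_RLE:
 *
 *     #include "zlib.h"
 *
 *     int write_greeting(void)
 *     {
 *         gzFile g = gzopen("hello.gz", "wb6f");   // level 6, filtered
 *         if (g == NULL)
 *             return -1;
 *         gzputs(g, "hello, hello!\n");            // compressed on the fly
 *         return gzclose(g) == Z_OK ? 0 : -1;      // flushes, writes trailer
 *     }
 */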
-local uLong getLong OF((gz_stream *s)); - -/* =========================================================================== - Opens a gzip (.gz) file for reading or writing. The mode parameter - is as in fopen ("rb" or "wb"). The file is given either by file descriptor - or path name (if fd == -1). - gz_open returns NULL if the file could not be opened or if there was - insufficient memory to allocate the (de)compression state; errno - can be checked to distinguish the two cases (if errno is zero, the - zlib error is Z_MEM_ERROR). -*/ -local gzFile gz_open (path, mode, fd) - const char *path; - const char *mode; - int fd; -{ - int err; - int level = Z_DEFAULT_COMPRESSION; /* compression level */ - int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */ - char *p = (char*)mode; - gz_stream *s; - char fmode[80]; /* copy of mode, without the compression level */ - char *m = fmode; - - if (!path || !mode) return Z_NULL; - - s = (gz_stream *)ALLOC(sizeof(gz_stream)); - if (!s) return Z_NULL; - - s->stream.zalloc = (alloc_func)0; - s->stream.zfree = (free_func)0; - s->stream.opaque = (voidpf)0; - s->stream.next_in = s->inbuf = Z_NULL; - s->stream.next_out = s->outbuf = Z_NULL; - s->stream.avail_in = s->stream.avail_out = 0; - s->file = NULL; - s->z_err = Z_OK; - s->z_eof = 0; - s->in = 0; - s->out = 0; - s->back = EOF; - s->crc = crc32(0L, Z_NULL, 0); - s->msg = NULL; - s->transparent = 0; - - s->path = (char*)ALLOC(strlen(path)+1); - if (s->path == NULL) { - return destroy(s), (gzFile)Z_NULL; - } - strcpy(s->path, path); /* do this early for debugging */ - - s->mode = '\0'; - do { - if (*p == 'r') s->mode = 'r'; - if (*p == 'w' || *p == 'a') s->mode = 'w'; - if (*p >= '0' && *p <= '9') { - level = *p - '0'; - } else if (*p == 'f') { - strategy = Z_FILTERED; - } else if (*p == 'h') { - strategy = Z_HUFFMAN_ONLY; - } else if (*p == 'R') { - strategy = Z_RLE; - } else { - *m++ = *p; /* copy the mode */ - } - } while (*p++ && m != fmode + sizeof(fmode)); - if (s->mode == '\0') return destroy(s), (gzFile)Z_NULL; - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else - err = deflateInit2(&(s->stream), level, - Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy); - /* windowBits is passed < 0 to suppress zlib header */ - - s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); -#endif - if (err != Z_OK || s->outbuf == Z_NULL) { - return destroy(s), (gzFile)Z_NULL; - } - } else { - s->stream.next_in = s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); - - err = inflateInit2(&(s->stream), -MAX_WBITS); - /* windowBits is passed < 0 to tell that there is no zlib header. - * Note that in this case inflate *requires* an extra "dummy" byte - * after the compressed stream in order to complete decompression and - * return Z_STREAM_END. Here the gzip CRC32 ensures that 4 bytes are - * present after the compressed stream. - */ - if (err != Z_OK || s->inbuf == Z_NULL) { - return destroy(s), (gzFile)Z_NULL; - } - } - s->stream.avail_out = Z_BUFSIZE; - - errno = 0; - s->file = fd < 0 ? F_OPEN(path, fmode) : (FILE*)fdopen(fd, fmode); - - if (s->file == NULL) { - return destroy(s), (gzFile)Z_NULL; - } - if (s->mode == 'w') { - /* Write a very simple .gz header: - */ - fprintf(s->file, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1], - Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, OS_CODE); - s->start = 10L; - /* We use 10L instead of ftell(s->file) to because ftell causes an - * fflush on some systems. 
This version of the library doesn't use - * start anyway in write mode, so this initialization is not - * necessary. - */ - } else { - check_header(s); /* skip the .gz header */ - s->start = ftell(s->file) - s->stream.avail_in; - } - - return (gzFile)s; -} - -/* =========================================================================== - Opens a gzip (.gz) file for reading or writing. -*/ -gzFile ZEXPORT gzopen (path, mode) - const char *path; - const char *mode; -{ - return gz_open (path, mode, -1); -} - -/* =========================================================================== - Associate a gzFile with the file descriptor fd. fd is not dup'ed here - to mimic the behavio(u)r of fdopen. -*/ -gzFile ZEXPORT gzdopen (fd, mode) - int fd; - const char *mode; -{ - char name[46]; /* allow for up to 128-bit integers */ - - if (fd < 0) return (gzFile)Z_NULL; - sprintf(name, "", fd); /* for debugging */ - - return gz_open (name, mode, fd); -} - -/* =========================================================================== - * Update the compression level and strategy - */ -int ZEXPORT gzsetparams (file, level, strategy) - gzFile file; - int level; - int strategy; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - /* Make room to allow flushing */ - if (s->stream.avail_out == 0) { - - s->stream.next_out = s->outbuf; - if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { - s->z_err = Z_ERRNO; - } - s->stream.avail_out = Z_BUFSIZE; - } - - return deflateParams (&(s->stream), level, strategy); -} - -/* =========================================================================== - Read a byte from a gz_stream; update next_in and avail_in. Return EOF - for end of file. - IN assertion: the stream s has been sucessfully opened for reading. -*/ -local int get_byte(s) - gz_stream *s; -{ - if (s->z_eof) return EOF; - if (s->stream.avail_in == 0) { - errno = 0; - s->stream.avail_in = (uInt)fread(s->inbuf, 1, Z_BUFSIZE, s->file); - if (s->stream.avail_in == 0) { - s->z_eof = 1; - if (ferror(s->file)) s->z_err = Z_ERRNO; - return EOF; - } - s->stream.next_in = s->inbuf; - } - s->stream.avail_in--; - return *(s->stream.next_in)++; -} - -/* =========================================================================== - Check the gzip header of a gz_stream opened for reading. Set the stream - mode to transparent if the gzip magic header is not present; set s->err - to Z_DATA_ERROR if the magic header is present but the rest of the header - is incorrect. - IN assertion: the stream s has already been created sucessfully; - s->stream.avail_in is zero for the first time, but may be non-zero - for concatenated .gz files. 
-*/ -local void check_header(s) - gz_stream *s; -{ - int method; /* method byte */ - int flags; /* flags byte */ - uInt len; - int c; - - /* Assure two bytes in the buffer so we can peek ahead -- handle case - where first byte of header is at the end of the buffer after the last - gzip segment */ - len = s->stream.avail_in; - if (len < 2) { - if (len) s->inbuf[0] = s->stream.next_in[0]; - errno = 0; - len = (uInt)fread(s->inbuf + len, 1, Z_BUFSIZE >> len, s->file); - if (len == 0 && ferror(s->file)) s->z_err = Z_ERRNO; - s->stream.avail_in += len; - s->stream.next_in = s->inbuf; - if (s->stream.avail_in < 2) { - s->transparent = s->stream.avail_in; - return; - } - } - - /* Peek ahead to check the gzip magic header */ - if (s->stream.next_in[0] != gz_magic[0] || - s->stream.next_in[1] != gz_magic[1]) { - s->transparent = 1; - return; - } - s->stream.avail_in -= 2; - s->stream.next_in += 2; - - /* Check the rest of the gzip header */ - method = get_byte(s); - flags = get_byte(s); - if (method != Z_DEFLATED || (flags & RESERVED) != 0) { - s->z_err = Z_DATA_ERROR; - return; - } - - /* Discard time, xflags and OS code: */ - for (len = 0; len < 6; len++) (void)get_byte(s); - - if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */ - len = (uInt)get_byte(s); - len += ((uInt)get_byte(s))<<8; - /* len is garbage if EOF but the loop below will quit anyway */ - while (len-- != 0 && get_byte(s) != EOF) ; - } - if ((flags & ORIG_NAME) != 0) { /* skip the original file name */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & COMMENT) != 0) { /* skip the .gz file comment */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & HEAD_CRC) != 0) { /* skip the header crc */ - for (len = 0; len < 2; len++) (void)get_byte(s); - } - s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; -} - - /* =========================================================================== - * Cleanup then free the given gz_stream. Return a zlib error code. - Try freeing in the reverse order of allocations. - */ -local int destroy (s) - gz_stream *s; -{ - int err = Z_OK; - - if (!s) return Z_STREAM_ERROR; - - TRYFREE(s->msg); - - if (s->stream.state != NULL) { - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else - err = deflateEnd(&(s->stream)); -#endif - } else if (s->mode == 'r') { - err = inflateEnd(&(s->stream)); - } - } - if (s->file != NULL && fclose(s->file)) { -#ifdef ESPIPE - if (errno != ESPIPE) /* fclose is broken for pipes in HP/UX */ -#endif - err = Z_ERRNO; - } - if (s->z_err < 0) err = s->z_err; - - TRYFREE(s->inbuf); - TRYFREE(s->outbuf); - TRYFREE(s->path); - TRYFREE(s); - return err; -} - -/* =========================================================================== - Reads the given number of uncompressed bytes from the compressed file. - gzread returns the number of bytes actually read (0 for end of file). 
-*/ -int ZEXPORT gzread (file, buf, len) - gzFile file; - voidp buf; - unsigned len; -{ - gz_stream *s = (gz_stream*)file; - Bytef *start = (Bytef*)buf; /* starting point for crc computation */ - Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */ - - if (s == NULL || s->mode != 'r') return Z_STREAM_ERROR; - - if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1; - if (s->z_err == Z_STREAM_END) return 0; /* EOF */ - - next_out = (Byte*)buf; - s->stream.next_out = (Bytef*)buf; - s->stream.avail_out = len; - - if (s->stream.avail_out && s->back != EOF) { - *next_out++ = s->back; - s->stream.next_out++; - s->stream.avail_out--; - s->back = EOF; - s->out++; - start++; - if (s->last) { - s->z_err = Z_STREAM_END; - return 1; - } - } - - while (s->stream.avail_out != 0) { - - if (s->transparent) { - /* Copy first the lookahead bytes: */ - uInt n = s->stream.avail_in; - if (n > s->stream.avail_out) n = s->stream.avail_out; - if (n > 0) { - zmemcpy(s->stream.next_out, s->stream.next_in, n); - next_out += n; - s->stream.next_out = next_out; - s->stream.next_in += n; - s->stream.avail_out -= n; - s->stream.avail_in -= n; - } - if (s->stream.avail_out > 0) { - s->stream.avail_out -= - (uInt)fread(next_out, 1, s->stream.avail_out, s->file); - } - len -= s->stream.avail_out; - s->in += len; - s->out += len; - if (len == 0) s->z_eof = 1; - return (int)len; - } - if (s->stream.avail_in == 0 && !s->z_eof) { - - errno = 0; - s->stream.avail_in = (uInt)fread(s->inbuf, 1, Z_BUFSIZE, s->file); - if (s->stream.avail_in == 0) { - s->z_eof = 1; - if (ferror(s->file)) { - s->z_err = Z_ERRNO; - break; - } - } - s->stream.next_in = s->inbuf; - } - s->in += s->stream.avail_in; - s->out += s->stream.avail_out; - s->z_err = inflate(&(s->stream), Z_NO_FLUSH); - s->in -= s->stream.avail_in; - s->out -= s->stream.avail_out; - - if (s->z_err == Z_STREAM_END) { - /* Check CRC and original size */ - s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); - start = s->stream.next_out; - - if (getLong(s) != s->crc) { - s->z_err = Z_DATA_ERROR; - } else { - (void)getLong(s); - /* The uncompressed length returned by above getlong() may be - * different from s->out in case of concatenated .gz files. - * Check for such files: - */ - check_header(s); - if (s->z_err == Z_OK) { - inflateReset(&(s->stream)); - s->crc = crc32(0L, Z_NULL, 0); - } - } - } - if (s->z_err != Z_OK || s->z_eof) break; - } - s->crc = crc32(s->crc, start, (uInt)(s->stream.next_out - start)); - - if (len == s->stream.avail_out && - (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO)) - return -1; - return (int)(len - s->stream.avail_out); -} - - -/* =========================================================================== - Reads one byte from the compressed file. gzgetc returns this byte - or -1 in case of end of file or error. -*/ -int ZEXPORT gzgetc(file) - gzFile file; -{ - unsigned char c; - - return gzread(file, &c, 1) == 1 ? c : -1; -} - - -/* =========================================================================== - Push one byte back onto the stream. 
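
A small sketch of the push-back described here (the gz_peek helper is hypothetical, not part of the patch): look at the next byte with gzgetc() and hand it back with gzungetc() so the following read still sees it. Only one byte of push-back is guaranteed, which is all this needs:

    #include <zlib.h>

    /* Return the next byte without consuming it, or -1 at end of file/error. */
    static int gz_peek(gzFile in)
    {
        int c = gzgetc(in);
        if (c != -1)
            c = gzungetc(c, in);   /* returns c on success, EOF on failure */
        return c;
    }
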
-*/ -int ZEXPORT gzungetc(c, file) - int c; - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r' || c == EOF || s->back != EOF) return EOF; - s->back = c; - s->out--; - s->last = (s->z_err == Z_STREAM_END); - if (s->last) s->z_err = Z_OK; - s->z_eof = 0; - return c; -} - - -/* =========================================================================== - Reads bytes from the compressed file until len-1 characters are - read, or a newline character is read and transferred to buf, or an - end-of-file condition is encountered. The string is then terminated - with a null character. - gzgets returns buf, or Z_NULL in case of error. - - The current implementation is not optimized at all. -*/ -char * ZEXPORT gzgets(file, buf, len) - gzFile file; - char *buf; - int len; -{ - char *b = buf; - if (buf == Z_NULL || len <= 0) return Z_NULL; - - while (--len > 0 && gzread(file, buf, 1) == 1 && *buf++ != '\n') ; - *buf = '\0'; - return b == buf && len > 0 ? Z_NULL : b; -} - - -#ifndef NO_GZCOMPRESS -/* =========================================================================== - Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of bytes actually written (0 in case of error). -*/ -int ZEXPORT gzwrite (file, buf, len) - gzFile file; - voidpc buf; - unsigned len; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - s->stream.next_in = (Bytef*)buf; - s->stream.avail_in = len; - - while (s->stream.avail_in != 0) { - - if (s->stream.avail_out == 0) { - - s->stream.next_out = s->outbuf; - if (fwrite(s->outbuf, 1, Z_BUFSIZE, s->file) != Z_BUFSIZE) { - s->z_err = Z_ERRNO; - break; - } - s->stream.avail_out = Z_BUFSIZE; - } - s->in += s->stream.avail_in; - s->out += s->stream.avail_out; - s->z_err = deflate(&(s->stream), Z_NO_FLUSH); - s->in -= s->stream.avail_in; - s->out -= s->stream.avail_out; - if (s->z_err != Z_OK) break; - } - s->crc = crc32(s->crc, (const Bytef *)buf, len); - - return (int)(len - s->stream.avail_in); -} - - -/* =========================================================================== - Converts, formats, and writes the args to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written (0 in case of error). -*/ -#ifdef STDC -#include - -int ZEXPORTVA gzprintf (gzFile file, const char *format, /* args */ ...) 
-{ - char buf[Z_PRINTF_BUFSIZE]; - va_list va; - int len; - - buf[sizeof(buf) - 1] = 0; - va_start(va, format); -#ifdef NO_vsnprintf -# ifdef HAS_vsprintf_void - (void)vsprintf(buf, format, va); - va_end(va); - for (len = 0; len < sizeof(buf); len++) - if (buf[len] == 0) break; -# else - len = vsprintf(buf, format, va); - va_end(va); -# endif -#else -# ifdef HAS_vsnprintf_void - (void)vsnprintf(buf, sizeof(buf), format, va); - va_end(va); - len = strlen(buf); -# else - len = vsnprintf(buf, sizeof(buf), format, va); - va_end(va); -# endif -#endif - if (len <= 0 || len >= (int)sizeof(buf) || buf[sizeof(buf) - 1] != 0) - return 0; - return gzwrite(file, buf, (unsigned)len); -} -#else /* not ANSI C */ - -int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, - a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) - gzFile file; - const char *format; - int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, - a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; -{ - char buf[Z_PRINTF_BUFSIZE]; - int len; - - buf[sizeof(buf) - 1] = 0; -#ifdef NO_snprintf -# ifdef HAS_sprintf_void - sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); - for (len = 0; len < sizeof(buf); len++) - if (buf[len] == 0) break; -# else - len = sprintf(buf, format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); -# endif -#else -# ifdef HAS_snprintf_void - snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); - len = strlen(buf); -# else - len = snprintf(buf, sizeof(buf), format, a1, a2, a3, a4, a5, a6, a7, a8, - a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); -# endif -#endif - if (len <= 0 || len >= sizeof(buf) || buf[sizeof(buf) - 1] != 0) - return 0; - return gzwrite(file, buf, len); -} -#endif - -/* =========================================================================== - Writes c, converted to an unsigned char, into the compressed file. - gzputc returns the value that was written, or -1 in case of error. -*/ -int ZEXPORT gzputc(file, c) - gzFile file; - int c; -{ - unsigned char cc = (unsigned char) c; /* required for big endian systems */ - - return gzwrite(file, &cc, 1) == 1 ? (int)cc : -1; -} - - -/* =========================================================================== - Writes the given null-terminated string to the compressed file, excluding - the terminating null character. - gzputs returns the number of characters written, or -1 in case of error. -*/ -int ZEXPORT gzputs(file, s) - gzFile file; - const char *s; -{ - return gzwrite(file, (char*)s, (unsigned)strlen(s)); -} - - -/* =========================================================================== - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. 
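
From the caller's side this is reached through gzflush(); a hedged sketch (the write_record helper is hypothetical, not part of the patch) using Z_SYNC_FLUSH, which makes everything written so far readable by a concurrent reader at some cost in compression:

    #include <string.h>
    #include <zlib.h>

    /* Write one text record and force it out to the file. */
    static int write_record(gzFile out, const char *line)
    {
        int len = (int)strlen(line);

        if (len > 0 && gzputs(out, line) != len)
            return -1;                              /* short or failed write */
        return gzflush(out, Z_SYNC_FLUSH) == Z_OK ? 0 : -1;
    }
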
-*/ -local int do_flush (file, flush) - gzFile file; - int flush; -{ - uInt len; - int done = 0; - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - s->stream.avail_in = 0; /* should be zero already anyway */ - - for (;;) { - len = Z_BUFSIZE - s->stream.avail_out; - - if (len != 0) { - if ((uInt)fwrite(s->outbuf, 1, len, s->file) != len) { - s->z_err = Z_ERRNO; - return Z_ERRNO; - } - s->stream.next_out = s->outbuf; - s->stream.avail_out = Z_BUFSIZE; - } - if (done) break; - s->out += s->stream.avail_out; - s->z_err = deflate(&(s->stream), flush); - s->out -= s->stream.avail_out; - - /* Ignore the second of two consecutive flushes: */ - if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK; - - /* deflate has finished flushing only when it hasn't used up - * all the available space in the output buffer: - */ - done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END); - - if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break; - } - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; -} - -int ZEXPORT gzflush (file, flush) - gzFile file; - int flush; -{ - gz_stream *s = (gz_stream*)file; - int err = do_flush (file, flush); - - if (err) return err; - fflush(s->file); - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; -} -#endif /* NO_GZCOMPRESS */ - -/* =========================================================================== - Sets the starting position for the next gzread or gzwrite on the given - compressed file. The offset represents a number of bytes in the - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error. - SEEK_END is not implemented, returns error. - In this version of the library, gzseek can be extremely slow. -*/ -z_off_t ZEXPORT gzseek (file, offset, whence) - gzFile file; - z_off_t offset; - int whence; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || whence == SEEK_END || - s->z_err == Z_ERRNO || s->z_err == Z_DATA_ERROR) { - return -1L; - } - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return -1L; -#else - if (whence == SEEK_SET) { - offset -= s->in; - } - if (offset < 0) return -1L; - - /* At this point, offset is the number of zero bytes to write. */ - if (s->inbuf == Z_NULL) { - s->inbuf = (Byte*)ALLOC(Z_BUFSIZE); /* for seeking */ - if (s->inbuf == Z_NULL) return -1L; - zmemzero(s->inbuf, Z_BUFSIZE); - } - while (offset > 0) { - uInt size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (uInt)offset; - - size = gzwrite(file, s->inbuf, size); - if (size == 0) return -1L; - - offset -= size; - } - return s->in; -#endif - } - /* Rest of function is for reading only */ - - /* compute absolute position */ - if (whence == SEEK_CUR) { - offset += s->out; - } - if (offset < 0) return -1L; - - if (s->transparent) { - /* map to fseek */ - s->back = EOF; - s->stream.avail_in = 0; - s->stream.next_in = s->inbuf; - if (fseek(s->file, offset, SEEK_SET) < 0) return -1L; - - s->in = s->out = offset; - return offset; - } - - /* For a negative seek, rewind and use positive seek */ - if (offset >= s->out) { - offset -= s->out; - } else if (gzrewind(file) < 0) { - return -1L; - } - /* offset is now the number of bytes to skip. 
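
A usage sketch of the seek semantics described above (file name and offset are hypothetical, not part of the patch): only SEEK_SET and SEEK_CUR are accepted, offsets count uncompressed bytes, and a forward seek on a compressed stream is implemented by decompressing and discarding the skipped data:

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        char buf[16];
        gzFile in = gzopen("table.gz", "rb");      /* hypothetical file name */

        if (in == NULL)
            return 1;
        if (gzseek(in, 128L, SEEK_SET) == -1)      /* skip a 128-byte header */
            return 1;
        if (gzread(in, buf, sizeof buf) > 0)
            printf("now at uncompressed offset %ld\n", (long)gztell(in));
        return gzclose(in) == Z_OK ? 0 : 1;
    }
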
*/ - - if (offset != 0 && s->outbuf == Z_NULL) { - s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); - if (s->outbuf == Z_NULL) return -1L; - } - if (offset && s->back != EOF) { - s->back = EOF; - s->out++; - offset--; - if (s->last) s->z_err = Z_STREAM_END; - } - while (offset > 0) { - int size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (int)offset; - - size = gzread(file, s->outbuf, (uInt)size); - if (size <= 0) return -1L; - offset -= size; - } - return s->out; -} - -/* =========================================================================== - Rewinds input file. -*/ -int ZEXPORT gzrewind (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r') return -1; - - s->z_err = Z_OK; - s->z_eof = 0; - s->back = EOF; - s->stream.avail_in = 0; - s->stream.next_in = s->inbuf; - s->crc = crc32(0L, Z_NULL, 0); - if (!s->transparent) (void)inflateReset(&s->stream); - s->in = 0; - s->out = 0; - return fseek(s->file, s->start, SEEK_SET); -} - -/* =========================================================================== - Returns the starting position for the next gzread or gzwrite on the - given compressed file. This position represents a number of bytes in the - uncompressed data stream. -*/ -z_off_t ZEXPORT gztell (file) - gzFile file; -{ - return gzseek(file, 0L, SEEK_CUR); -} - -/* =========================================================================== - Returns 1 when EOF has previously been detected reading the given - input stream, otherwise zero. -*/ -int ZEXPORT gzeof (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - /* With concatenated compressed files that can have embedded - * crc trailers, z_eof is no longer the only/best indicator of EOF - * on a gz_stream. Handle end-of-stream error explicitly here. - */ - if (s == NULL || s->mode != 'r') return 0; - if (s->z_eof) return 1; - return s->z_err == Z_STREAM_END; -} - -/* =========================================================================== - Returns 1 if reading and doing so transparently, otherwise zero. -*/ -int ZEXPORT gzdirect (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL || s->mode != 'r') return 0; - return s->transparent; -} - -/* =========================================================================== - Outputs a long in LSB order to the given file -*/ -local void putLong (file, x) - FILE *file; - uLong x; -{ - int n; - for (n = 0; n < 4; n++) { - fputc((int)(x & 0xff), file); - x >>= 8; - } -} - -/* =========================================================================== - Reads a long in LSB order from the given gz_stream. Sets z_err in case - of error. -*/ -local uLong getLong (s) - gz_stream *s; -{ - uLong x = (uLong)get_byte(s); - int c; - - x += ((uLong)get_byte(s))<<8; - x += ((uLong)get_byte(s))<<16; - c = get_byte(s); - if (c == EOF) s->z_err = Z_DATA_ERROR; - x += ((uLong)c)<<24; - return x; -} - -/* =========================================================================== - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. 
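
A write-side sketch (the save_gz helper, file name, and compression level are illustrative, not part of the patch): gzclose() is what emits the trailing CRC and length, so its return value is worth checking:

    #include <zlib.h>

    /* Compress a buffer into a .gz file; returns 0 on success, -1 on error. */
    static int save_gz(const char *path, const void *data, unsigned len)
    {
        gzFile out = gzopen(path, "wb9");          /* "9" = best compression */

        if (out == NULL)
            return -1;
        if (len != 0 && gzwrite(out, data, len) != (int)len) {
            gzclose(out);
            return -1;
        }
        return gzclose(out) == Z_OK ? 0 : -1;      /* flush + CRC32/length trailer */
    }
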
-*/ -int ZEXPORT gzclose (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL) return Z_STREAM_ERROR; - - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return Z_STREAM_ERROR; -#else - if (do_flush (file, Z_FINISH) != Z_OK) - return destroy((gz_stream*)file); - - putLong (s->file, s->crc); - putLong (s->file, (uLong)(s->in & 0xffffffff)); -#endif - } - return destroy((gz_stream*)file); -} - -#ifdef STDC -# define zstrerror(errnum) strerror(errnum) -#else -# define zstrerror(errnum) "" -#endif - -/* =========================================================================== - Returns the error message for the last error which occurred on the - given compressed file. errnum is set to zlib error number. If an - error occurred in the file system and not in the compression library, - errnum is set to Z_ERRNO and the application may consult errno - to get the exact error code. -*/ -const char * ZEXPORT gzerror (file, errnum) - gzFile file; - int *errnum; -{ - char *m; - gz_stream *s = (gz_stream*)file; - - if (s == NULL) { - *errnum = Z_STREAM_ERROR; - return (const char*)ERR_MSG(Z_STREAM_ERROR); - } - *errnum = s->z_err; - if (*errnum == Z_OK) return (const char*)""; - - m = (char*)(*errnum == Z_ERRNO ? zstrerror(errno) : s->stream.msg); - - if (m == NULL || *m == '\0') m = (char*)ERR_MSG(s->z_err); - - TRYFREE(s->msg); - s->msg = (char*)ALLOC(strlen(s->path) + strlen(m) + 3); - if (s->msg == Z_NULL) return (const char*)ERR_MSG(Z_MEM_ERROR); - strcpy(s->msg, s->path); - strcat(s->msg, ": "); - strcat(s->msg, m); - return (const char*)s->msg; -} - -/* =========================================================================== - Clear the error and end-of-file flags, and do the same for the real file. -*/ -void ZEXPORT gzclearerr (file) - gzFile file; -{ - gz_stream *s = (gz_stream*)file; - - if (s == NULL) return; - if (s->z_err != Z_STREAM_END) s->z_err = Z_OK; - s->z_eof = 0; - clearerr(s->file); -} diff --git a/Modules/zlib/gzlib.c b/Modules/zlib/gzlib.c new file mode 100644 --- /dev/null +++ b/Modules/zlib/gzlib.c @@ -0,0 +1,634 @@ +/* gzlib.c -- zlib functions common to reading and writing gzip files + * Copyright (C) 2004, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +#if defined(_WIN32) && !defined(__BORLANDC__) +# define LSEEK _lseeki64 +#else +#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 +# define LSEEK lseek64 +#else +# define LSEEK lseek +#endif +#endif + +/* Local functions */ +local void gz_reset OF((gz_statep)); +local gzFile gz_open OF((const void *, int, const char *)); + +#if defined UNDER_CE + +/* Map the Windows error number in ERROR to a locale-dependent error message + string and return a pointer to it. Typically, the values for ERROR come + from GetLastError. + + The string pointed to shall not be modified by the application, but may be + overwritten by a subsequent call to gz_strwinerror + + The gz_strwinerror function does not change the current setting of + GetLastError. */ +char ZLIB_INTERNAL *gz_strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. 
*/ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +#endif /* UNDER_CE */ + +/* Reset gzip file state */ +local void gz_reset(state) + gz_statep state; +{ + state->x.have = 0; /* no output data available */ + if (state->mode == GZ_READ) { /* for reading ... */ + state->eof = 0; /* not at end of file */ + state->past = 0; /* have not read past end yet */ + state->how = LOOK; /* look for gzip header */ + } + state->seek = 0; /* no seek request pending */ + gz_error(state, Z_OK, NULL); /* clear error */ + state->x.pos = 0; /* no uncompressed data yet */ + state->strm.avail_in = 0; /* no input data yet */ +} + +/* Open a gzip file either by name or file descriptor. */ +local gzFile gz_open(path, fd, mode) + const void *path; + int fd; + const char *mode; +{ + gz_statep state; + size_t len; + int oflag; +#ifdef O_CLOEXEC + int cloexec = 0; +#endif +#ifdef O_EXCL + int exclusive = 0; +#endif + + /* check input */ + if (path == NULL) + return NULL; + + /* allocate gzFile structure to return */ + state = (gz_statep)malloc(sizeof(gz_state)); + if (state == NULL) + return NULL; + state->size = 0; /* no buffers allocated yet */ + state->want = GZBUFSIZE; /* requested buffer size */ + state->msg = NULL; /* no error message yet */ + + /* interpret mode */ + state->mode = GZ_NONE; + state->level = Z_DEFAULT_COMPRESSION; + state->strategy = Z_DEFAULT_STRATEGY; + state->direct = 0; + while (*mode) { + if (*mode >= '0' && *mode <= '9') + state->level = *mode - '0'; + else + switch (*mode) { + case 'r': + state->mode = GZ_READ; + break; +#ifndef NO_GZCOMPRESS + case 'w': + state->mode = GZ_WRITE; + break; + case 'a': + state->mode = GZ_APPEND; + break; +#endif + case '+': /* can't read and write at the same time */ + free(state); + return NULL; + case 'b': /* ignore -- will request binary anyway */ + break; +#ifdef O_CLOEXEC + case 'e': + cloexec = 1; + break; +#endif +#ifdef O_EXCL + case 'x': + exclusive = 1; + break; +#endif + case 'f': + state->strategy = Z_FILTERED; + break; + case 'h': + state->strategy = Z_HUFFMAN_ONLY; + break; + case 'R': + state->strategy = Z_RLE; + break; + case 'F': + state->strategy = Z_FIXED; + break; + case 'T': + state->direct = 1; + break; + default: /* could consider as an error, but just ignore */ + ; + } + mode++; + } + + /* must provide an "r", "w", or "a" */ + if (state->mode == GZ_NONE) { + free(state); + return NULL; + } + + /* can't force transparent read */ + if (state->mode == GZ_READ) { + if (state->direct) { + free(state); + return NULL; + } + state->direct = 1; /* for empty file */ + } + + /* save the path name for error messages */ +#ifdef _WIN32 + if (fd == -2) { + len = wcstombs(NULL, path, 0); + if (len == (size_t)-1) + len = 0; + } + else +#endif + len = strlen((const char *)path); + state->path = (char *)malloc(len + 1); + if (state->path == NULL) { + free(state); + return NULL; + } +#ifdef _WIN32 + if (fd == -2) + if (len) + wcstombs(state->path, path, len + 1); + else + *(state->path) = 0; + else +#endif +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(state->path, len + 1, "%s", (const char *)path); +#else + strcpy(state->path, path); +#endif + + /* compute the flags for open() */ + oflag = +#ifdef O_LARGEFILE 
+ O_LARGEFILE | +#endif +#ifdef O_BINARY + O_BINARY | +#endif +#ifdef O_CLOEXEC + (cloexec ? O_CLOEXEC : 0) | +#endif + (state->mode == GZ_READ ? + O_RDONLY : + (O_WRONLY | O_CREAT | +#ifdef O_EXCL + (exclusive ? O_EXCL : 0) | +#endif + (state->mode == GZ_WRITE ? + O_TRUNC : + O_APPEND))); + + /* open the file with the appropriate flags (or just use fd) */ + state->fd = fd > -1 ? fd : ( +#ifdef _WIN32 + fd == -2 ? _wopen(path, oflag, 0666) : +#endif + open((const char *)path, oflag, 0666)); + if (state->fd == -1) { + free(state->path); + free(state); + return NULL; + } + if (state->mode == GZ_APPEND) + state->mode = GZ_WRITE; /* simplify later checks */ + + /* save the current position for rewinding (only if reading) */ + if (state->mode == GZ_READ) { + state->start = LSEEK(state->fd, 0, SEEK_CUR); + if (state->start == -1) state->start = 0; + } + + /* initialize stream */ + gz_reset(state); + + /* return stream */ + return (gzFile)state; +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzopen64(path, mode) + const char *path; + const char *mode; +{ + return gz_open(path, -1, mode); +} + +/* -- see zlib.h -- */ +gzFile ZEXPORT gzdopen(fd, mode) + int fd; + const char *mode; +{ + char *path; /* identifier for error messages */ + gzFile gz; + + if (fd == -1 || (path = (char *)malloc(7 + 3 * sizeof(int))) == NULL) + return NULL; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(path, 7 + 3 * sizeof(int), "", fd); /* for debugging */ +#else + sprintf(path, "", fd); /* for debugging */ +#endif + gz = gz_open(path, fd, mode); + free(path); + return gz; +} + +/* -- see zlib.h -- */ +#ifdef _WIN32 +gzFile ZEXPORT gzopen_w(path, mode) + const wchar_t *path; + const char *mode; +{ + return gz_open(path, -2, mode); +} +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzbuffer(file, size) + gzFile file; + unsigned size; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* make sure we haven't already allocated memory */ + if (state->size != 0) + return -1; + + /* check and set requested size */ + if (size < 2) + size = 2; /* need two bytes to check magic header */ + state->want = size; + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzrewind(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* back up and start over */ + if (LSEEK(state->fd, state->start, SEEK_SET) == -1) + return -1; + gz_reset(state); + return 0; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzseek64(file, offset, whence) + gzFile file; + z_off64_t offset; + int whence; +{ + unsigned n; + z_off64_t ret; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* check that there's no error */ + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + + /* can only seek from start or relative to current position */ + if (whence != SEEK_SET && whence != SEEK_CUR) + return -1; + + /* normalize offset to a SEEK_CUR 
specification */ + if (whence == SEEK_SET) + offset -= state->x.pos; + else if (state->seek) + offset += state->skip; + state->seek = 0; + + /* if within raw area while reading, just go there */ + if (state->mode == GZ_READ && state->how == COPY && + state->x.pos + offset >= 0) { + ret = LSEEK(state->fd, offset - state->x.have, SEEK_CUR); + if (ret == -1) + return -1; + state->x.have = 0; + state->eof = 0; + state->past = 0; + state->seek = 0; + gz_error(state, Z_OK, NULL); + state->strm.avail_in = 0; + state->x.pos += offset; + return state->x.pos; + } + + /* calculate skip amount, rewinding if needed for back seek when reading */ + if (offset < 0) { + if (state->mode != GZ_READ) /* writing -- can't go backwards */ + return -1; + offset += state->x.pos; + if (offset < 0) /* before start of file! */ + return -1; + if (gzrewind(file) == -1) /* rewind, then skip to offset */ + return -1; + } + + /* if reading, skip what's in output buffer (one less gzgetc() check) */ + if (state->mode == GZ_READ) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ? + (unsigned)offset : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + offset -= n; + } + + /* request skip (if not zero) */ + if (offset) { + state->seek = 1; + state->skip = offset; + } + return state->x.pos + offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzseek(file, offset, whence) + gzFile file; + z_off_t offset; + int whence; +{ + z_off64_t ret; + + ret = gzseek64(file, (z_off64_t)offset, whence); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gztell64(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* return position */ + return state->x.pos + (state->seek ? state->skip : 0); +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gztell(file) + gzFile file; +{ + z_off64_t ret; + + ret = gztell64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +z_off64_t ZEXPORT gzoffset64(file) + gzFile file; +{ + z_off64_t offset; + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return -1; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return -1; + + /* compute and return effective offset in file */ + offset = LSEEK(state->fd, 0, SEEK_CUR); + if (offset == -1) + return -1; + if (state->mode == GZ_READ) /* reading */ + offset -= state->strm.avail_in; /* don't count buffered input */ + return offset; +} + +/* -- see zlib.h -- */ +z_off_t ZEXPORT gzoffset(file) + gzFile file; +{ + z_off64_t ret; + + ret = gzoffset64(file); + return ret == (z_off_t)ret ? (z_off_t)ret : -1; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzeof(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return 0; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return 0; + + /* return end-of-file state */ + return state->mode == GZ_READ ? 
state->past : 0; +} + +/* -- see zlib.h -- */ +const char * ZEXPORT gzerror(file, errnum) + gzFile file; + int *errnum; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return NULL; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return NULL; + + /* return error information */ + if (errnum != NULL) + *errnum = state->err; + return state->err == Z_MEM_ERROR ? "out of memory" : + (state->msg == NULL ? "" : state->msg); +} + +/* -- see zlib.h -- */ +void ZEXPORT gzclearerr(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure and check integrity */ + if (file == NULL) + return; + state = (gz_statep)file; + if (state->mode != GZ_READ && state->mode != GZ_WRITE) + return; + + /* clear error and end-of-file */ + if (state->mode == GZ_READ) { + state->eof = 0; + state->past = 0; + } + gz_error(state, Z_OK, NULL); +} + +/* Create an error message in allocated memory and set state->err and + state->msg accordingly. Free any previous error message already there. Do + not try to free or allocate space if the error is Z_MEM_ERROR (out of + memory). Simply save the error message as a static string. If there is an + allocation failure constructing the error message, then convert the error to + out of memory. */ +void ZLIB_INTERNAL gz_error(state, err, msg) + gz_statep state; + int err; + const char *msg; +{ + /* free previously allocated message and clear */ + if (state->msg != NULL) { + if (state->err != Z_MEM_ERROR) + free(state->msg); + state->msg = NULL; + } + + /* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */ + if (err != Z_OK && err != Z_BUF_ERROR) + state->x.have = 0; + + /* set error code, and if no message, then done */ + state->err = err; + if (msg == NULL) + return; + + /* for an out of memory error, return literal string when requested */ + if (err == Z_MEM_ERROR) + return; + + /* construct error message with path */ + if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) == + NULL) { + state->err = Z_MEM_ERROR; + return; + } +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(state->msg, strlen(state->path) + strlen(msg) + 3, + "%s%s%s", state->path, ": ", msg); +#else + strcpy(state->msg, state->path); + strcat(state->msg, ": "); + strcat(state->msg, msg); +#endif + return; +} + +#ifndef INT_MAX +/* portably return maximum value for an int (when limits.h presumed not + available) -- we need to do this to cover cases where 2's complement not + used, since C standard permits 1's complement and sign-bit representations, + otherwise we could just use ((unsigned)-1) >> 1 */ +unsigned ZLIB_INTERNAL gz_intmax() +{ + unsigned p, q; + + p = 1; + do { + q = p; + p <<= 1; + p++; + } while (p > q); + return q >> 1; +} +#endif diff --git a/Modules/zlib/gzread.c b/Modules/zlib/gzread.c new file mode 100644 --- /dev/null +++ b/Modules/zlib/gzread.c @@ -0,0 +1,594 @@ +/* gzread.c -- zlib functions for reading gzip files + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); +local int gz_avail OF((gz_statep)); +local int gz_look OF((gz_statep)); +local int gz_decomp OF((gz_statep)); +local int gz_fetch OF((gz_statep)); +local int gz_skip OF((gz_statep, z_off64_t)); + +/* Use read() to load a buffer -- return -1 on error, otherwise 0. 
Read from + state->fd, and update state->eof, state->err, and state->msg as appropriate. + This function needs to loop on read(), since read() is not guaranteed to + read the number of bytes requested, depending on the type of descriptor. */ +local int gz_load(state, buf, len, have) + gz_statep state; + unsigned char *buf; + unsigned len; + unsigned *have; +{ + int ret; + + *have = 0; + do { + ret = read(state->fd, buf + *have, len - *have); + if (ret <= 0) + break; + *have += ret; + } while (*have < len); + if (ret < 0) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + if (ret == 0) + state->eof = 1; + return 0; +} + +/* Load up input buffer and set eof flag if last data loaded -- return -1 on + error, 0 otherwise. Note that the eof flag is set when the end of the input + file is reached, even though there may be unused data in the buffer. Once + that data has been used, no more attempts will be made to read the file. + If strm->avail_in != 0, then the current data is moved to the beginning of + the input buffer, and then the remainder of the buffer is loaded with the + available data from the input file. */ +local int gz_avail(state) + gz_statep state; +{ + unsigned got; + z_streamp strm = &(state->strm); + + if (state->err != Z_OK && state->err != Z_BUF_ERROR) + return -1; + if (state->eof == 0) { + if (strm->avail_in) { /* copy what's there to the start */ + unsigned char *p = state->in; + unsigned const char *q = strm->next_in; + unsigned n = strm->avail_in; + do { + *p++ = *q++; + } while (--n); + } + if (gz_load(state, state->in + strm->avail_in, + state->size - strm->avail_in, &got) == -1) + return -1; + strm->avail_in += got; + strm->next_in = state->in; + } + return 0; +} + +/* Look for gzip header, set up for inflate or copy. state->x.have must be 0. + If this is the first time in, allocate required memory. state->how will be + left unchanged if there is no more input data available, will be set to COPY + if there is no gzip header and direct copying will be performed, or it will + be set to GZIP for decompression. If direct copying, then leftover input + data from the input buffer will be copied to the output buffer. In that + case, all further file reads will be directly to either the output buffer or + a user buffer. If decompressing, the inflate state will be initialized. + gz_look() will return 0 on success or -1 on failure. 
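
From the application's point of view the outcome of this probe is exposed through gzdirect(); a sketch (hypothetical file name, not part of the patch) of telling whether the data is being inflated or merely copied through:

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        char buf[64];
        gzFile in = gzopen("maybe.gz", "rb");      /* plain files work too */

        if (in == NULL)
            return 1;
        if (gzread(in, buf, sizeof buf) < 0) {     /* triggers the header check */
            gzclose(in);
            return 1;
        }
        puts(gzdirect(in) ? "copied verbatim" : "gzip-compressed");
        return gzclose(in) == Z_OK ? 0 : 1;
    }
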
*/ +local int gz_look(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + /* allocate read buffers and inflate memory */ + if (state->size == 0) { + /* allocate buffers */ + state->in = (unsigned char *)malloc(state->want); + state->out = (unsigned char *)malloc(state->want << 1); + if (state->in == NULL || state->out == NULL) { + if (state->out != NULL) + free(state->out); + if (state->in != NULL) + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + state->size = state->want; + + /* allocate inflate memory */ + state->strm.zalloc = Z_NULL; + state->strm.zfree = Z_NULL; + state->strm.opaque = Z_NULL; + state->strm.avail_in = 0; + state->strm.next_in = Z_NULL; + if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */ + free(state->out); + free(state->in); + state->size = 0; + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + } + + /* get at least the magic bytes in the input buffer */ + if (strm->avail_in < 2) { + if (gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) + return 0; + } + + /* look for gzip magic bytes -- if there, do gzip decoding (note: there is + a logical dilemma here when considering the case of a partially written + gzip file, to wit, if a single 31 byte is written, then we cannot tell + whether this is a single-byte file, or just a partially written gzip + file -- for here we assume that if a gzip file is being written, then + the header will be written in a single operation, so that reading a + single byte is sufficient indication that it is not a gzip file) */ + if (strm->avail_in > 1 && + strm->next_in[0] == 31 && strm->next_in[1] == 139) { + inflateReset(strm); + state->how = GZIP; + state->direct = 0; + return 0; + } + + /* no gzip header -- if we were decoding gzip before, then this is trailing + garbage. Ignore the trailing garbage and finish. */ + if (state->direct == 0) { + strm->avail_in = 0; + state->eof = 1; + state->x.have = 0; + return 0; + } + + /* doing raw i/o, copy any leftover input to output -- this assumes that + the output buffer is larger than the input buffer, which also assures + space for gzungetc() */ + state->x.next = state->out; + if (strm->avail_in) { + memcpy(state->x.next, strm->next_in, strm->avail_in); + state->x.have = strm->avail_in; + strm->avail_in = 0; + } + state->how = COPY; + state->direct = 1; + return 0; +} + +/* Decompress from input to the provided next_out and avail_out in the state. + On return, state->x.have and state->x.next point to the just decompressed + data. If the gzip stream completes, state->how is reset to LOOK to look for + the next gzip stream or raw data, once state->x.have is depleted. Returns 0 + on success, -1 on failure. 
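
One user-visible consequence of this error handling, sketched outside the patch (file name hypothetical): a truncated .gz file yields whatever data is recoverable and then reports an error through gzerror() instead of a clean end of file:

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        char buf[4096];
        const char *msg;
        int n, err;
        gzFile in = gzopen("truncated.gz", "rb");  /* hypothetical file name */

        if (in == NULL)
            return 1;
        while ((n = gzread(in, buf, sizeof buf)) > 0)
            ;                                      /* consume what is readable */
        msg = gzerror(in, &err);
        if (err != Z_OK)                           /* e.g. premature end of file */
            fprintf(stderr, "%s\n", msg);
        gzclose(in);
        return err == Z_OK ? 0 : 1;
    }
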
*/ +local int gz_decomp(state) + gz_statep state; +{ + int ret = Z_OK; + unsigned had; + z_streamp strm = &(state->strm); + + /* fill output buffer up to end of deflate stream */ + had = strm->avail_out; + do { + /* get more input for inflate() */ + if (strm->avail_in == 0 && gz_avail(state) == -1) + return -1; + if (strm->avail_in == 0) { + gz_error(state, Z_BUF_ERROR, "unexpected end of file"); + break; + } + + /* decompress and handle errors */ + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) { + gz_error(state, Z_STREAM_ERROR, + "internal error: inflate stream corrupt"); + return -1; + } + if (ret == Z_MEM_ERROR) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + if (ret == Z_DATA_ERROR) { /* deflate stream invalid */ + gz_error(state, Z_DATA_ERROR, + strm->msg == NULL ? "compressed data error" : strm->msg); + return -1; + } + } while (strm->avail_out && ret != Z_STREAM_END); + + /* update available output */ + state->x.have = had - strm->avail_out; + state->x.next = strm->next_out - state->x.have; + + /* if the gzip stream completed successfully, look for another */ + if (ret == Z_STREAM_END) + state->how = LOOK; + + /* good decompression */ + return 0; +} + +/* Fetch data and put it in the output buffer. Assumes state->x.have is 0. + Data is either copied from the input file or decompressed from the input + file depending on state->how. If state->how is LOOK, then a gzip header is + looked for to determine whether to copy or decompress. Returns -1 on error, + otherwise 0. gz_fetch() will leave state->how as COPY or GZIP unless the + end of the input file has been reached and all data has been processed. */ +local int gz_fetch(state) + gz_statep state; +{ + z_streamp strm = &(state->strm); + + do { + switch(state->how) { + case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */ + if (gz_look(state) == -1) + return -1; + if (state->how == LOOK) + return 0; + break; + case COPY: /* -> COPY */ + if (gz_load(state, state->out, state->size << 1, &(state->x.have)) + == -1) + return -1; + state->x.next = state->out; + return 0; + case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */ + strm->avail_out = state->size << 1; + strm->next_out = state->out; + if (gz_decomp(state) == -1) + return -1; + } + } while (state->x.have == 0 && (!state->eof || strm->avail_in)); + return 0; +} + +/* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */ +local int gz_skip(state, len) + gz_statep state; + z_off64_t len; +{ + unsigned n; + + /* skip over len bytes or reach end-of-file, whichever comes first */ + while (len) + /* skip over whatever is in output buffer */ + if (state->x.have) { + n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ? 
+ (unsigned)len : state->x.have; + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + len -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && state->strm.avail_in == 0) + break; + + /* need more data to skip -- load up output buffer */ + else { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return -1; + } + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzread(file, buf, len) + gzFile file; + voidp buf; + unsigned len; +{ + unsigned got, n; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids the flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); + return -1; + } + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return -1; + } + + /* get len bytes to buf, or less than len if at the end */ + got = 0; + do { + /* first just try copying data from the output buffer */ + if (state->x.have) { + n = state->x.have > len ? len : state->x.have; + memcpy(buf, state->x.next, n); + state->x.next += n; + state->x.have -= n; + } + + /* output buffer empty -- return if we're at the end of the input */ + else if (state->eof && strm->avail_in == 0) { + state->past = 1; /* tried to read past end */ + break; + } + + /* need output data -- for small len or new stream load up our output + buffer */ + else if (state->how == LOOK || len < (state->size << 1)) { + /* get more output, looking for header if required */ + if (gz_fetch(state) == -1) + return -1; + continue; /* no progress yet -- go back to copy above */ + /* the copy above assures that we will leave with space in the + output buffer, allowing at least one gzungetc() to succeed */ + } + + /* large len -- read directly into user buffer */ + else if (state->how == COPY) { /* read directly */ + if (gz_load(state, (unsigned char *)buf, len, &n) == -1) + return -1; + } + + /* large len -- decompress directly into user buffer */ + else { /* state->how == GZIP */ + strm->avail_out = len; + strm->next_out = (unsigned char *)buf; + if (gz_decomp(state) == -1) + return -1; + n = state->x.have; + state->x.have = 0; + } + + /* update progress */ + len -= n; + buf = (char *)buf + n; + got += n; + state->x.pos += n; + } while (len); + + /* return number of bytes read into user buffer (will fit in int) */ + return (int)got; +} + +/* -- see zlib.h -- */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +#else +# undef gzgetc +#endif +int ZEXPORT gzgetc(file) + gzFile file; +{ + int ret; + unsigned char buf[1]; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* try output buffer (no need to check for skip request) */ + if (state->x.have) { + state->x.have--; + state->x.pos++; + return *(state->x.next)++; + } + + /* nothing there -- try gzread() */ + 
ret = gzread(file, buf, 1); + return ret < 1 ? -1 : buf[0]; +} + +int ZEXPORT gzgetc_(file) +gzFile file; +{ + return gzgetc(file); +} + +/* -- see zlib.h -- */ +int ZEXPORT gzungetc(c, file) + int c; + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return -1; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return -1; + } + + /* can't push EOF */ + if (c < 0) + return -1; + + /* if output buffer empty, put byte at end (allows more pushing) */ + if (state->x.have == 0) { + state->x.have = 1; + state->x.next = state->out + (state->size << 1) - 1; + state->x.next[0] = c; + state->x.pos--; + state->past = 0; + return c; + } + + /* if no room, give up (must have already done a gzungetc()) */ + if (state->x.have == (state->size << 1)) { + gz_error(state, Z_DATA_ERROR, "out of room to push characters"); + return -1; + } + + /* slide output data if needed and insert byte before existing data */ + if (state->x.next == state->out) { + unsigned char *src = state->out + state->x.have; + unsigned char *dest = state->out + (state->size << 1); + while (src > state->out) + *--dest = *--src; + state->x.next = dest; + } + state->x.have++; + state->x.next--; + state->x.next[0] = c; + state->x.pos--; + state->past = 0; + return c; +} + +/* -- see zlib.h -- */ +char * ZEXPORT gzgets(file, buf, len) + gzFile file; + char *buf; + int len; +{ + unsigned left, n; + char *str; + unsigned char *eol; + gz_statep state; + + /* check parameters and get internal structure */ + if (file == NULL || buf == NULL || len < 1) + return NULL; + state = (gz_statep)file; + + /* check that we're reading and that there's no (serious) error */ + if (state->mode != GZ_READ || + (state->err != Z_OK && state->err != Z_BUF_ERROR)) + return NULL; + + /* process a skip request */ + if (state->seek) { + state->seek = 0; + if (gz_skip(state, state->skip) == -1) + return NULL; + } + + /* copy output bytes up to new line or len - 1, whichever comes first -- + append a terminating zero to the string (we don't check for a zero in + the contents, let the user worry about that) */ + str = buf; + left = (unsigned)len - 1; + if (left) do { + /* assure that something is in the output buffer */ + if (state->x.have == 0 && gz_fetch(state) == -1) + return NULL; /* error */ + if (state->x.have == 0) { /* end of file */ + state->past = 1; /* read past end */ + break; /* return what we have */ + } + + /* look for end-of-line in current output buffer */ + n = state->x.have > left ? 
left : state->x.have; + eol = (unsigned char *)memchr(state->x.next, '\n', n); + if (eol != NULL) + n = (unsigned)(eol - state->x.next) + 1; + + /* copy through end-of-line, or remainder if not found */ + memcpy(buf, state->x.next, n); + state->x.have -= n; + state->x.next += n; + state->x.pos += n; + left -= n; + buf += n; + } while (left && eol == NULL); + + /* return terminated string, or if nothing, end of file */ + if (buf == str) + return NULL; + buf[0] = 0; + return str; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzdirect(file) + gzFile file; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + + /* if the state is not known, but we can find out, then do so (this is + mainly for right after a gzopen() or gzdopen()) */ + if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0) + (void)gz_look(state); + + /* return 1 if transparent, 0 if processing a gzip stream */ + return state->direct; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_r(file) + gzFile file; +{ + int ret, err; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're reading */ + if (state->mode != GZ_READ) + return Z_STREAM_ERROR; + + /* free memory and close file */ + if (state->size) { + inflateEnd(&(state->strm)); + free(state->out); + free(state->in); + } + err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK; + gz_error(state, Z_OK, NULL); + free(state->path); + ret = close(state->fd); + free(state); + return ret ? Z_ERRNO : err; +} diff --git a/Modules/zlib/gzwrite.c b/Modules/zlib/gzwrite.c new file mode 100644 --- /dev/null +++ b/Modules/zlib/gzwrite.c @@ -0,0 +1,577 @@ +/* gzwrite.c -- zlib functions for writing gzip files + * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +#include "gzguts.h" + +/* Local functions */ +local int gz_init OF((gz_statep)); +local int gz_comp OF((gz_statep, int)); +local int gz_zero OF((gz_statep, z_off64_t)); + +/* Initialize state for writing a gzip file. Mark initialization by setting + state->size to non-zero. Return -1 on failure or 0 on success. */ +local int gz_init(state) + gz_statep state; +{ + int ret; + z_streamp strm = &(state->strm); + + /* allocate input buffer */ + state->in = (unsigned char *)malloc(state->want); + if (state->in == NULL) { + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* only need output buffer and deflate state if compressing */ + if (!state->direct) { + /* allocate output buffer */ + state->out = (unsigned char *)malloc(state->want); + if (state->out == NULL) { + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + + /* allocate deflate memory, set up for gzip compression */ + strm->zalloc = Z_NULL; + strm->zfree = Z_NULL; + strm->opaque = Z_NULL; + ret = deflateInit2(strm, state->level, Z_DEFLATED, + MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy); + if (ret != Z_OK) { + free(state->out); + free(state->in); + gz_error(state, Z_MEM_ERROR, "out of memory"); + return -1; + } + } + + /* mark state as initialized */ + state->size = state->want; + + /* initialize write buffer if compressing */ + if (!state->direct) { + strm->avail_out = state->size; + strm->next_out = state->out; + state->x.next = strm->next_out; + } + return 0; +} + +/* Compress whatever is at avail_in and next_in and write to the output file. 
+ Return -1 if there is an error writing to the output file, otherwise 0. + flush is assumed to be a valid deflate() flush value. If flush is Z_FINISH, + then the deflate() state is reset to start a new gzip stream. If gz->direct + is true, then simply write to the output file without compressing, and + ignore flush. */ +local int gz_comp(state, flush) + gz_statep state; + int flush; +{ + int ret, got; + unsigned have; + z_streamp strm = &(state->strm); + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return -1; + + /* write directly if requested */ + if (state->direct) { + got = write(state->fd, strm->next_in, strm->avail_in); + if (got < 0 || (unsigned)got != strm->avail_in) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + strm->avail_in = 0; + return 0; + } + + /* run deflate() on provided input until it produces no more output */ + ret = Z_OK; + do { + /* write out current buffer contents if full, or if flushing, but if + doing Z_FINISH then don't write until we get to Z_STREAM_END */ + if (strm->avail_out == 0 || (flush != Z_NO_FLUSH && + (flush != Z_FINISH || ret == Z_STREAM_END))) { + have = (unsigned)(strm->next_out - state->x.next); + if (have && ((got = write(state->fd, state->x.next, have)) < 0 || + (unsigned)got != have)) { + gz_error(state, Z_ERRNO, zstrerror()); + return -1; + } + if (strm->avail_out == 0) { + strm->avail_out = state->size; + strm->next_out = state->out; + } + state->x.next = strm->next_out; + } + + /* compress */ + have = strm->avail_out; + ret = deflate(strm, flush); + if (ret == Z_STREAM_ERROR) { + gz_error(state, Z_STREAM_ERROR, + "internal error: deflate stream corrupt"); + return -1; + } + have -= strm->avail_out; + } while (have); + + /* if that completed a deflate stream, allow another to start */ + if (flush == Z_FINISH) + deflateReset(strm); + + /* all done, no errors */ + return 0; +} + +/* Compress len zeros to output. Return -1 on error, 0 on success. */ +local int gz_zero(state, len) + gz_statep state; + z_off64_t len; +{ + int first; + unsigned n; + z_streamp strm = &(state->strm); + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + + /* compress len zeros (len guaranteed > 0) */ + first = 1; + while (len) { + n = GT_OFF(state->size) || (z_off64_t)state->size > len ? 
+ (unsigned)len : state->size; + if (first) { + memset(state->in, 0, n); + first = 0; + } + strm->avail_in = n; + strm->next_in = state->in; + state->x.pos += n; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return -1; + len -= n; + } + return 0; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzwrite(file, buf, len) + gzFile file; + voidpc buf; + unsigned len; +{ + unsigned put = len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return 0; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* since an int is returned, make sure len fits in one, otherwise return + with an error (this avoids the flaw in the interface) */ + if ((int)len < 0) { + gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); + return 0; + } + + /* if len is zero, avoid unnecessary operations */ + if (len == 0) + return 0; + + /* allocate memory if this is the first time through */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* for small len, copy to input buffer, otherwise compress directly */ + if (len < state->size) { + /* copy to input buffer, compress when full */ + do { + unsigned have, copy; + + if (strm->avail_in == 0) + strm->next_in = state->in; + have = (unsigned)((strm->next_in + strm->avail_in) - state->in); + copy = state->size - have; + if (copy > len) + copy = len; + memcpy(state->in + have, buf, copy); + strm->avail_in += copy; + state->x.pos += copy; + buf = (const char *)buf + copy; + len -= copy; + if (len && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + } while (len); + } + else { + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* directly compress user buffer to file */ + strm->avail_in = len; + strm->next_in = (z_const Bytef *)buf; + state->x.pos += len; + if (gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + } + + /* input was all buffered or compressed (put will fit in int) */ + return (int)put; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputc(file, c) + gzFile file; + int c; +{ + unsigned have; + unsigned char buf[1]; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return -1; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* try writing to input buffer for speed (state->size == 0 if buffer not + initialized) */ + if (state->size) { + if (strm->avail_in == 0) + strm->next_in = state->in; + have = (unsigned)((strm->next_in + strm->avail_in) - state->in); + if (have < state->size) { + state->in[have] = c; + strm->avail_in++; + state->x.pos++; + return c & 0xff; + } + } + + /* no room in buffer or not initialized, use gz_write() */ + buf[0] = c; + if (gzwrite(file, buf, 1) != 1) + return -1; + return c & 0xff; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzputs(file, str) + gzFile file; + const char *str; +{ + int ret; + unsigned len; + + /* write string */ + len = (unsigned)strlen(str); + ret = gzwrite(file, str, len); + return ret == 0 && len != 0 ? 
-1 : ret; +} + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +#include + +/* -- see zlib.h -- */ +int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) +{ + int size, len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* do the printf() into the input buffer, put length in len */ + size = (int)(state->size); + state->in[size - 1] = 0; +#ifdef NO_vsnprintf +# ifdef HAS_vsprintf_void + (void)vsprintf((char *)(state->in), format, va); + for (len = 0; len < size; len++) + if (state->in[len] == 0) break; +# else + len = vsprintf((char *)(state->in), format, va); +# endif +#else +# ifdef HAS_vsnprintf_void + (void)vsnprintf((char *)(state->in), size, format, va); + len = strlen((char *)(state->in)); +# else + len = vsnprintf((char *)(state->in), size, format, va); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len <= 0 || len >= (int)size || state->in[size - 1] != 0) + return 0; + + /* update buffer and position, defer compression until needed */ + strm->avail_in = (unsigned)len; + strm->next_in = state->in; + state->x.pos += len; + return len; +} + +int ZEXPORTVA gzprintf(gzFile file, const char *format, ...) +{ + va_list va; + int ret; + + va_start(va, format); + ret = gzvprintf(file, format, va); + va_end(va); + return ret; +} + +#else /* !STDC && !Z_HAVE_STDARG_H */ + +/* -- see zlib.h -- */ +int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) + gzFile file; + const char *format; + int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, + a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; +{ + int size, len; + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that can really pass pointer in ints */ + if (sizeof(int) != sizeof(void *)) + return 0; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return 0; + + /* make sure we have some buffer space */ + if (state->size == 0 && gz_init(state) == -1) + return 0; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return 0; + } + + /* consume whatever's left in the input buffer */ + if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) + return 0; + + /* do the printf() into the input buffer, put length in len */ + size = (int)(state->size); + state->in[size - 1] = 0; +#ifdef NO_snprintf +# ifdef HAS_sprintf_void + sprintf((char *)(state->in), format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); + for (len = 0; len < size; len++) + if (state->in[len] == 0) break; +# else + len = sprintf((char *)(state->in), format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); +# endif +#else +# ifdef HAS_snprintf_void + 
snprintf((char *)(state->in), size, format, a1, a2, a3, a4, a5, a6, a7, a8, + a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); + len = strlen((char *)(state->in)); +# else + len = snprintf((char *)(state->in), size, format, a1, a2, a3, a4, a5, a6, + a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, + a19, a20); +# endif +#endif + + /* check that printf() results fit in buffer */ + if (len <= 0 || len >= (int)size || state->in[size - 1] != 0) + return 0; + + /* update buffer and position, defer compression until needed */ + strm->avail_in = (unsigned)len; + strm->next_in = state->in; + state->x.pos += len; + return len; +} + +#endif + +/* -- see zlib.h -- */ +int ZEXPORT gzflush(file, flush) + gzFile file; + int flush; +{ + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return -1; + state = (gz_statep)file; + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* check flush parameter */ + if (flush < 0 || flush > Z_FINISH) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* compress remaining data with requested flush */ + gz_comp(state, flush); + return state->err; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzsetparams(file, level, strategy) + gzFile file; + int level; + int strategy; +{ + gz_statep state; + z_streamp strm; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + strm = &(state->strm); + + /* check that we're writing and that there's no error */ + if (state->mode != GZ_WRITE || state->err != Z_OK) + return Z_STREAM_ERROR; + + /* if no change is requested, then do nothing */ + if (level == state->level && strategy == state->strategy) + return Z_OK; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + return -1; + } + + /* change compression parameters for subsequent input */ + if (state->size) { + /* flush previous input with previous parameters before changing */ + if (strm->avail_in && gz_comp(state, Z_PARTIAL_FLUSH) == -1) + return state->err; + deflateParams(strm, level, strategy); + } + state->level = level; + state->strategy = strategy; + return Z_OK; +} + +/* -- see zlib.h -- */ +int ZEXPORT gzclose_w(file) + gzFile file; +{ + int ret = Z_OK; + gz_statep state; + + /* get internal structure */ + if (file == NULL) + return Z_STREAM_ERROR; + state = (gz_statep)file; + + /* check that we're writing */ + if (state->mode != GZ_WRITE) + return Z_STREAM_ERROR; + + /* check for seek request */ + if (state->seek) { + state->seek = 0; + if (gz_zero(state, state->skip) == -1) + ret = state->err; + } + + /* flush, free memory, and close file */ + if (gz_comp(state, Z_FINISH) == -1) + ret = state->err; + if (state->size) { + if (!state->direct) { + (void)deflateEnd(&(state->strm)); + free(state->out); + } + free(state->in); + } + gz_error(state, Z_OK, NULL); + free(state->path); + if (close(state->fd) == -1) + ret = Z_ERRNO; + free(state); + return ret; +} diff --git a/Modules/zlib/infback.c b/Modules/zlib/infback.c --- a/Modules/zlib/infback.c +++ b/Modules/zlib/infback.c @@ -1,5 +1,5 @@ /* infback.c -- inflate using a call-back interface - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2011 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -42,10 +42,19 @@ return 
Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; +#endif } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; @@ -55,7 +64,7 @@ state->wbits = windowBits; state->wsize = 1U << windowBits; state->window = window; - state->write = 0; + state->wnext = 0; state->whave = 0; return Z_OK; } @@ -246,14 +255,14 @@ void FAR *out_desc; { struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ + z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ - code this; /* current decoding table entry */ + code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ @@ -389,19 +398,18 @@ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.val < 16) { - NEEDBITS(this.bits); - DROPBITS(this.bits); - state->lens[state->have++] = this.val; + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; } else { - if (this.val == 16) { - NEEDBITS(this.bits + 2); - DROPBITS(this.bits); + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; @@ -411,16 +419,16 @@ copy = 3 + BITS(2); DROPBITS(2); } - else if (this.val == 17) { - NEEDBITS(this.bits + 3); - DROPBITS(this.bits); + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { - NEEDBITS(this.bits + 7); - DROPBITS(this.bits); + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); @@ -438,7 +446,16 @@ /* handle error breaks in while */ if (state->mode == BAD) break; - /* build code tables */ + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; @@ -474,28 +491,28 @@ /* get a literal, length, or end-of-block code */ for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.op && (this.op & 0xf0) == 0) { - last = this; + if (here.op && (here.op & 0xf0) == 0) { + last = here; for (;;) { - this = 
state->lencode[last.val + + here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } - DROPBITS(this.bits); - state->length = (unsigned)this.val; + DROPBITS(here.bits); + state->length = (unsigned)here.val; /* process literal */ - if (this.op == 0) { - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + if (here.op == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); + "inflate: literal 0x%02x\n", here.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; @@ -504,21 +521,21 @@ } /* process end of block */ - if (this.op & 32) { + if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ - if (this.op & 64) { + if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); @@ -528,30 +545,30 @@ /* get distance code */ for (;;) { - this = state->distcode[BITS(state->distbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if ((this.op & 0xf0) == 0) { - last = this; + if ((here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->distcode[last.val + + here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } - DROPBITS(this.bits); - if (this.op & 64) { + DROPBITS(here.bits); + if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } - state->offset = (unsigned)this.val; + state->offset = (unsigned)here.val; /* get distance extra bits, if any */ - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); diff --git a/Modules/zlib/inffast.c b/Modules/zlib/inffast.c --- a/Modules/zlib/inffast.c +++ b/Modules/zlib/inffast.c @@ -1,5 +1,5 @@ /* inffast.c -- fast decoding - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2008, 2010, 2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -64,13 +64,13 @@ requires strm->avail_out >= 258 for each loop to avoid checking for output space. 
*/ -void inflate_fast(strm, start) +void ZLIB_INTERNAL inflate_fast(strm, start) z_streamp strm; unsigned start; /* inflate()'s starting value for strm->avail_out */ { struct inflate_state FAR *state; - unsigned char FAR *in; /* local strm->next_in */ - unsigned char FAR *last; /* while in < last, enough input available */ + z_const unsigned char FAR *in; /* local strm->next_in */ + z_const unsigned char FAR *last; /* have enough input while in < last */ unsigned char FAR *out; /* local strm->next_out */ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ unsigned char FAR *end; /* while out < end, enough space available */ @@ -79,7 +79,7 @@ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ - unsigned write; /* window write index */ + unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ @@ -87,7 +87,7 @@ code const FAR *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ - code this; /* retrieved table entry */ + code here; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ @@ -106,7 +106,7 @@ #endif wsize = state->wsize; whave = state->whave; - write = state->write; + wnext = state->wnext; window = state->window; hold = state->hold; bits = state->bits; @@ -124,20 +124,20 @@ hold += (unsigned long)(PUP(in)) << bits; bits += 8; } - this = lcode[hold & lmask]; + here = lcode[hold & lmask]; dolen: - op = (unsigned)(this.bits); + op = (unsigned)(here.bits); hold >>= op; bits -= op; - op = (unsigned)(this.op); + op = (unsigned)(here.op); if (op == 0) { /* literal */ - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
"inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); - PUP(out) = (unsigned char)(this.val); + "inflate: literal 0x%02x\n", here.val)); + PUP(out) = (unsigned char)(here.val); } else if (op & 16) { /* length base */ - len = (unsigned)(this.val); + len = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { @@ -155,14 +155,14 @@ hold += (unsigned long)(PUP(in)) << bits; bits += 8; } - this = dcode[hold & dmask]; + here = dcode[hold & dmask]; dodist: - op = (unsigned)(this.bits); + op = (unsigned)(here.bits); hold >>= op; bits -= op; - op = (unsigned)(this.op); + op = (unsigned)(here.op); if (op & 16) { /* distance base */ - dist = (unsigned)(this.val); + dist = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(PUP(in)) << bits; @@ -187,12 +187,34 @@ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; + if (state->sane) { + strm->msg = + (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + if (len <= op - whave) { + do { + PUP(out) = 0; + } while (--len); + continue; + } + len -= op - whave; + do { + PUP(out) = 0; + } while (--op > whave); + if (op == 0) { + from = out - dist; + do { + PUP(out) = PUP(from); + } while (--len); + continue; + } +#endif } from = window - OFF; - if (write == 0) { /* very common case */ + if (wnext == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; @@ -202,17 +224,17 @@ from = out - dist; /* rest from output */ } } - else if (write < op) { /* wrap around window */ - from += wsize + write - op; - op -= write; + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; if (op < len) { /* some from end of window */ len -= op; do { PUP(out) = PUP(from); } while (--op); from = window - OFF; - if (write < len) { /* some from start of window */ - op = write; + if (wnext < len) { /* some from start of window */ + op = wnext; len -= op; do { PUP(out) = PUP(from); @@ -222,7 +244,7 @@ } } else { /* contiguous in window */ - from += write - op; + from += wnext - op; if (op < len) { /* some from window */ len -= op; do { @@ -259,7 +281,7 @@ } } else if ((op & 64) == 0) { /* 2nd level distance code */ - this = dcode[this.val + (hold & ((1U << op) - 1))]; + here = dcode[here.val + (hold & ((1U << op) - 1))]; goto dodist; } else { @@ -269,7 +291,7 @@ } } else if ((op & 64) == 0) { /* 2nd level length code */ - this = lcode[this.val + (hold & ((1U << op) - 1))]; + here = lcode[here.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ @@ -305,7 +327,7 @@ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - - Three separate decoding do-loops for direct, window, and write == 0 + - Three separate decoding do-loops for direct, window, and wnext == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes diff --git a/Modules/zlib/inffast.h b/Modules/zlib/inffast.h --- a/Modules/zlib/inffast.h +++ b/Modules/zlib/inffast.h @@ -1,5 +1,5 @@ /* inffast.h -- header to 
use inffast.c - * Copyright (C) 1995-2003 Mark Adler + * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -8,4 +8,4 @@ subject to change. Applications should only use zlib.h. */ -void inflate_fast OF((z_streamp strm, unsigned start)); +void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); diff --git a/Modules/zlib/inffixed.h b/Modules/zlib/inffixed.h --- a/Modules/zlib/inffixed.h +++ b/Modules/zlib/inffixed.h @@ -2,9 +2,9 @@ * Generated automatically by makefixed(). */ - /* WARNING: this file should *not* be used by applications. It - is part of the implementation of the compression library and - is subject to change. Applications should only use zlib.h. + /* WARNING: this file should *not* be used by applications. + It is part of the implementation of this library and is + subject to change. Applications should only use zlib.h. */ static const code lenfix[512] = { diff --git a/Modules/zlib/inflate.c b/Modules/zlib/inflate.c --- a/Modules/zlib/inflate.c +++ b/Modules/zlib/inflate.c @@ -1,5 +1,5 @@ /* inflate.c -- zlib decompression - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2012 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -45,7 +45,7 @@ * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed - * - Pull out common write == 0 case for speed in inflate_fast() + * - Pull out common wnext == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() @@ -93,13 +93,39 @@ /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); -local int updatewindow OF((z_streamp strm, unsigned out)); +local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, + unsigned copy)); #ifdef BUILDFIXED void makefixed OF((void)); #endif -local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf, +local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, unsigned len)); +int ZEXPORT inflateResetKeep(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + strm->total_in = strm->total_out = state->total = 0; + strm->msg = Z_NULL; + if (state->wrap) /* to support ill-conceived Java test suite */ + strm->adler = state->wrap & 1; + state->mode = HEAD; + state->last = 0; + state->havedict = 0; + state->dmax = 32768U; + state->head = Z_NULL; + state->hold = 0; + state->bits = 0; + state->lencode = state->distcode = state->next = state->codes; + state->sane = 1; + state->back = -1; + Tracev((stderr, "inflate: reset\n")); + return Z_OK; +} + int ZEXPORT inflateReset(strm) z_streamp strm; { @@ -107,22 +133,98 @@ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; - strm->total_in = strm->total_out = state->total = 0; - strm->msg = Z_NULL; - strm->adler = 1; /* to support ill-conceived Java test suite */ - state->mode = HEAD; - state->last = 0; - state->havedict = 0; - state->dmax = 32768U; - state->head = Z_NULL; state->wsize = 0; state->whave = 0; - state->write = 0; - state->hold = 0; - state->bits = 0; - 
state->lencode = state->distcode = state->next = state->codes; - Tracev((stderr, "inflate: reset\n")); - return Z_OK; + state->wnext = 0; + return inflateResetKeep(strm); +} + +int ZEXPORT inflateReset2(strm, windowBits) +z_streamp strm; +int windowBits; +{ + int wrap; + struct inflate_state FAR *state; + + /* get the state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 1; +#ifdef GUNZIP + if (windowBits < 48) + windowBits &= 15; +#endif + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) + return Z_STREAM_ERROR; + if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { + ZFREE(strm, state->window); + state->window = Z_NULL; + } + + /* update state and reset the rest of it */ + state->wrap = wrap; + state->wbits = (unsigned)windowBits; + return inflateReset(strm); +} + +int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) +z_streamp strm; +int windowBits; +const char *version; +int stream_size; +{ + int ret; + struct inflate_state FAR *state; + + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != (int)(sizeof(z_stream))) + return Z_VERSION_ERROR; + if (strm == Z_NULL) return Z_STREAM_ERROR; + strm->msg = Z_NULL; /* in case we return an error */ + if (strm->zalloc == (alloc_func)0) { +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; +#endif + } + if (strm->zfree == (free_func)0) +#ifdef Z_SOLO + return Z_STREAM_ERROR; +#else + strm->zfree = zcfree; +#endif + state = (struct inflate_state FAR *) + ZALLOC(strm, 1, sizeof(struct inflate_state)); + if (state == Z_NULL) return Z_MEM_ERROR; + Tracev((stderr, "inflate: allocated\n")); + strm->state = (struct internal_state FAR *)state; + state->window = Z_NULL; + ret = inflateReset2(strm, windowBits); + if (ret != Z_OK) { + ZFREE(strm, state); + strm->state = Z_NULL; + } + return ret; +} + +int ZEXPORT inflateInit_(strm, version, stream_size) +z_streamp strm; +const char *version; +int stream_size; +{ + return inflateInit2_(strm, DEF_WBITS, version, stream_size); } int ZEXPORT inflatePrime(strm, bits, value) @@ -134,6 +236,11 @@ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; + if (bits < 0) { + state->hold = 0; + state->bits = 0; + return Z_OK; + } if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; value &= (1L << bits) - 1; state->hold += value << state->bits; @@ -141,57 +248,6 @@ return Z_OK; } -int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) -z_streamp strm; -int windowBits; -const char *version; -int stream_size; -{ - struct inflate_state FAR *state; - - if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || - stream_size != (int)(sizeof(z_stream))) - return Z_VERSION_ERROR; - if (strm == Z_NULL) return Z_STREAM_ERROR; - strm->msg = Z_NULL; /* in case we return an error */ - if (strm->zalloc == (alloc_func)0) { - strm->zalloc = zcalloc; - strm->opaque = (voidpf)0; - } - if (strm->zfree == (free_func)0) strm->zfree = zcfree; - state = (struct inflate_state FAR *) - ZALLOC(strm, 1, sizeof(struct inflate_state)); - if (state == Z_NULL) return Z_MEM_ERROR; - Tracev((stderr, "inflate: allocated\n")); - strm->state = (struct internal_state 
FAR *)state; - if (windowBits < 0) { - state->wrap = 0; - windowBits = -windowBits; - } - else { - state->wrap = (windowBits >> 4) + 1; -#ifdef GUNZIP - if (windowBits < 48) windowBits &= 15; -#endif - } - if (windowBits < 8 || windowBits > 15) { - ZFREE(strm, state); - strm->state = Z_NULL; - return Z_STREAM_ERROR; - } - state->wbits = (unsigned)windowBits; - state->window = Z_NULL; - return inflateReset(strm); -} - -int ZEXPORT inflateInit_(strm, version, stream_size) -z_streamp strm; -const char *version; -int stream_size; -{ - return inflateInit2_(strm, DEF_WBITS, version, stream_size); -} - /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. @@ -286,8 +342,8 @@ low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); - printf("{%u,%u,%d}", state.lencode[low].op, state.lencode[low].bits, - state.lencode[low].val); + printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, + state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } @@ -320,12 +376,13 @@ output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. */ -local int updatewindow(strm, out) +local int updatewindow(strm, end, copy) z_streamp strm; -unsigned out; +const Bytef *end; +unsigned copy; { struct inflate_state FAR *state; - unsigned copy, dist; + unsigned dist; state = (struct inflate_state FAR *)strm->state; @@ -340,30 +397,29 @@ /* if window not in use yet, initialize */ if (state->wsize == 0) { state->wsize = 1U << state->wbits; - state->write = 0; + state->wnext = 0; state->whave = 0; } /* copy state->wsize or less output bytes into the circular window */ - copy = out - strm->avail_out; if (copy >= state->wsize) { - zmemcpy(state->window, strm->next_out - state->wsize, state->wsize); - state->write = 0; + zmemcpy(state->window, end - state->wsize, state->wsize); + state->wnext = 0; state->whave = state->wsize; } else { - dist = state->wsize - state->write; + dist = state->wsize - state->wnext; if (dist > copy) dist = copy; - zmemcpy(state->window + state->write, strm->next_out - copy, dist); + zmemcpy(state->window + state->wnext, end - copy, dist); copy -= dist; if (copy) { - zmemcpy(state->window, strm->next_out - copy, copy); - state->write = copy; + zmemcpy(state->window, end - copy, copy); + state->wnext = copy; state->whave = state->wsize; } else { - state->write += dist; - if (state->write == state->wsize) state->write = 0; + state->wnext += dist; + if (state->wnext == state->wsize) state->wnext = 0; if (state->whave < state->wsize) state->whave += dist; } } @@ -464,11 +520,6 @@ bits -= bits & 7; \ } while (0) -/* Reverse the bytes in a 32-bit value */ -#define REVERSE(q) \ - ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ - (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) - /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. 
The state machine is @@ -556,7 +607,7 @@ int flush; { struct inflate_state FAR *state; - unsigned char FAR *next; /* next input */ + z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ @@ -564,7 +615,7 @@ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ - code this; /* current decoding table entry */ + code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ @@ -619,7 +670,9 @@ } DROPBITS(4); len = BITS(4) + 8; - if (len > state->wbits) { + if (state->wbits == 0) + state->wbits = len; + else if (len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; @@ -760,7 +813,7 @@ #endif case DICTID: NEEDBITS(32); - strm->adler = state->check = REVERSE(hold); + strm->adler = state->check = ZSWAP32(hold); INITBITS(); state->mode = DICT; case DICT: @@ -771,7 +824,7 @@ strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; case TYPE: - if (flush == Z_BLOCK) goto inf_leave; + if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); @@ -791,7 +844,11 @@ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); - state->mode = LEN; /* decode codes */ + state->mode = LEN_; /* decode codes */ + if (flush == Z_TREES) { + DROPBITS(2); + goto inf_leave; + } break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", @@ -816,6 +873,9 @@ Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); + state->mode = COPY_; + if (flush == Z_TREES) goto inf_leave; + case COPY_: state->mode = COPY; case COPY: copy = state->length; @@ -861,7 +921,7 @@ while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; - state->lencode = (code const FAR *)(state->next); + state->lencode = (const code FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); @@ -876,19 +936,18 @@ case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.val < 16) { - NEEDBITS(this.bits); - DROPBITS(this.bits); - state->lens[state->have++] = this.val; + if (here.val < 16) { + DROPBITS(here.bits); + state->lens[state->have++] = here.val; } else { - if (this.val == 16) { - NEEDBITS(this.bits + 2); - DROPBITS(this.bits); + if (here.val == 16) { + NEEDBITS(here.bits + 2); + DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; @@ -898,16 +957,16 @@ copy = 3 + BITS(2); DROPBITS(2); } - else if (this.val == 17) { - NEEDBITS(this.bits + 3); - DROPBITS(this.bits); + else if (here.val == 17) { + NEEDBITS(here.bits + 3); + DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { - NEEDBITS(this.bits + 7); - DROPBITS(this.bits); + NEEDBITS(here.bits + 7); + DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); @@ -925,9 +984,18 @@ /* handle error breaks in while */ if (state->mode == BAD) 
break; - /* build code tables */ + /* check for end-of-block code (better have one) */ + if (state->lens[256] == 0) { + strm->msg = (char *)"invalid code -- missing end-of-block"; + state->mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; - state->lencode = (code const FAR *)(state->next); + state->lencode = (const code FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); @@ -936,7 +1004,7 @@ state->mode = BAD; break; } - state->distcode = (code const FAR *)(state->next); + state->distcode = (const code FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); @@ -946,88 +1014,102 @@ break; } Tracev((stderr, "inflate: codes ok\n")); + state->mode = LEN_; + if (flush == Z_TREES) goto inf_leave; + case LEN_: state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); LOAD(); + if (state->mode == TYPE) + state->back = -1; break; } + state->back = 0; for (;;) { - this = state->lencode[BITS(state->lenbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->lencode[BITS(state->lenbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if (this.op && (this.op & 0xf0) == 0) { - last = this; + if (here.op && (here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->lencode[last.val + + here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); + state->back += last.bits; } - DROPBITS(this.bits); - state->length = (unsigned)this.val; - if ((int)(this.op) == 0) { - Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? + DROPBITS(here.bits); + state->back += here.bits; + state->length = (unsigned)here.val; + if ((int)(here.op) == 0) { + Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
"inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", this.val)); + "inflate: literal 0x%02x\n", here.val)); state->mode = LIT; break; } - if (this.op & 32) { + if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); + state->back = -1; state->mode = TYPE; break; } - if (this.op & 64) { + if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } - state->extra = (unsigned)(this.op) & 15; + state->extra = (unsigned)(here.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); + state->back += state->extra; } Tracevv((stderr, "inflate: length %u\n", state->length)); + state->was = state->length; state->mode = DIST; case DIST: for (;;) { - this = state->distcode[BITS(state->distbits)]; - if ((unsigned)(this.bits) <= bits) break; + here = state->distcode[BITS(state->distbits)]; + if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } - if ((this.op & 0xf0) == 0) { - last = this; + if ((here.op & 0xf0) == 0) { + last = here; for (;;) { - this = state->distcode[last.val + + here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; - if ((unsigned)(last.bits + this.bits) <= bits) break; + if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); + state->back += last.bits; } - DROPBITS(this.bits); - if (this.op & 64) { + DROPBITS(here.bits); + state->back += here.bits; + if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } - state->offset = (unsigned)this.val; - state->extra = (unsigned)(this.op) & 15; + state->offset = (unsigned)here.val; + state->extra = (unsigned)(here.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); + state->back += state->extra; } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { @@ -1036,11 +1118,6 @@ break; } #endif - if (state->offset > state->whave + out - left) { - strm->msg = (char *)"invalid distance too far back"; - state->mode = BAD; - break; - } Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; case MATCH: @@ -1048,12 +1125,32 @@ copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; - if (copy > state->write) { - copy -= state->write; + if (copy > state->whave) { + if (state->sane) { + strm->msg = (char *)"invalid distance too far back"; + state->mode = BAD; + break; + } +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + Trace((stderr, "inflate.c too far\n")); + copy -= state->whave; + if (copy > state->length) copy = state->length; + if (copy > left) copy = left; + left -= copy; + state->length -= copy; + do { + *put++ = 0; + } while (--copy); + if (state->length == 0) state->mode = LEN; + break; +#endif + } + if (copy > state->wnext) { + copy -= state->wnext; from = state->window + (state->wsize - copy); } else - from = state->window + (state->write - copy); + from = state->window + (state->wnext - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ @@ -1088,7 +1185,7 @@ #ifdef GUNZIP state->flags ? 
hold : #endif - REVERSE(hold)) != state->check) { + ZSWAP32(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; @@ -1132,8 +1229,9 @@ */ inf_leave: RESTORE(); - if (state->wsize || (state->mode < CHECK && out != strm->avail_out)) - if (updatewindow(strm, out)) { + if (state->wsize || (out != strm->avail_out && state->mode < BAD && + (state->mode < CHECK || flush != Z_FINISH))) + if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { state->mode = MEM; return Z_MEM_ERROR; } @@ -1146,7 +1244,8 @@ strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = state->bits + (state->last ? 64 : 0) + - (state->mode == TYPE ? 128 : 0); + (state->mode == TYPE ? 128 : 0) + + (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; @@ -1166,13 +1265,37 @@ return Z_OK; } +int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) +z_streamp strm; +Bytef *dictionary; +uInt *dictLength; +{ + struct inflate_state FAR *state; + + /* check state */ + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + + /* copy dictionary */ + if (state->whave && dictionary != Z_NULL) { + zmemcpy(dictionary, state->window + state->wnext, + state->whave - state->wnext); + zmemcpy(dictionary + state->whave - state->wnext, + state->window, state->wnext); + } + if (dictLength != Z_NULL) + *dictLength = state->whave; + return Z_OK; +} + int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { struct inflate_state FAR *state; - unsigned long id; + unsigned long dictid; + int ret; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; @@ -1180,29 +1303,21 @@ if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; - /* check for correct dictionary id */ + /* check for correct dictionary identifier */ if (state->mode == DICT) { - id = adler32(0L, Z_NULL, 0); - id = adler32(id, dictionary, dictLength); - if (id != state->check) + dictid = adler32(0L, Z_NULL, 0); + dictid = adler32(dictid, dictionary, dictLength); + if (dictid != state->check) return Z_DATA_ERROR; } - /* copy dictionary to window */ - if (updatewindow(strm, strm->avail_out)) { + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary + dictLength, dictLength); + if (ret) { state->mode = MEM; return Z_MEM_ERROR; } - if (dictLength > state->wsize) { - zmemcpy(state->window, dictionary + dictLength - state->wsize, - state->wsize); - state->whave = state->wsize; - } - else { - zmemcpy(state->window + state->wsize - dictLength, dictionary, - dictLength); - state->whave = dictLength; - } state->havedict = 1; Tracev((stderr, "inflate: dictionary set\n")); return Z_OK; @@ -1238,7 +1353,7 @@ */ local unsigned syncsearch(have, buf, len) unsigned FAR *have; -unsigned char FAR *buf; +const unsigned char FAR *buf; unsigned len; { unsigned got; @@ -1350,8 +1465,8 @@ } /* copy state */ - zmemcpy(dest, source, sizeof(z_stream)); - zmemcpy(copy, state, sizeof(struct inflate_state)); + zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); + zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); if (state->lencode >= state->codes && state->lencode <= state->codes + ENOUGH - 1) { copy->lencode = copy->codes + 
(state->lencode - state->codes); @@ -1366,3 +1481,32 @@ dest->state = (struct internal_state FAR *)copy; return Z_OK; } + +int ZEXPORT inflateUndermine(strm, subvert) +z_streamp strm; +int subvert; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + state = (struct inflate_state FAR *)strm->state; + state->sane = !subvert; +#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR + return Z_OK; +#else + state->sane = 1; + return Z_DATA_ERROR; +#endif +} + +long ZEXPORT inflateMark(strm) +z_streamp strm; +{ + struct inflate_state FAR *state; + + if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16; + state = (struct inflate_state FAR *)strm->state; + return ((long)(state->back) << 16) + + (state->mode == COPY ? state->length : + (state->mode == MATCH ? state->was - state->length : 0)); +} diff --git a/Modules/zlib/inflate.h b/Modules/zlib/inflate.h --- a/Modules/zlib/inflate.h +++ b/Modules/zlib/inflate.h @@ -1,5 +1,5 @@ /* inflate.h -- internal inflate state definition - * Copyright (C) 1995-2004 Mark Adler + * Copyright (C) 1995-2009 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -32,11 +32,13 @@ TYPE, /* i: waiting for type bits, including last-flag bit */ TYPEDO, /* i: same, but skip check to exit inflate on new block */ STORED, /* i: waiting for stored size (length and complement) */ + COPY_, /* i/o: same as COPY below, but only first time in */ COPY, /* i/o: waiting for input or output to copy stored block */ TABLE, /* i: waiting for dynamic block table lengths */ LENLENS, /* i: waiting for code length code lengths */ CODELENS, /* i: waiting for length/lit and distance code lengths */ - LEN, /* i: waiting for length/lit code */ + LEN_, /* i: same as LEN below, but only first time in */ + LEN, /* i: waiting for length/lit/eob code */ LENEXT, /* i: waiting for length extra bits */ DIST, /* i: waiting for distance code */ DISTEXT, /* i: waiting for distance extra bits */ @@ -53,19 +55,21 @@ /* State transitions between above modes - - (most modes can go to the BAD or MEM mode -- not shown for clarity) + (most modes can go to BAD or MEM on error -- not shown for clarity) Process header: - HEAD -> (gzip) or (zlib) - (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME - NAME -> COMMENT -> HCRC -> TYPE + HEAD -> (gzip) or (zlib) or (raw) + (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> + HCRC -> TYPE (zlib) -> DICTID or TYPE DICTID -> DICT -> TYPE + (raw) -> TYPEDO Read deflate blocks: - TYPE -> STORED or TABLE or LEN or CHECK - STORED -> COPY -> TYPE - TABLE -> LENLENS -> CODELENS -> LEN - Read deflate codes: + TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK + STORED -> COPY_ -> COPY -> TYPE + TABLE -> LENLENS -> CODELENS -> LEN_ + LEN_ -> LEN + Read deflate codes in fixed or dynamic block: LEN -> LENEXT or LIT or TYPE LENEXT -> DIST -> DISTEXT -> MATCH -> LEN LIT -> LEN @@ -73,7 +77,7 @@ CHECK -> LENGTH -> DONE */ -/* state maintained between inflate() calls. Approximately 7K bytes. */ +/* state maintained between inflate() calls. Approximately 10K bytes. 
*/ struct inflate_state { inflate_mode mode; /* current inflate mode */ int last; /* true if processing last block */ @@ -88,7 +92,7 @@ unsigned wbits; /* log base 2 of requested window size */ unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ - unsigned write; /* window write index */ + unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if needed */ /* bit accumulator */ unsigned long hold; /* input bit accumulator */ @@ -112,4 +116,7 @@ unsigned short lens[320]; /* temporary storage for code lengths */ unsigned short work[288]; /* work area for code table building */ code codes[ENOUGH]; /* space for code tables */ + int sane; /* if false, allow invalid distance too far */ + int back; /* bits back of last unprocessed length/lit */ + unsigned was; /* initial length of match */ }; diff --git a/Modules/zlib/inftrees.c b/Modules/zlib/inftrees.c --- a/Modules/zlib/inftrees.c +++ b/Modules/zlib/inftrees.c @@ -1,5 +1,5 @@ /* inftrees.c -- generate Huffman trees for efficient decoding - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2013 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -9,7 +9,7 @@ #define MAXBITS 15 const char inflate_copyright[] = - " inflate 1.2.3 Copyright 1995-2005 Mark Adler "; + " inflate 1.2.8 Copyright 1995-2013 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot @@ -29,7 +29,7 @@ table index bits. It will differ if the request is greater than the longest code or if it is less than the shortest code. */ -int inflate_table(type, lens, codes, table, bits, work) +int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; @@ -50,7 +50,7 @@ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ - code this; /* table entry for duplication */ + code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ @@ -62,7 +62,7 @@ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, - 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196}; + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, @@ -115,15 +115,15 @@ if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ - this.op = (unsigned char)64; /* invalid code marker */ - this.bits = (unsigned char)1; - this.val = (unsigned short)0; - *(*table)++ = this; /* make a table to force an error */ - *(*table)++ = this; + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)1; + here.val = (unsigned short)0; + *(*table)++ = here; /* make a table to force an error */ + *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } - for (min = 1; min <= MAXBITS; min++) + for (min = 1; min < max; min++) if 
(count[min] != 0) break; if (root < min) root = min; @@ -166,11 +166,10 @@ entered in the tables. used keeps track of how many table entries have been allocated from the - provided *table space. It is checked when a LENS table is being made - against the space in *table, ENOUGH, minus the maximum space needed by - the worst case distance code, MAXD. This should never happen, but the - sufficiency of ENOUGH has not been proven exhaustively, hence the check. - This assumes that when type == LENS, bits == 9. + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This @@ -209,24 +208,25 @@ mask = used - 1; /* mask for comparing low */ /* check available table space */ - if (type == LENS && used >= ENOUGH - MAXD) + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ - this.bits = (unsigned char)(len - drop); + here.bits = (unsigned char)(len - drop); if ((int)(work[sym]) < end) { - this.op = (unsigned char)0; - this.val = work[sym]; + here.op = (unsigned char)0; + here.val = work[sym]; } else if ((int)(work[sym]) > end) { - this.op = (unsigned char)(extra[work[sym]]); - this.val = base[work[sym]]; + here.op = (unsigned char)(extra[work[sym]]); + here.val = base[work[sym]]; } else { - this.op = (unsigned char)(32 + 64); /* end of block */ - this.val = 0; + here.op = (unsigned char)(32 + 64); /* end of block */ + here.val = 0; } /* replicate for those indices with low len bits equal to huff */ @@ -235,7 +235,7 @@ min = fill; /* save offset to next table */ do { fill -= incr; - next[(huff >> drop) + fill] = this; + next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ @@ -277,7 +277,8 @@ /* check for enough space */ used += 1U << curr; - if (type == LENS && used >= ENOUGH - MAXD) + if ((type == LENS && used > ENOUGH_LENS) || + (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ @@ -288,38 +289,14 @@ } } - /* - Fill in rest of table for incomplete codes. This loop is similar to the - loop above in incrementing huff for table indices. It is assumed that - len is equal to curr + drop, so there is no loop needed to increment - through high index bits. When the current sub-table is filled, the loop - drops back to the root table to fill in any remaining entries there. 
- */ - this.op = (unsigned char)64; /* invalid code marker */ - this.bits = (unsigned char)(len - drop); - this.val = (unsigned short)0; - while (huff != 0) { - /* when done with sub-table, drop back to root table */ - if (drop != 0 && (huff & mask) != low) { - drop = 0; - len = root; - next = *table; - this.bits = (unsigned char)len; - } - - /* put invalid code marker in table */ - next[huff >> drop] = this; - - /* backwards increment the len-bit code huff */ - incr = 1U << (len - 1); - while (huff & incr) - incr >>= 1; - if (incr != 0) { - huff &= incr - 1; - huff += incr; - } - else - huff = 0; + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff != 0) { + here.op = (unsigned char)64; /* invalid code marker */ + here.bits = (unsigned char)(len - drop); + here.val = (unsigned short)0; + next[huff] = here; } /* set return parameters */ diff --git a/Modules/zlib/inftrees.h b/Modules/zlib/inftrees.h --- a/Modules/zlib/inftrees.h +++ b/Modules/zlib/inftrees.h @@ -1,5 +1,5 @@ /* inftrees.h -- header to use inftrees.c - * Copyright (C) 1995-2005 Mark Adler + * Copyright (C) 1995-2005, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -35,21 +35,28 @@ 01000000 - invalid code */ -/* Maximum size of dynamic tree. The maximum found in a long but non- - exhaustive search was 1444 code structures (852 for length/literals - and 592 for distances, the latter actually the result of an - exhaustive search). The true maximum is not known, but the value - below is more than safe. */ -#define ENOUGH 2048 -#define MAXD 592 +/* Maximum size of the dynamic table. The maximum number of code structures is + 1444, which is the sum of 852 for literal/length codes and 592 for distance + codes. These values were found by exhaustive searches using the program + examples/enough.c found in the zlib distribtution. The arguments to that + program are the number of symbols, the initial root table size, and the + maximum bit length of a code. "enough 286 9 15" for literal/length codes + returns returns 852, and "enough 30 6 15" for distance codes returns 592. + The initial root table size (9 or 6) is found in the fifth argument of the + inflate_table() calls in inflate.c and infback.c. If the root table size is + changed, then these maximum sizes would be need to be recalculated and + updated. */ +#define ENOUGH_LENS 852 +#define ENOUGH_DISTS 592 +#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) -/* Type of code to build for inftable() */ +/* Type of code to build for inflate_table() */ typedef enum { CODES, LENS, DISTS } codetype; -extern int inflate_table OF((codetype type, unsigned short FAR *lens, +int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, unsigned codes, code FAR * FAR *table, unsigned FAR *bits, unsigned short FAR *work)); diff --git a/Modules/zlib/make_vms.com b/Modules/zlib/make_vms.com --- a/Modules/zlib/make_vms.com +++ b/Modules/zlib/make_vms.com @@ -1,46 +1,95 @@ $! make libz under VMS written by $! Martin P.J. Zinser -$! +$! +$! In case of problems with the install you might contact me at +$! zinser at zinser.no-ip.info(preferred) or +$! martin.zinser at eurexchange.com (work) +$! +$! Make procedure history for Zlib +$! +$!------------------------------------------------------------------------------ +$! Version history +$! 
0.01 20060120 First version to receive a number +$! 0.02 20061008 Adapt to new Makefile.in +$! 0.03 20091224 Add support for large file check +$! 0.04 20100110 Add new gzclose, gzlib, gzread, gzwrite +$! 0.05 20100221 Exchange zlibdefs.h by zconf.h.in +$! 0.06 20120111 Fix missing amiss_err, update zconf_h.in, fix new exmples +$! subdir path, update module search in makefile.in +$! 0.07 20120115 Triggered by work done by Alexey Chupahin completly redesigned +$! shared image creation +$! 0.08 20120219 Make it work on VAX again, pre-load missing symbols to shared +$! image +$! 0.09 20120305 SMS. P1 sets builder ("MMK", "MMS", " " (built-in)). +$! "" -> automatic, preference: MMK, MMS, built-in. $! $ on error then goto err_exit $! -$! -$! Just some general constants... -$! $ true = 1 $ false = 0 $ tmpnam = "temp_" + f$getjpi("","pid") -$ SAY = "WRITE SYS$OUTPUT" +$ tt = tmpnam + ".txt" +$ tc = tmpnam + ".c" +$ th = tmpnam + ".h" +$ define/nolog tconfig 'th' +$ its_decc = false +$ its_vaxc = false +$ its_gnuc = false +$ s_case = False $! $! Setup variables holding "config" information $! -$ Make = "" +$ Make = "''p1'" $ name = "Zlib" $ version = "?.?.?" $ v_string = "ZLIB_VERSION" $ v_file = "zlib.h" -$ ccopt = "" -$ lopts = "" +$ ccopt = "/include = []" +$ lopts = "" +$ dnsrl = "" +$ aconf_in_file = "zconf.h.in#zconf.h_in#zconf_h.in" +$ conf_check_string = "" $ linkonly = false $ optfile = name + ".opt" -$ its_decc = false -$ its_vaxc = false -$ its_gnuc = false -$ axp = f$getsyi("HW_MODEL").ge.1024 -$ s_case = false +$ mapfile = name + ".map" +$ libdefs = "" +$ vax = f$getsyi("HW_MODEL").lt.1024 +$ axp = f$getsyi("HW_MODEL").ge.1024 .and. f$getsyi("HW_MODEL").lt.4096 +$ ia64 = f$getsyi("HW_MODEL").ge.4096 +$! +$! 2012-03-05 SMS. +$! Why is this needed? And if it is needed, why not simply ".not. vax"? +$! +$!!! if axp .or. ia64 then set proc/parse=extended +$! +$ whoami = f$parse(f$environment("Procedure"),,,,"NO_CONCEAL") +$ mydef = F$parse(whoami,,,"DEVICE") +$ mydir = f$parse(whoami,,,"DIRECTORY") - "][" +$ myproc = f$parse(whoami,,,"Name") + f$parse(whoami,,,"type") +$! $! Check for MMK/MMS $! -$ If F$Search ("Sys$System:MMS.EXE") .nes. "" Then Make = "MMS" -$ If F$Type (MMK) .eqs. "STRING" Then Make = "MMK" -$! +$ if (Make .eqs. "") +$ then +$ If F$Search ("Sys$System:MMS.EXE") .nes. "" Then Make = "MMS" +$ If F$Type (MMK) .eqs. "STRING" Then Make = "MMK" +$ else +$ Make = f$edit( Make, "trim") +$ endif $! $ gosub find_version $! +$ open/write topt tmp.opt +$ open/write optf 'optfile' +$! $ gosub check_opts $! $! Look for the compiler used $! $ gosub check_compiler +$ close topt +$ close optf +$! $ if its_decc $ then $ ccopt = "/prefix=all" + ccopt @@ -54,18 +103,79 @@ $ define sys decc$library_include: $ endif $ endif +$! +$! 2012-03-05 SMS. +$! Why /NAMES = AS_IS? Why not simply ".not. vax"? And why not on VAX? +$! +$ if axp .or. ia64 +$ then +$ ccopt = ccopt + "/name=as_is/opt=(inline=speed)" +$ s_case = true +$ endif $ endif $ if its_vaxc .or. its_gnuc $ then $ if f$trnlnm("SYS").eqs."" then define sys sys$library: $ endif $! +$! Build a fake configure input header +$! +$ open/write conf_hin config.hin +$ write conf_hin "#undef _LARGEFILE64_SOURCE" +$ close conf_hin +$! +$! +$ i = 0 +$FIND_ACONF: +$ fname = f$element(i,"#",aconf_in_file) +$ if fname .eqs. "#" then goto AMISS_ERR +$ if f$search(fname) .eqs. 
"" +$ then +$ i = i + 1 +$ goto find_aconf +$ endif +$ open/read/err=aconf_err aconf_in 'fname' +$ open/write aconf zconf.h +$ACONF_LOOP: +$ read/end_of_file=aconf_exit aconf_in line +$ work = f$edit(line, "compress,trim") +$ if f$extract(0,6,work) .nes. "#undef" +$ then +$ if f$extract(0,12,work) .nes. "#cmakedefine" +$ then +$ write aconf line +$ endif +$ else +$ cdef = f$element(1," ",work) +$ gosub check_config +$ endif +$ goto aconf_loop +$ACONF_EXIT: +$ write aconf "" +$ write aconf "/* VMS specifics added by make_vms.com: */" +$ write aconf "#define VMS 1" +$ write aconf "#include " +$ write aconf "#include " +$ write aconf "#ifdef _LARGEFILE" +$ write aconf "# define off64_t __off64_t" +$ write aconf "# define fopen64 fopen" +$ write aconf "# define fseeko64 fseeko" +$ write aconf "# define lseek64 lseek" +$ write aconf "# define ftello64 ftell" +$ write aconf "#endif" +$ write aconf "#if !defined( __VAX) && (__CRTL_VER >= 70312000)" +$ write aconf "# define HAVE_VSNPRINTF" +$ write aconf "#endif" +$ close aconf_in +$ close aconf +$ if f$search("''th'") .nes. "" then delete 'th';* $! Build the thing plain or with mms $! $ write sys$output "Compiling Zlib sources ..." $ if make.eqs."" -$ then -$ dele example.obj;*,minigzip.obj;* +$ then +$ if (f$search( "example.obj;*") .nes. "") then delete example.obj;* +$ if (f$search( "minigzip.obj;*") .nes. "") then delete minigzip.obj;* $ CALL MAKE adler32.OBJ "CC ''CCOPT' adler32" - adler32.c zlib.h zconf.h $ CALL MAKE compress.OBJ "CC ''CCOPT' compress" - @@ -74,8 +184,14 @@ crc32.c zlib.h zconf.h $ CALL MAKE deflate.OBJ "CC ''CCOPT' deflate" - deflate.c deflate.h zutil.h zlib.h zconf.h -$ CALL MAKE gzio.OBJ "CC ''CCOPT' gzio" - - gzio.c zutil.h zlib.h zconf.h +$ CALL MAKE gzclose.OBJ "CC ''CCOPT' gzclose" - + gzclose.c zutil.h zlib.h zconf.h +$ CALL MAKE gzlib.OBJ "CC ''CCOPT' gzlib" - + gzlib.c zutil.h zlib.h zconf.h +$ CALL MAKE gzread.OBJ "CC ''CCOPT' gzread" - + gzread.c zutil.h zlib.h zconf.h +$ CALL MAKE gzwrite.OBJ "CC ''CCOPT' gzwrite" - + gzwrite.c zutil.h zlib.h zconf.h $ CALL MAKE infback.OBJ "CC ''CCOPT' infback" - infback.c zutil.h inftrees.h inflate.h inffast.h inffixed.h $ CALL MAKE inffast.OBJ "CC ''CCOPT' inffast" - @@ -93,46 +209,47 @@ $ write sys$output "Building Zlib ..." $ CALL MAKE libz.OLB "lib/crea libz.olb *.obj" *.OBJ $ write sys$output "Building example..." -$ CALL MAKE example.OBJ "CC ''CCOPT' example" - - example.c zlib.h zconf.h +$ CALL MAKE example.OBJ "CC ''CCOPT' [.test]example" - + [.test]example.c zlib.h zconf.h $ call make example.exe "LINK example,libz.olb/lib" example.obj libz.olb -$ if f$search("x11vms:xvmsutils.olb") .nes. "" -$ then -$ write sys$output "Building minigzip..." -$ CALL MAKE minigzip.OBJ "CC ''CCOPT' minigzip" - - minigzip.c zlib.h zconf.h -$ call make minigzip.exe - - "LINK minigzip,libz.olb/lib,x11vms:xvmsutils.olb/lib" - - minigzip.obj libz.olb -$ endif -$ else +$ write sys$output "Building minigzip..." +$ CALL MAKE minigzip.OBJ "CC ''CCOPT' [.test]minigzip" - + [.test]minigzip.c zlib.h zconf.h +$ call make minigzip.exe - + "LINK minigzip,libz.olb/lib" - + minigzip.obj libz.olb +$ else $ gosub crea_mms -$ SAY "Make ''name' ''version' with ''Make' " +$ write sys$output "Make ''name' ''version' with ''Make' " $ 'make' -$ endif +$ endif $! -$! Alpha gets a shareable image +$! Create shareable image $! 
-$ If axp -$ Then -$ gosub crea_olist -$ write sys$output "Creating libzshr.exe" -$ call anal_obj_axp modules.opt _link.opt -$ if s_case -$ then -$ open/append optf modules.opt -$ write optf "case_sensitive=YES" -$ close optf -$ endif -$ LINK_'lopts'/SHARE=libzshr.exe modules.opt/opt,_link.opt/opt -$ endif +$ gosub crea_olist +$ write sys$output "Creating libzshr.exe" +$ call map_2_shopt 'mapfile' 'optfile' +$ LINK_'lopts'/SHARE=libzshr.exe modules.opt/opt,'optfile'/opt $ write sys$output "Zlib build completed" +$ delete/nolog tmp.opt;* $ exit +$AMISS_ERR: +$ write sys$output "No source for config.hin found." +$ write sys$output "Tried any of ''aconf_in_file'" +$ goto err_exit $CC_ERR: $ write sys$output "C compiler required to build ''name'" $ goto err_exit $ERR_EXIT: $ set message/facil/ident/sever/text +$ close/nolog optf +$ close/nolog topt +$ close/nolog aconf_in +$ close/nolog aconf +$ close/nolog out +$ close/nolog min +$ close/nolog mod +$ close/nolog h_in $ write sys$output "Exiting..." $ exit 2 $! @@ -180,61 +297,72 @@ $! $! Check command line options and set symbols accordingly $! +$!------------------------------------------------------------------------------ +$! Version history +$! 0.01 20041206 First version to receive a number +$! 0.02 20060126 Add new "HELP" target $ CHECK_OPTS: $ i = 1 $ OPT_LOOP: $ if i .lt. 9 $ then $ cparm = f$edit(p'i',"upcase") -$ if cparm .eqs. "DEBUG" +$! +$! Check if parameter actually contains something +$! +$ if f$edit(cparm,"trim") .nes. "" $ then -$ ccopt = ccopt + "/noopt/deb" -$ lopts = lopts + "/deb" -$ endif -$ if f$locate("CCOPT=",cparm) .lt. f$length(cparm) -$ then -$ start = f$locate("=",cparm) + 1 -$ len = f$length(cparm) - start -$ ccopt = ccopt + f$extract(start,len,cparm) -$ if f$locate("AS_IS",f$edit(ccopt,"UPCASE")) .lt. f$length(ccopt) - - then s_case = true -$ endif -$ if cparm .eqs. "LINK" then linkonly = true -$ if f$locate("LOPTS=",cparm) .lt. f$length(cparm) -$ then -$ start = f$locate("=",cparm) + 1 -$ len = f$length(cparm) - start -$ lopts = lopts + f$extract(start,len,cparm) -$ endif -$ if f$locate("CC=",cparm) .lt. f$length(cparm) -$ then -$ start = f$locate("=",cparm) + 1 -$ len = f$length(cparm) - start -$ cc_com = f$extract(start,len,cparm) - if (cc_com .nes. "DECC") .and. - - (cc_com .nes. "VAXC") .and. - - (cc_com .nes. "GNUC") +$ if cparm .eqs. "DEBUG" $ then -$ write sys$output "Unsupported compiler choice ''cc_com' ignored" -$ write sys$output "Use DECC, VAXC, or GNUC instead" -$ else -$ if cc_com .eqs. "DECC" then its_decc = true -$ if cc_com .eqs. "VAXC" then its_vaxc = true -$ if cc_com .eqs. "GNUC" then its_gnuc = true +$ ccopt = ccopt + "/noopt/deb" +$ lopts = lopts + "/deb" $ endif -$ endif -$ if f$locate("MAKE=",cparm) .lt. f$length(cparm) -$ then -$ start = f$locate("=",cparm) + 1 -$ len = f$length(cparm) - start -$ mmks = f$extract(start,len,cparm) -$ if (mmks .eqs. "MMK") .or. (mmks .eqs. "MMS") +$ if f$locate("CCOPT=",cparm) .lt. f$length(cparm) $ then -$ make = mmks -$ else -$ write sys$output "Unsupported make choice ''mmks' ignored" -$ write sys$output "Use MMK or MMS instead" +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ ccopt = ccopt + f$extract(start,len,cparm) +$ if f$locate("AS_IS",f$edit(ccopt,"UPCASE")) .lt. f$length(ccopt) - + then s_case = true $ endif +$ if cparm .eqs. "LINK" then linkonly = true +$ if f$locate("LOPTS=",cparm) .lt. 
f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ lopts = lopts + f$extract(start,len,cparm) +$ endif +$ if f$locate("CC=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ cc_com = f$extract(start,len,cparm) + if (cc_com .nes. "DECC") .and. - + (cc_com .nes. "VAXC") .and. - + (cc_com .nes. "GNUC") +$ then +$ write sys$output "Unsupported compiler choice ''cc_com' ignored" +$ write sys$output "Use DECC, VAXC, or GNUC instead" +$ else +$ if cc_com .eqs. "DECC" then its_decc = true +$ if cc_com .eqs. "VAXC" then its_vaxc = true +$ if cc_com .eqs. "GNUC" then its_gnuc = true +$ endif +$ endif +$ if f$locate("MAKE=",cparm) .lt. f$length(cparm) +$ then +$ start = f$locate("=",cparm) + 1 +$ len = f$length(cparm) - start +$ mmks = f$extract(start,len,cparm) +$ if (mmks .eqs. "MMK") .or. (mmks .eqs. "MMS") +$ then +$ make = mmks +$ else +$ write sys$output "Unsupported make choice ''mmks' ignored" +$ write sys$output "Use MMK or MMS instead" +$ endif +$ endif +$ if cparm .eqs. "HELP" then gosub bhelp $ endif $ i = i + 1 $ goto opt_loop @@ -244,6 +372,11 @@ $! $! Look for the compiler used $! +$! Version history +$! 0.01 20040223 First version to receive a number +$! 0.02 20040229 Save/set value of decc$no_rooted_search_lists +$! 0.03 20060202 Extend handling of GNU C +$! 0.04 20090402 Compaq -> hp $CHECK_COMPILER: $ if (.not. (its_decc .or. its_vaxc .or. its_gnuc)) $ then @@ -257,9 +390,26 @@ $ if (.not. (its_decc .or. its_vaxc .or. its_gnuc)) $ then goto CC_ERR $ else -$ if its_decc then write sys$output "CC compiler check ... Compaq C" -$ if its_vaxc then write sys$output "CC compiler check ... VAX C" -$ if its_gnuc then write sys$output "CC compiler check ... GNU C" +$ if its_decc +$ then +$ write sys$output "CC compiler check ... hp C" +$ if f$trnlnm("decc$no_rooted_search_lists") .nes. "" +$ then +$ dnrsl = f$trnlnm("decc$no_rooted_search_lists") +$ endif +$ define/nolog decc$no_rooted_search_lists 1 +$ else +$ if its_vaxc then write sys$output "CC compiler check ... VAX C" +$ if its_gnuc +$ then +$ write sys$output "CC compiler check ... GNU C" +$ if f$trnlnm(topt) then write topt "gnu_cc:[000000]gcclib.olb/lib" +$ if f$trnlnm(optf) then write optf "gnu_cc:[000000]gcclib.olb/lib" +$ cc = "gcc" +$ endif +$ if f$trnlnm(topt) then write topt "sys$share:vaxcrtl.exe/share" +$ if f$trnlnm(optf) then write optf "sys$share:vaxcrtl.exe/share" +$ endif $ endif $ return $!------------------------------------------------------------------------------ @@ -274,19 +424,19 @@ $ deck # descrip.mms: MMS description file for building zlib on VMS # written by Martin P.J. 
Zinser -# +# -OBJS = adler32.obj, compress.obj, crc32.obj, gzio.obj, uncompr.obj, infback.obj\ +OBJS = adler32.obj, compress.obj, crc32.obj, gzclose.obj, gzlib.obj\ + gzread.obj, gzwrite.obj, uncompr.obj, infback.obj\ deflate.obj, trees.obj, zutil.obj, inflate.obj, \ inftrees.obj, inffast.obj $ eod $ write out "CFLAGS=", ccopt $ write out "LOPTS=", lopts +$ write out "all : example.exe minigzip.exe libz.olb" $ copy sys$input: out $ deck - -all : example.exe minigzip.exe libz.olb @ write sys$output " Example applications available" libz.olb : libz.olb($(OBJS)) @@ -296,7 +446,7 @@ link $(LOPTS) example,libz.olb/lib minigzip.exe : minigzip.obj libz.olb - link $(LOPTS) minigzip,libz.olb/lib,x11vms:xvmsutils.olb/lib + link $(LOPTS) minigzip,libz.olb/lib clean : delete *.obj;*,libz.olb;*,*.opt;*,*.exe;* @@ -307,12 +457,15 @@ compress.obj : compress.c zlib.h zconf.h crc32.obj : crc32.c zutil.h zlib.h zconf.h deflate.obj : deflate.c deflate.h zutil.h zlib.h zconf.h -example.obj : example.c zlib.h zconf.h -gzio.obj : gzio.c zutil.h zlib.h zconf.h +example.obj : [.test]example.c zlib.h zconf.h +gzclose.obj : gzclose.c zutil.h zlib.h zconf.h +gzlib.obj : gzlib.c zutil.h zlib.h zconf.h +gzread.obj : gzread.c zutil.h zlib.h zconf.h +gzwrite.obj : gzwrite.c zutil.h zlib.h zconf.h inffast.obj : inffast.c zutil.h zlib.h zconf.h inftrees.h inffast.h inflate.obj : inflate.c zutil.h zlib.h zconf.h inftrees.obj : inftrees.c zutil.h zlib.h zconf.h inftrees.h -minigzip.obj : minigzip.c zlib.h zconf.h +minigzip.obj : [.test]minigzip.c zlib.h zconf.h trees.obj : trees.c deflate.h zutil.h zlib.h zconf.h uncompr.obj : uncompr.c zlib.h zconf.h zutil.obj : zutil.c zutil.h zlib.h zconf.h @@ -328,13 +481,18 @@ $CREA_OLIST: $ open/read min makefile.in $ open/write mod modules.opt -$ src_check = "OBJS =" +$ src_check_list = "OBJZ =#OBJG =" $MRLOOP: $ read/end=mrdone min rec -$ if (f$extract(0,6,rec) .nes. src_check) then goto mrloop +$ i = 0 +$SRC_CHECK_LOOP: +$ src_check = f$element(i, "#", src_check_list) +$ i = i+1 +$ if src_check .eqs. "#" then goto mrloop +$ if (f$extract(0,6,rec) .nes. src_check) then goto src_check_loop $ rec = rec - src_check $ gosub extra_filnam -$ if (f$element(1,"\",rec) .eqs. "\") then goto mrdone +$ if (f$element(1,"\",rec) .eqs. "\") then goto mrloop $MRSLOOP: $ read/end=mrdone min rec $ gosub extra_filnam @@ -382,80 +540,328 @@ $ return $!------------------------------------------------------------------------------ $! -$! Analyze Object files for OpenVMS AXP to extract Procedure and Data -$! information to build a symbol vector for a shareable image -$! All the "brains" of this logic was suggested by Hartmut Becker -$! (Hartmut.Becker at compaq.com). All the bugs were introduced by me -$! (zinser at decus.de), so if you do have problem reports please do not -$! bother Hartmut/HP, but get in touch with me +$CHECK_CONFIG: $! -$ ANAL_OBJ_AXP: Subroutine -$ V = 'F$Verify(0) +$ in_ldef = f$locate(cdef,libdefs) +$ if (in_ldef .lt. f$length(libdefs)) +$ then +$ write aconf "#define ''cdef' 1" +$ libdefs = f$extract(0,in_ldef,libdefs) + - + f$extract(in_ldef + f$length(cdef) + 1, - + f$length(libdefs) - in_ldef - f$length(cdef) - 1, - + libdefs) +$ else +$ if (f$type('cdef') .eqs. "INTEGER") +$ then +$ write aconf "#define ''cdef' ", 'cdef' +$ else +$ if (f$type('cdef') .eqs. 
"STRING") +$ then +$ write aconf "#define ''cdef' ", """", '''cdef'', """" +$ else +$ gosub check_cc_def +$ endif +$ endif +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Check if this is a define relating to the properties of the C/C++ +$! compiler +$! +$ CHECK_CC_DEF: +$ if (cdef .eqs. "_LARGEFILE64_SOURCE") +$ then +$ copy sys$input: 'tc' +$ deck +#include "tconfig" +#define _LARGEFILE +#include + +int main(){ +FILE *fp; + fp = fopen("temp.txt","r"); + fseeko(fp,1,SEEK_SET); + fclose(fp); +} + +$ eod +$ test_inv = false +$ comm_h = false +$ gosub cc_prop_check +$ return +$ endif +$ write aconf "/* ", line, " */" +$ return +$!------------------------------------------------------------------------------ +$! +$! Check for properties of C/C++ compiler +$! +$! Version history +$! 0.01 20031020 First version to receive a number +$! 0.02 20031022 Added logic for defines with value +$! 0.03 20040309 Make sure local config file gets not deleted +$! 0.04 20041230 Also write include for configure run +$! 0.05 20050103 Add processing of "comment defines" +$CC_PROP_CHECK: +$ cc_prop = true +$ is_need = false +$ is_need = (f$extract(0,4,cdef) .eqs. "NEED") .or. (test_inv .eq. true) +$ if f$search(th) .eqs. "" then create 'th' +$ set message/nofac/noident/nosever/notext +$ on error then continue +$ cc 'tmpnam' +$ if .not. ($status) then cc_prop = false +$ on error then continue +$! The headers might lie about the capabilities of the RTL +$ link 'tmpnam',tmp.opt/opt +$ if .not. ($status) then cc_prop = false +$ set message/fac/ident/sever/text +$ on error then goto err_exit +$ delete/nolog 'tmpnam'.*;*/exclude='th' +$ if (cc_prop .and. .not. is_need) .or. - + (.not. cc_prop .and. is_need) +$ then +$ write sys$output "Checking for ''cdef'... yes" +$ if f$type('cdef_val'_yes) .nes. "" +$ then +$ if f$type('cdef_val'_yes) .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,'cdef_val'_yes) +$ if f$type('cdef_val'_yes) .eqs. "STRING" - + then call write_config f$fao("#define !AS !AS",cdef,'cdef_val'_yes) +$ else +$ call write_config f$fao("#define !AS 1",cdef) +$ endif +$ if (cdef .eqs. "HAVE_FSEEKO") .or. (cdef .eqs. "_LARGE_FILES") .or. - + (cdef .eqs. "_LARGEFILE64_SOURCE") then - + call write_config f$string("#define _LARGEFILE 1") +$ else +$ write sys$output "Checking for ''cdef'... no" +$ if (comm_h) +$ then + call write_config f$fao("/* !AS */",line) +$ else +$ if f$type('cdef_val'_no) .nes. "" +$ then +$ if f$type('cdef_val'_no) .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,'cdef_val'_no) +$ if f$type('cdef_val'_no) .eqs. "STRING" - + then call write_config f$fao("#define !AS !AS",cdef,'cdef_val'_no) +$ else +$ call write_config f$fao("#undef !AS",cdef) +$ endif +$ endif +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Check for properties of C/C++ compiler with multiple result values +$! +$! Version history +$! 0.01 20040127 First version +$! 0.02 20050103 Reconcile changes from cc_prop up to version 0.05 +$CC_MPROP_CHECK: +$ cc_prop = true +$ i = 1 +$ idel = 1 +$ MT_LOOP: +$ if f$type(result_'i') .eqs. "STRING" +$ then +$ set message/nofac/noident/nosever/notext +$ on error then continue +$ cc 'tmpnam'_'i' +$ if .not. ($status) then cc_prop = false +$ on error then continue +$! The headers might lie about the capabilities of the RTL +$ link 'tmpnam'_'i',tmp.opt/opt +$ if .not. 
($status) then cc_prop = false +$ set message/fac/ident/sever/text +$ on error then goto err_exit +$ delete/nolog 'tmpnam'_'i'.*;* +$ if (cc_prop) +$ then +$ write sys$output "Checking for ''cdef'... ", mdef_'i' +$ if f$type(mdef_'i') .eqs. "INTEGER" - + then call write_config f$fao("#define !AS !UL",cdef,mdef_'i') +$ if f$type('cdef_val'_yes) .eqs. "STRING" - + then call write_config f$fao("#define !AS !AS",cdef,mdef_'i') +$ goto msym_clean +$ else +$ i = i + 1 +$ goto mt_loop +$ endif +$ endif +$ write sys$output "Checking for ''cdef'... no" +$ call write_config f$fao("#undef !AS",cdef) +$ MSYM_CLEAN: +$ if (idel .le. msym_max) +$ then +$ delete/sym mdef_'idel' +$ idel = idel + 1 +$ goto msym_clean +$ endif +$ return +$!------------------------------------------------------------------------------ +$! +$! Write configuration to both permanent and temporary config file +$! +$! Version history +$! 0.01 20031029 First version to receive a number +$! +$WRITE_CONFIG: SUBROUTINE +$ write aconf 'p1' +$ open/append confh 'th' +$ write confh 'p1' +$ close confh +$ENDSUBROUTINE +$!------------------------------------------------------------------------------ +$! +$! Analyze the project map file and create the symbol vector for a shareable +$! image from it +$! +$! Version history +$! 0.01 20120128 First version +$! 0.02 20120226 Add pre-load logic +$! +$ MAP_2_SHOPT: Subroutine +$! $ SAY := "WRITE_ SYS$OUTPUT" -$ +$! $ IF F$SEARCH("''P1'") .EQS. "" $ THEN -$ SAY "ANAL_OBJ_AXP-E-NOSUCHFILE: Error, inputfile ''p1' not available" -$ goto exit_aa +$ SAY "MAP_2_SHOPT-E-NOSUCHFILE: Error, inputfile ''p1' not available" +$ goto exit_m2s $ ENDIF $ IF "''P2'" .EQS. "" $ THEN -$ SAY "ANAL_OBJ_AXP: Error, no output file provided" -$ goto exit_aa +$ SAY "MAP_2_SHOPT: Error, no output file provided" +$ goto exit_m2s $ ENDIF -$ -$ open/read in 'p1 -$ create a.tmp -$ open/append atmp a.tmp -$ loop: -$ read/end=end_loop in line -$ f= f$search(line) -$ if f .eqs. "" +$! +$ module1 = "deflate#deflateEnd#deflateInit_#deflateParams#deflateSetDictionary" +$ module2 = "gzclose#gzerror#gzgetc#gzgets#gzopen#gzprintf#gzputc#gzputs#gzread" +$ module3 = "gzseek#gztell#inflate#inflateEnd#inflateInit_#inflateSetDictionary" +$ module4 = "inflateSync#uncompress#zlibVersion#compress" +$ open/read map 'p1 +$ if axp .or. ia64 $ then -$ write sys$output "ANAL_OBJ_AXP-w-nosuchfile, ''line'" -$ goto loop +$ open/write aopt a.opt +$ open/write bopt b.opt +$ write aopt " CASE_SENSITIVE=YES" +$ write bopt "SYMBOL_VECTOR= (-" +$ mod_sym_num = 1 +$ MOD_SYM_LOOP: +$ if f$type(module'mod_sym_num') .nes. "" +$ then +$ mod_in = 0 +$ MOD_SYM_IN: +$ shared_proc = f$element(mod_in, "#", module'mod_sym_num') +$ if shared_proc .nes. "#" +$ then +$ write aopt f$fao(" symbol_vector=(!AS/!AS=PROCEDURE)",- + f$edit(shared_proc,"upcase"),shared_proc) +$ write bopt f$fao("!AS=PROCEDURE,-",shared_proc) +$ mod_in = mod_in + 1 +$ goto mod_sym_in +$ endif +$ mod_sym_num = mod_sym_num + 1 +$ goto mod_sym_loop +$ endif +$MAP_LOOP: +$ read/end=map_end map line +$ if (f$locate("{",line).lt. f$length(line)) .or. - + (f$locate("global:", line) .lt. f$length(line)) +$ then +$ proc = true +$ goto map_loop +$ endif +$ if f$locate("}",line).lt. f$length(line) then proc = false +$ if f$locate("local:", line) .lt. f$length(line) then proc = false +$ if proc +$ then +$ shared_proc = f$edit(line,"collapse") +$ chop_semi = f$locate(";", shared_proc) +$ if chop_semi .lt. 
f$length(shared_proc) then - + shared_proc = f$extract(0, chop_semi, shared_proc) +$ write aopt f$fao(" symbol_vector=(!AS/!AS=PROCEDURE)",- + f$edit(shared_proc,"upcase"),shared_proc) +$ write bopt f$fao("!AS=PROCEDURE,-",shared_proc) +$ endif +$ goto map_loop +$MAP_END: +$ close/nolog aopt +$ close/nolog bopt +$ open/append libopt 'p2' +$ open/read aopt a.opt +$ open/read bopt b.opt +$ALOOP: +$ read/end=aloop_end aopt line +$ write libopt line +$ goto aloop +$ALOOP_END: +$ close/nolog aopt +$ sv = "" +$BLOOP: +$ read/end=bloop_end bopt svn +$ if (svn.nes."") +$ then +$ if (sv.nes."") then write libopt sv +$ sv = svn +$ endif +$ goto bloop +$BLOOP_END: +$ write libopt f$extract(0,f$length(sv)-2,sv), "-" +$ write libopt ")" +$ close/nolog bopt +$ delete/nolog/noconf a.opt;*,b.opt;* +$ else +$ if vax +$ then +$ open/append libopt 'p2' +$ mod_sym_num = 1 +$ VMOD_SYM_LOOP: +$ if f$type(module'mod_sym_num') .nes. "" +$ then +$ mod_in = 0 +$ VMOD_SYM_IN: +$ shared_proc = f$element(mod_in, "#", module'mod_sym_num') +$ if shared_proc .nes. "#" +$ then +$ write libopt f$fao("UNIVERSAL=!AS",- + f$edit(shared_proc,"upcase")) +$ mod_in = mod_in + 1 +$ goto vmod_sym_in +$ endif +$ mod_sym_num = mod_sym_num + 1 +$ goto vmod_sym_loop +$ endif +$VMAP_LOOP: +$ read/end=vmap_end map line +$ if (f$locate("{",line).lt. f$length(line)) .or. - + (f$locate("global:", line) .lt. f$length(line)) +$ then +$ proc = true +$ goto vmap_loop +$ endif +$ if f$locate("}",line).lt. f$length(line) then proc = false +$ if f$locate("local:", line) .lt. f$length(line) then proc = false +$ if proc +$ then +$ shared_proc = f$edit(line,"collapse") +$ chop_semi = f$locate(";", shared_proc) +$ if chop_semi .lt. f$length(shared_proc) then - + shared_proc = f$extract(0, chop_semi, shared_proc) +$ write libopt f$fao("UNIVERSAL=!AS",- + f$edit(shared_proc,"upcase")) +$ endif +$ goto vmap_loop +$VMAP_END: +$ else +$ write sys$output "Unknown Architecture (Not VAX, AXP, or IA64)" +$ write sys$output "No options file created" +$ endif $ endif -$ define/user sys$output nl: -$ define/user sys$error nl: -$ anal/obj/gsd 'f /out=x.tmp -$ open/read xtmp x.tmp -$ XLOOP: -$ read/end=end_xloop xtmp xline -$ xline = f$edit(xline,"compress") -$ write atmp xline -$ goto xloop -$ END_XLOOP: -$ close xtmp -$ goto loop -$ end_loop: -$ close in -$ close atmp -$ if f$search("a.tmp") .eqs. "" - - then $ exit -$ ! all global definitions -$ search a.tmp "symbol:","EGSY$V_DEF 1","EGSY$V_NORM 1"/out=b.tmp -$ ! all procedures -$ search b.tmp "EGSY$V_NORM 1"/wind=(0,1) /out=c.tmp -$ search c.tmp "symbol:"/out=d.tmp -$ define/user sys$output nl: -$ edito/edt/command=sys$input d.tmp -sub/symbol: "/symbol_vector=(/whole -sub/"/=PROCEDURE)/whole -exit -$ ! all data -$ search b.tmp "EGSY$V_DEF 1"/wind=(0,1) /out=e.tmp -$ search e.tmp "symbol:"/out=f.tmp -$ define/user sys$output nl: -$ edito/edt/command=sys$input f.tmp -sub/symbol: "/symbol_vector=(/whole -sub/"/=DATA)/whole -exit -$ sort/nodupl d.tmp,f.tmp 'p2' -$ delete a.tmp;*,b.tmp;*,c.tmp;*,d.tmp;*,e.tmp;*,f.tmp;* -$ if f$search("x.tmp") .nes. "" - - then $ delete x.tmp;* -$! -$ EXIT_AA: -$ if V then set verify +$ EXIT_M2S: +$ close/nolog map +$ close/nolog libopt $ endsubroutine -$!------------------------------------------------------------------------------ diff --git a/Modules/zlib/minigzip.c b/Modules/zlib/minigzip.c --- a/Modules/zlib/minigzip.c +++ b/Modules/zlib/minigzip.c @@ -1,5 +1,5 @@ /* minigzip.c -- simulate gzip using the zlib compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. 
+ * Copyright (C) 1995-2006, 2010, 2011 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -15,8 +15,8 @@ /* @(#) $Id$ */ +#include "zlib.h" #include -#include "zlib.h" #ifdef STDC # include @@ -32,11 +32,18 @@ #if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) # include # include +# ifdef UNDER_CE +# include +# endif # define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) #else # define SET_BINARY_MODE(file) #endif +#ifdef _MSC_VER +# define snprintf _snprintf +#endif + #ifdef VMS # define unlink delete # define GZ_SUFFIX "-gz" @@ -50,9 +57,75 @@ # include /* for fileno */ #endif +#if !defined(Z_HAVE_UNISTD_H) && !defined(_LARGEFILE64_SOURCE) #ifndef WIN32 /* unlink already in stdio.h for WIN32 */ extern int unlink OF((const char *)); #endif +#endif + +#if defined(UNDER_CE) +# include +# define perror(s) pwinerror(s) + +/* Map the Windows error number in ERROR to a locale-dependent error + message string and return a pointer to it. Typically, the values + for ERROR come from GetLastError. + + The string pointed to shall not be modified by the application, + but may be overwritten by a subsequent call to strwinerror + + The strwinerror function does not change the current setting + of GetLastError. */ + +static char *strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. */ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +static void pwinerror (s) + const char *s; +{ + if (s && *s) + fprintf(stderr, "%s: %s\n", s, strwinerror(GetLastError ())); + else + fprintf(stderr, "%s\n", strwinerror(GetLastError ())); +} + +#endif /* UNDER_CE */ #ifndef GZ_SUFFIX # define GZ_SUFFIX ".gz" @@ -69,6 +142,197 @@ # define local #endif +#ifdef Z_SOLO +/* for Z_SOLO, create simplified gz* functions using deflate and inflate */ + +#if defined(Z_HAVE_UNISTD_H) || defined(Z_LARGE) +# include /* for unlink() */ +#endif + +void *myalloc OF((void *, unsigned, unsigned)); +void myfree OF((void *, void *)); + +void *myalloc(q, n, m) + void *q; + unsigned n, m; +{ + q = Z_NULL; + return calloc(n, m); +} + +void myfree(q, p) + void *q, *p; +{ + q = Z_NULL; + free(p); +} + +typedef struct gzFile_s { + FILE *file; + int write; + int err; + char *msg; + z_stream strm; +} *gzFile; + +gzFile gzopen OF((const char *, const char *)); +gzFile gzdopen OF((int, const char *)); +gzFile gz_open OF((const char *, int, const char *)); + +gzFile gzopen(path, mode) +const char *path; +const char *mode; +{ + return gz_open(path, -1, mode); +} + +gzFile gzdopen(fd, mode) +int fd; +const char *mode; +{ + return gz_open(NULL, fd, mode); +} + +gzFile gz_open(path, fd, mode) + const char *path; + int fd; + const char *mode; +{ + gzFile gz; + int ret; + + gz = malloc(sizeof(struct gzFile_s)); + if (gz == NULL) + return NULL; + gz->write = strchr(mode, 'w') != NULL; + gz->strm.zalloc = myalloc; + gz->strm.zfree = myfree; + gz->strm.opaque = 
Z_NULL; + if (gz->write) + ret = deflateInit2(&(gz->strm), -1, 8, 15 + 16, 8, 0); + else { + gz->strm.next_in = 0; + gz->strm.avail_in = Z_NULL; + ret = inflateInit2(&(gz->strm), 15 + 16); + } + if (ret != Z_OK) { + free(gz); + return NULL; + } + gz->file = path == NULL ? fdopen(fd, gz->write ? "wb" : "rb") : + fopen(path, gz->write ? "wb" : "rb"); + if (gz->file == NULL) { + gz->write ? deflateEnd(&(gz->strm)) : inflateEnd(&(gz->strm)); + free(gz); + return NULL; + } + gz->err = 0; + gz->msg = ""; + return gz; +} + +int gzwrite OF((gzFile, const void *, unsigned)); + +int gzwrite(gz, buf, len) + gzFile gz; + const void *buf; + unsigned len; +{ + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL || !gz->write) + return 0; + strm = &(gz->strm); + strm->next_in = (void *)buf; + strm->avail_in = len; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_NO_FLUSH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + return len; +} + +int gzread OF((gzFile, void *, unsigned)); + +int gzread(gz, buf, len) + gzFile gz; + void *buf; + unsigned len; +{ + int ret; + unsigned got; + unsigned char in[1]; + z_stream *strm; + + if (gz == NULL || gz->write) + return 0; + if (gz->err) + return 0; + strm = &(gz->strm); + strm->next_out = (void *)buf; + strm->avail_out = len; + do { + got = fread(in, 1, 1, gz->file); + if (got == 0) + break; + strm->next_in = in; + strm->avail_in = 1; + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_DATA_ERROR) { + gz->err = Z_DATA_ERROR; + gz->msg = strm->msg; + return 0; + } + if (ret == Z_STREAM_END) + inflateReset(strm); + } while (strm->avail_out); + return len - strm->avail_out; +} + +int gzclose OF((gzFile)); + +int gzclose(gz) + gzFile gz; +{ + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL) + return Z_STREAM_ERROR; + strm = &(gz->strm); + if (gz->write) { + strm->next_in = Z_NULL; + strm->avail_in = 0; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_FINISH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + deflateEnd(strm); + } + else + inflateEnd(strm); + fclose(gz->file); + free(gz); + return Z_OK; +} + +const char *gzerror OF((gzFile, int *)); + +const char *gzerror(gz, err) + gzFile gz; + int *err; +{ + *err = gz->err; + return gz->msg; +} + +#endif + char *prog; void error OF((const char *msg)); @@ -198,8 +462,17 @@ FILE *in; gzFile out; + if (strlen(file) + strlen(GZ_SUFFIX) >= sizeof(outfile)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outfile, sizeof(outfile), "%s%s", file, GZ_SUFFIX); +#else strcpy(outfile, file); strcat(outfile, GZ_SUFFIX); +#endif in = fopen(file, "rb"); if (in == NULL) { @@ -227,9 +500,18 @@ char *infile, *outfile; FILE *out; gzFile in; - uInt len = (uInt)strlen(file); + size_t len = strlen(file); + if (len + strlen(GZ_SUFFIX) >= sizeof(buf)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf, sizeof(buf), "%s", file); +#else strcpy(buf, file); +#endif if (len > SUFFIX_LEN && strcmp(file+len-SUFFIX_LEN, GZ_SUFFIX) == 0) { infile = file; @@ -238,7 +520,11 @@ } else { outfile = file; infile = buf; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf + len, sizeof(buf) - len, "%s", GZ_SUFFIX); +#else strcat(infile, GZ_SUFFIX); +#endif } in = gzopen(infile, "rb"); if (in == 
NULL) { @@ -258,7 +544,8 @@ /* =========================================================================== - * Usage: minigzip [-d] [-f] [-h] [-r] [-1 to -9] [files...] + * Usage: minigzip [-c] [-d] [-f] [-h] [-r] [-1 to -9] [files...] + * -c : write to standard output * -d : decompress * -f : compress with Z_FILTERED * -h : compress with Z_HUFFMAN_ONLY @@ -270,17 +557,34 @@ int argc; char *argv[]; { + int copyout = 0; int uncompr = 0; gzFile file; - char outmode[20]; + char *bname, outmode[20]; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outmode, sizeof(outmode), "%s", "wb6 "); +#else strcpy(outmode, "wb6 "); +#endif prog = argv[0]; + bname = strrchr(argv[0], '/'); + if (bname) + bname++; + else + bname = argv[0]; argc--, argv++; + if (!strcmp(bname, "gunzip")) + uncompr = 1; + else if (!strcmp(bname, "zcat")) + copyout = uncompr = 1; + while (argc > 0) { - if (strcmp(*argv, "-d") == 0) + if (strcmp(*argv, "-c") == 0) + copyout = 1; + else if (strcmp(*argv, "-d") == 0) uncompr = 1; else if (strcmp(*argv, "-f") == 0) outmode[3] = 'f'; @@ -310,11 +614,36 @@ gz_compress(stdin, file); } } else { + if (copyout) { + SET_BINARY_MODE(stdout); + } do { if (uncompr) { - file_uncompress(*argv); + if (copyout) { + file = gzopen(*argv, "rb"); + if (file == NULL) + fprintf(stderr, "%s: can't gzopen %s\n", prog, *argv); + else + gz_uncompress(file, stdout); + } else { + file_uncompress(*argv); + } } else { - file_compress(*argv, outmode); + if (copyout) { + FILE * in = fopen(*argv, "rb"); + + if (in == NULL) { + perror(*argv); + } else { + file = gzdopen(fileno(stdout), outmode); + if (file == NULL) error("can't gzdopen stdout"); + + gz_compress(in, file); + } + + } else { + file_compress(*argv, outmode); + } } } while (argv++, --argc); } diff --git a/Modules/zlib/trees.c b/Modules/zlib/trees.c --- a/Modules/zlib/trees.c +++ b/Modules/zlib/trees.c @@ -1,5 +1,6 @@ /* trees.c -- output deflated data using Huffman coding - * Copyright (C) 1995-2005 Jean-loup Gailly + * Copyright (C) 1995-2012 Jean-loup Gailly + * detect_data_type() function provided freely by Cosmin Truta, 2006 * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -73,11 +74,6 @@ * probability, to avoid transmitting the lengths for unused bit length codes. */ -#define Buf_size (8 * 2*sizeof(char)) -/* Number of bits used within bi_buf. (bi_buf might be implemented on - * more than 16 bits on some systems.) - */ - /* =========================================================================== * Local data. These are initialized only once. */ @@ -150,9 +146,9 @@ local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); -local void compress_block OF((deflate_state *s, ct_data *ltree, - ct_data *dtree)); -local void set_data_type OF((deflate_state *s)); +local void compress_block OF((deflate_state *s, const ct_data *ltree, + const ct_data *dtree)); +local int detect_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); @@ -203,12 +199,12 @@ * unused bits in value. 
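The send_bits changes in the trees.c hunk here cast the code value to (ush) before shifting it into bi_buf, deflate's 16-bit bit buffer; bi_valid tracks how many bits the buffer currently holds, and two bytes are flushed whenever a new code would overflow it. A minimal standalone sketch of that packing scheme follows; every name in it (put_bits, emit_short, BIT_BUF_SIZE) is hypothetical and only illustrates the idea, it is not part of the patch.

/* Minimal sketch of the bit-buffer idea used by send_bits(): codes are
 * ORed into a 16-bit accumulator LSB-first; once the accumulator would
 * overflow, two bytes are emitted and the leftover bits are carried over.
 * All names here are hypothetical, for illustration only. */
#include <stdio.h>

typedef unsigned short ush;            /* 16-bit bit buffer, as in zlib    */
#define BIT_BUF_SIZE (8 * (int)sizeof(ush))

static ush bit_buf = 0;                /* pending bits                     */
static int bit_valid = 0;              /* number of valid bits in bit_buf  */

static void emit_short(ush w)          /* stand-in for put_short()         */
{
    printf("flush: 0x%04x\n", w);
}

static void put_bits(int value, int length)
{
    if (bit_valid > BIT_BUF_SIZE - length) {
        bit_buf |= (ush)value << bit_valid;          /* low part fits      */
        emit_short(bit_buf);
        bit_buf = (ush)value >> (BIT_BUF_SIZE - bit_valid);  /* carry over */
        bit_valid += length - BIT_BUF_SIZE;
    } else {
        bit_buf |= (ush)value << bit_valid;
        bit_valid += length;
    }
}

int main(void)
{
    put_bits(0x5, 3);                  /* pack a 3-bit code                */
    put_bits(0x1ff, 9);                /* then a 9-bit code                */
    put_bits(0x7f, 7);                 /* forces a 16-bit flush            */
    return 0;
}

Packing least-significant-bit first is what lets the carried-over remainder be recovered with a single right shift after the flush.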
*/ if (s->bi_valid > (int)Buf_size - length) { - s->bi_buf |= (value << s->bi_valid); + s->bi_buf |= (ush)value << s->bi_valid; put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { - s->bi_buf |= value << s->bi_valid; + s->bi_buf |= (ush)value << s->bi_valid; s->bi_valid += length; } } @@ -218,12 +214,12 @@ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = value;\ - s->bi_buf |= (val << s->bi_valid);\ + s->bi_buf |= (ush)val << s->bi_valid;\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ - s->bi_buf |= (value) << s->bi_valid;\ + s->bi_buf |= (ush)(value) << s->bi_valid;\ s->bi_valid += len;\ }\ } @@ -250,11 +246,13 @@ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ +#ifdef NO_INIT_GLOBAL_POINTERS static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; +#endif /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; @@ -348,13 +346,14 @@ static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } - fprintf(header, "const uch _dist_code[DIST_CODE_LEN] = {\n"); + fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } - fprintf(header, "const uch _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); + fprintf(header, + "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); @@ -379,7 +378,7 @@ /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ -void _tr_init(s) +void ZLIB_INTERNAL _tr_init(s) deflate_state *s; { tr_static_init(); @@ -395,7 +394,6 @@ s->bi_buf = 0; s->bi_valid = 0; - s->last_eob_len = 8; /* enough lookahead for inflate */ #ifdef DEBUG s->compressed_len = 0L; s->bits_sent = 0L; @@ -864,13 +862,13 @@ /* =========================================================================== * Send a stored block */ -void _tr_stored_block(s, buf, stored_len, eof) +void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ - int eof; /* true if this is the last block for a file */ + int last; /* one if this is the last block for a file */ { - send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ + send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ #ifdef DEBUG s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; @@ -879,17 +877,19 @@ } /* =========================================================================== + * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) + */ +void ZLIB_INTERNAL _tr_flush_bits(s) + deflate_state *s; +{ + bi_flush(s); +} + +/* =========================================================================== * Send one empty static block to give enough lookahead for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. - * The current inflate code requires 9 bits of lookahead. 
If the - * last two codes for the previous block (real code plus EOB) were coded - * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode - * the last real code. In this case we send two empty static blocks instead - * of one. (There are no problems if the previous block is stored or fixed.) - * To simplify the code, we assume the worst case of last real code encoded - * on one bit only. */ -void _tr_align(s) +void ZLIB_INTERNAL _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); @@ -898,31 +898,17 @@ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ #endif bi_flush(s); - /* Of the 10 bits for the empty block, we have already sent - * (10 - bi_valid) bits. The lookahead for the last real code (before - * the EOB of the previous block) was thus at least one plus the length - * of the EOB plus what we have just sent of the empty static block. - */ - if (1 + s->last_eob_len + 10 - s->bi_valid < 9) { - send_bits(s, STATIC_TREES<<1, 3); - send_code(s, END_BLOCK, static_ltree); -#ifdef DEBUG - s->compressed_len += 10L; -#endif - bi_flush(s); - } - s->last_eob_len = 7; } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and output the encoded block to the zip file. */ -void _tr_flush_block(s, buf, stored_len, eof) +void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ - int eof; /* true if this is the last block for a file */ + int last; /* one if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ @@ -931,8 +917,8 @@ if (s->level > 0) { /* Check if the file is binary or text */ - if (stored_len > 0 && s->strm->data_type == Z_UNKNOWN) - set_data_type(s); + if (s->strm->data_type == Z_UNKNOWN) + s->strm->data_type = detect_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); @@ -978,23 +964,25 @@ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. 
*/ - _tr_stored_block(s, buf, stored_len, eof); + _tr_stored_block(s, buf, stored_len, last); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { #endif - send_bits(s, (STATIC_TREES<<1)+eof, 3); - compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree); + send_bits(s, (STATIC_TREES<<1)+last, 3); + compress_block(s, (const ct_data *)static_ltree, + (const ct_data *)static_dtree); #ifdef DEBUG s->compressed_len += 3 + s->static_len; #endif } else { - send_bits(s, (DYN_TREES<<1)+eof, 3); + send_bits(s, (DYN_TREES<<1)+last, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); - compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree); + compress_block(s, (const ct_data *)s->dyn_ltree, + (const ct_data *)s->dyn_dtree); #ifdef DEBUG s->compressed_len += 3 + s->opt_len; #endif @@ -1005,21 +993,21 @@ */ init_block(s); - if (eof) { + if (last) { bi_windup(s); #ifdef DEBUG s->compressed_len += 7; /* align on byte boundary */ #endif } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, - s->compressed_len-7*eof)); + s->compressed_len-7*last)); } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed. */ -int _tr_tally (s, dist, lc) +int ZLIB_INTERNAL _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ @@ -1071,8 +1059,8 @@ */ local void compress_block(s, ltree, dtree) deflate_state *s; - ct_data *ltree; /* literal tree */ - ct_data *dtree; /* distance tree */ + const ct_data *ltree; /* literal tree */ + const ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ @@ -1114,28 +1102,48 @@ } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); - s->last_eob_len = ltree[END_BLOCK].Len; } /* =========================================================================== - * Set the data type to BINARY or TEXT, using a crude approximation: - * set it to Z_TEXT if all symbols are either printable characters (33 to 255) - * or white spaces (9 to 13, or 32); or set it to Z_BINARY otherwise. + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "black list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). * IN assertion: the fields Freq of dyn_ltree are set. */ -local void set_data_type(s) +local int detect_data_type(s) deflate_state *s; { + /* black_mask is the bit mask of black-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + unsigned long black_mask = 0xf3ffc07fUL; int n; - for (n = 0; n < 9; n++) + /* Check for non-textual ("black-listed") bytes. */ + for (n = 0; n <= 31; n++, black_mask >>= 1) + if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) + return Z_BINARY; + + /* Check for textual ("white-listed") bytes. 
*/ + if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 + || s->dyn_ltree[13].Freq != 0) + return Z_TEXT; + for (n = 32; n < LITERALS; n++) if (s->dyn_ltree[n].Freq != 0) - break; - if (n == 9) - for (n = 14; n < 32; n++) - if (s->dyn_ltree[n].Freq != 0) - break; - s->strm->data_type = (n == 32) ? Z_TEXT : Z_BINARY; + return Z_TEXT; + + /* There are no "black-listed" or "white-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; } /* =========================================================================== @@ -1201,7 +1209,6 @@ int header; /* true if block header must be written */ { bi_windup(s); /* align on byte boundary */ - s->last_eob_len = 8; /* enough lookahead for inflate */ if (header) { put_short(s, (ush)len); diff --git a/Modules/zlib/trees.h b/Modules/zlib/trees.h --- a/Modules/zlib/trees.h +++ b/Modules/zlib/trees.h @@ -70,7 +70,7 @@ {{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} }; -const uch _dist_code[DIST_CODE_LEN] = { +const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, @@ -99,7 +99,7 @@ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; -const uch _length_code[MAX_MATCH-MIN_MATCH+1]= { +const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, diff --git a/Modules/zlib/uncompr.c b/Modules/zlib/uncompr.c --- a/Modules/zlib/uncompr.c +++ b/Modules/zlib/uncompr.c @@ -1,5 +1,5 @@ /* uncompr.c -- decompress a memory buffer - * Copyright (C) 1995-2003 Jean-loup Gailly. + * Copyright (C) 1995-2003, 2010 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -16,8 +16,6 @@ been saved previously by the compressor and transmitted to the decompressor by some mechanism outside the scope of this compression library.) Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to decompress a whole file at once if the - input file is mmap'ed. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output @@ -32,7 +30,7 @@ z_stream stream; int err; - stream.next_in = (Bytef*)source; + stream.next_in = (z_const Bytef *)source; stream.avail_in = (uInt)sourceLen; /* Check for source > 64K on 16-bit machine: */ if ((uLong)stream.avail_in != sourceLen) return Z_BUF_ERROR; diff --git a/Modules/zlib/zconf.h b/Modules/zlib/zconf.h --- a/Modules/zlib/zconf.h +++ b/Modules/zlib/zconf.h @@ -1,5 +1,5 @@ /* zconf.h -- configuration of the zlib compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2013 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -11,52 +11,145 @@ /* * If you *really* need a unique prefix for all types and library functions, * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
*/ -#ifdef Z_PREFIX +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ # define deflateInit_ z_deflateInit_ -# define deflate z_deflate -# define deflateEnd z_deflateEnd +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ # define inflateInit_ z_inflateInit_ -# define inflate z_inflate -# define inflateEnd z_inflateEnd -# define deflateInit2_ z_deflateInit2_ -# define deflateSetDictionary z_deflateSetDictionary -# define deflateCopy z_deflateCopy -# define deflateReset z_deflateReset -# define deflateParams z_deflateParams -# define deflateBound z_deflateBound -# define deflatePrime z_deflatePrime -# define inflateInit2_ z_inflateInit2_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 # define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary # define inflateSync 
z_inflateSync # define inflateSyncPoint z_inflateSyncPoint -# define inflateCopy z_inflateCopy -# define inflateReset z_inflateReset -# define inflateBack z_inflateBack -# define inflateBackEnd z_inflateBackEnd -# define compress z_compress -# define compress2 z_compress2 -# define compressBound z_compressBound -# define uncompress z_uncompress -# define adler32 z_adler32 -# define crc32 z_crc32 -# define get_crc_table z_get_crc_table +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif # define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef # define alloc_func z_alloc_func +# define charf z_charf # define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp # define in_func z_in_func +# define intf z_intf # define out_func z_out_func -# define Byte z_Byte # define uInt z_uInt +# define uIntf z_uIntf # define uLong z_uLong -# define Bytef z_Bytef -# define charf z_charf -# define intf z_intf -# define uIntf z_uIntf # define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc # define voidpf z_voidpf -# define voidp z_voidp + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + #endif #if defined(__MSDOS__) && !defined(MSDOS) @@ -125,6 +218,12 @@ # endif #endif +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + /* Some Mac compilers merge all .h files incorrectly: */ #if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) # define NO_DUMMY_DECL @@ -171,6 +270,14 @@ # endif #endif +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + /* The following definitions for FAR are needed only for MSDOS mixed * model programming (small or medium model with some far allocations). 
* This was tested only with MSC; for other MSDOS compilers you may have @@ -284,49 +391,121 @@ typedef Byte *voidp; #endif -#if 0 /* HAVE_UNISTD_H -- this line is updated by ./configure */ -# include /* for off_t */ -# include /* for SEEK_* and off_t */ -# ifdef VMS -# include /* for off_t */ +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short # endif -# define z_off_t off_t #endif -#ifndef SEEK_SET + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) # define SEEK_SET 0 /* Seek from beginning of file. */ # define SEEK_CUR 1 /* Seek from current position. 
*/ # define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ #endif + #ifndef z_off_t # define z_off_t long #endif -#if defined(__OS400__) -# define NO_vsnprintf -#endif - -#if defined(__MVS__) -# define NO_vsnprintf -# ifdef FAR -# undef FAR +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t # endif #endif /* MVS linker does not support external names larger than 8 bytes */ #if defined(__MVS__) -# pragma map(deflateInit_,"DEIN") -# pragma map(deflateInit2_,"DEIN2") -# pragma map(deflateEnd,"DEEND") -# pragma map(deflateBound,"DEBND") -# pragma map(inflateInit_,"ININ") -# pragma map(inflateInit2_,"ININ2") -# pragma map(inflateEnd,"INEND") -# pragma map(inflateSync,"INSY") -# pragma map(inflateSetDictionary,"INSEDI") -# pragma map(compressBound,"CMBND") -# pragma map(inflate_table,"INTABL") -# pragma map(inflate_fast,"INFA") -# pragma map(inflate_copyright,"INCOPY") + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") #endif #endif /* ZCONF_H */ diff --git a/Modules/zlib/zconf.h.cmakein b/Modules/zlib/zconf.h.cmakein new file mode 100644 --- /dev/null +++ b/Modules/zlib/zconf.h.cmakein @@ -0,0 +1,513 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H +#cmakedefine Z_PREFIX +#cmakedefine Z_HAVE_UNISTD_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". 
+ */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define 
zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). + */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). 
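 (A quick check of the arithmetic above, using the defaults: windowBits=15
 gives 1 << (15+2) = 131072 bytes and memLevel=8 gives 1 << (8+9) = 131072
 bytes, i.e. roughly 128K + 128K = 256K as stated; with MAX_WBITS=14 and
 MAX_MEM_LEVEL=7 the same formula gives 64K + 64K = 128K.)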
+ + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 +#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. 
*/ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/Modules/zlib/zconf.h.in b/Modules/zlib/zconf.h.in new file mode 100644 --- /dev/null +++ b/Modules/zlib/zconf.h.in @@ -0,0 +1,511 @@ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-2013 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* @(#) $Id$ */ + +#ifndef ZCONF_H +#define ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. + * Even better than compiling with -DZ_PREFIX would be to use configure to set + * this permanently in zconf.h using "./configure --zprefix". + */ +#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ +# define Z_PREFIX_SET + +/* all linked symbols */ +# define _dist_code z__dist_code +# define _length_code z__length_code +# define _tr_align z__tr_align +# define _tr_flush_bits z__tr_flush_bits +# define _tr_flush_block z__tr_flush_block +# define _tr_init z__tr_init +# define _tr_stored_block z__tr_stored_block +# define _tr_tally z__tr_tally +# define adler32 z_adler32 +# define adler32_combine z_adler32_combine +# define adler32_combine64 z_adler32_combine64 +# ifndef Z_SOLO +# define compress z_compress +# define compress2 z_compress2 +# define compressBound z_compressBound +# endif +# define crc32 z_crc32 +# define crc32_combine z_crc32_combine +# define crc32_combine64 z_crc32_combine64 +# define deflate z_deflate +# define deflateBound z_deflateBound +# define deflateCopy z_deflateCopy +# define deflateEnd z_deflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateInit_ z_deflateInit_ +# define deflateParams z_deflateParams +# define deflatePending z_deflatePending +# define deflatePrime z_deflatePrime +# define deflateReset z_deflateReset +# define deflateResetKeep z_deflateResetKeep +# define deflateSetDictionary z_deflateSetDictionary +# define deflateSetHeader z_deflateSetHeader +# define deflateTune z_deflateTune +# define deflate_copyright z_deflate_copyright +# define get_crc_table z_get_crc_table +# ifndef Z_SOLO +# define gz_error z_gz_error +# define gz_intmax z_gz_intmax +# define gz_strwinerror z_gz_strwinerror +# define gzbuffer z_gzbuffer +# define gzclearerr z_gzclearerr +# define gzclose z_gzclose +# define gzclose_r z_gzclose_r +# define gzclose_w z_gzclose_w +# define gzdirect z_gzdirect +# define gzdopen z_gzdopen +# define gzeof z_gzeof +# define gzerror z_gzerror +# define gzflush z_gzflush +# define gzgetc z_gzgetc +# define gzgetc_ z_gzgetc_ +# define gzgets 
z_gzgets +# define gzoffset z_gzoffset +# define gzoffset64 z_gzoffset64 +# define gzopen z_gzopen +# define gzopen64 z_gzopen64 +# ifdef _WIN32 +# define gzopen_w z_gzopen_w +# endif +# define gzprintf z_gzprintf +# define gzvprintf z_gzvprintf +# define gzputc z_gzputc +# define gzputs z_gzputs +# define gzread z_gzread +# define gzrewind z_gzrewind +# define gzseek z_gzseek +# define gzseek64 z_gzseek64 +# define gzsetparams z_gzsetparams +# define gztell z_gztell +# define gztell64 z_gztell64 +# define gzungetc z_gzungetc +# define gzwrite z_gzwrite +# endif +# define inflate z_inflate +# define inflateBack z_inflateBack +# define inflateBackEnd z_inflateBackEnd +# define inflateBackInit_ z_inflateBackInit_ +# define inflateCopy z_inflateCopy +# define inflateEnd z_inflateEnd +# define inflateGetHeader z_inflateGetHeader +# define inflateInit2_ z_inflateInit2_ +# define inflateInit_ z_inflateInit_ +# define inflateMark z_inflateMark +# define inflatePrime z_inflatePrime +# define inflateReset z_inflateReset +# define inflateReset2 z_inflateReset2 +# define inflateSetDictionary z_inflateSetDictionary +# define inflateGetDictionary z_inflateGetDictionary +# define inflateSync z_inflateSync +# define inflateSyncPoint z_inflateSyncPoint +# define inflateUndermine z_inflateUndermine +# define inflateResetKeep z_inflateResetKeep +# define inflate_copyright z_inflate_copyright +# define inflate_fast z_inflate_fast +# define inflate_table z_inflate_table +# ifndef Z_SOLO +# define uncompress z_uncompress +# endif +# define zError z_zError +# ifndef Z_SOLO +# define zcalloc z_zcalloc +# define zcfree z_zcfree +# endif +# define zlibCompileFlags z_zlibCompileFlags +# define zlibVersion z_zlibVersion + +/* all zlib typedefs in zlib.h and zconf.h */ +# define Byte z_Byte +# define Bytef z_Bytef +# define alloc_func z_alloc_func +# define charf z_charf +# define free_func z_free_func +# ifndef Z_SOLO +# define gzFile z_gzFile +# endif +# define gz_header z_gz_header +# define gz_headerp z_gz_headerp +# define in_func z_in_func +# define intf z_intf +# define out_func z_out_func +# define uInt z_uInt +# define uIntf z_uIntf +# define uLong z_uLong +# define uLongf z_uLongf +# define voidp z_voidp +# define voidpc z_voidpc +# define voidpf z_voidpf + +/* all zlib structs in zlib.h and zconf.h */ +# define gz_header_s z_gz_header_s +# define internal_state z_internal_state + +#endif + +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif +#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) +# define OS2 +#endif +#if defined(_WINDOWS) && !defined(WINDOWS) +# define WINDOWS +#endif +#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) +# ifndef WIN32 +# define WIN32 +# endif +#endif +#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) +# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) +# ifndef SYS16BIT +# define SYS16BIT +# endif +# endif +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). 
+ */ +#ifdef SYS16BIT +# define MAXSEG_64K +#endif +#ifdef MSDOS +# define UNALIGNED_OK +#endif + +#ifdef __STDC_VERSION__ +# ifndef STDC +# define STDC +# endif +# if __STDC_VERSION__ >= 199901L +# ifndef STDC99 +# define STDC99 +# endif +# endif +#endif +#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) +# define STDC +#endif +#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) +# define STDC +#endif +#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) +# define STDC +#endif +#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) +# define STDC +#endif + +#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const /* note: need a more gentle solution here */ +# endif +#endif + +#if defined(ZLIB_CONST) && !defined(z_const) +# define z_const const +#else +# define z_const +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2. + * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files + * created by gzip. (Files created by minigzip can still be extracted by + * gzip.) + */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + (1 << (windowBits+2)) + (1 << (memLevel+9)) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +#ifndef Z_ARG /* function prototypes for stdarg */ +# if defined(STDC) || defined(Z_HAVE_STDARG_H) +# define Z_ARG(args) args +# else +# define Z_ARG(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, + * just define FAR to be empty. + */ +#ifdef SYS16BIT +# if defined(M_I86SM) || defined(M_I86MM) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR _far +# else +# define FAR far +# endif +# endif +# if (defined(__SMALL__) || defined(__MEDIUM__)) + /* Turbo C small or medium model */ +# define SMALL_MEDIUM +# ifdef __BORLANDC__ +# define FAR _far +# else +# define FAR far +# endif +# endif +#endif + +#if defined(WINDOWS) || defined(WIN32) + /* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. 
+ */ +# ifdef ZLIB_DLL +# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) +# ifdef ZLIB_INTERNAL +# define ZEXTERN extern __declspec(dllexport) +# else +# define ZEXTERN extern __declspec(dllimport) +# endif +# endif +# endif /* ZLIB_DLL */ + /* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ +# ifdef ZLIB_WINAPI +# ifdef FAR +# undef FAR +# endif +# include + /* No need for _export, use ZLIB.DEF instead. */ + /* For complete Windows compatibility, use WINAPI, not __stdcall. */ +# define ZEXPORT WINAPI +# ifdef WIN32 +# define ZEXPORTVA WINAPIV +# else +# define ZEXPORTVA FAR CDECL +# endif +# endif +#endif + +#if defined (__BEOS__) +# ifdef ZLIB_DLL +# ifdef ZLIB_INTERNAL +# define ZEXPORT __declspec(dllexport) +# define ZEXPORTVA __declspec(dllexport) +# else +# define ZEXPORT __declspec(dllimport) +# define ZEXPORTVA __declspec(dllimport) +# endif +# endif +#endif + +#ifndef ZEXTERN +# define ZEXTERN extern +#endif +#ifndef ZEXPORT +# define ZEXPORT +#endif +#ifndef ZEXPORTVA +# define ZEXPORTVA +#endif + +#ifndef FAR +# define FAR +#endif + +#if !defined(__MACTYPES__) +typedef unsigned char Byte; /* 8 bits */ +#endif +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#ifdef SMALL_MEDIUM + /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void const *voidpc; + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte const *voidpc; + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + +#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) +# include +# if (UINT_MAX == 0xffffffffUL) +# define Z_U4 unsigned +# elif (ULONG_MAX == 0xffffffffUL) +# define Z_U4 unsigned long +# elif (USHRT_MAX == 0xffffffffUL) +# define Z_U4 unsigned short +# endif +#endif + +#ifdef Z_U4 + typedef Z_U4 z_crc_t; +#else + typedef unsigned long z_crc_t; +#endif + +#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_UNISTD_H +#endif + +#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ +# define Z_HAVE_STDARG_H +#endif + +#ifdef STDC +# ifndef Z_SOLO +# include /* for off_t */ +# endif +#endif + +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +# include /* for va_list */ +# endif +#endif + +#ifdef _WIN32 +# ifndef Z_SOLO +# include /* for wchar_t */ +# endif +#endif + +/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and + * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even + * though the former does not conform to the LFS document), but considering + * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as + * equivalently requesting no 64-bit operations + */ +#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 +# undef _LARGEFILE64_SOURCE +#endif + +#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) +# define Z_HAVE_UNISTD_H +#endif +#ifndef Z_SOLO +# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ +# ifdef VMS +# include /* for off_t */ +# endif +# ifndef z_off_t +# define z_off_t off_t +# endif +# endif +#endif + +#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 +# define Z_LFS64 
+#endif + +#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) +# define Z_LARGE64 +#endif + +#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) +# define Z_WANT64 +#endif + +#if !defined(SEEK_SET) && !defined(Z_SOLO) +# define SEEK_SET 0 /* Seek from beginning of file. */ +# define SEEK_CUR 1 /* Seek from current position. */ +# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ +#endif + +#ifndef z_off_t +# define z_off_t long +#endif + +#if !defined(_WIN32) && defined(Z_LARGE64) +# define z_off64_t off64_t +#else +# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) +# define z_off64_t __int64 +# else +# define z_off64_t z_off_t +# endif +#endif + +/* MVS linker does not support external names larger than 8 bytes */ +#if defined(__MVS__) + #pragma map(deflateInit_,"DEIN") + #pragma map(deflateInit2_,"DEIN2") + #pragma map(deflateEnd,"DEEND") + #pragma map(deflateBound,"DEBND") + #pragma map(inflateInit_,"ININ") + #pragma map(inflateInit2_,"ININ2") + #pragma map(inflateEnd,"INEND") + #pragma map(inflateSync,"INSY") + #pragma map(inflateSetDictionary,"INSEDI") + #pragma map(compressBound,"CMBND") + #pragma map(inflate_table,"INTABL") + #pragma map(inflate_fast,"INFA") + #pragma map(inflate_copyright,"INCOPY") +#endif + +#endif /* ZCONF_H */ diff --git a/Modules/zlib/zconf.in.h b/Modules/zlib/zconf.in.h deleted file mode 100644 --- a/Modules/zlib/zconf.in.h +++ /dev/null @@ -1,332 +0,0 @@ -/* zconf.h -- configuration of the zlib compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. - * For conditions of distribution and use, see copyright notice in zlib.h - */ - -/* @(#) $Id$ */ - -#ifndef ZCONF_H -#define ZCONF_H - -/* - * If you *really* need a unique prefix for all types and library functions, - * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. 
- */ -#ifdef Z_PREFIX -# define deflateInit_ z_deflateInit_ -# define deflate z_deflate -# define deflateEnd z_deflateEnd -# define inflateInit_ z_inflateInit_ -# define inflate z_inflate -# define inflateEnd z_inflateEnd -# define deflateInit2_ z_deflateInit2_ -# define deflateSetDictionary z_deflateSetDictionary -# define deflateCopy z_deflateCopy -# define deflateReset z_deflateReset -# define deflateParams z_deflateParams -# define deflateBound z_deflateBound -# define deflatePrime z_deflatePrime -# define inflateInit2_ z_inflateInit2_ -# define inflateSetDictionary z_inflateSetDictionary -# define inflateSync z_inflateSync -# define inflateSyncPoint z_inflateSyncPoint -# define inflateCopy z_inflateCopy -# define inflateReset z_inflateReset -# define inflateBack z_inflateBack -# define inflateBackEnd z_inflateBackEnd -# define compress z_compress -# define compress2 z_compress2 -# define compressBound z_compressBound -# define uncompress z_uncompress -# define adler32 z_adler32 -# define crc32 z_crc32 -# define get_crc_table z_get_crc_table -# define zError z_zError - -# define alloc_func z_alloc_func -# define free_func z_free_func -# define in_func z_in_func -# define out_func z_out_func -# define Byte z_Byte -# define uInt z_uInt -# define uLong z_uLong -# define Bytef z_Bytef -# define charf z_charf -# define intf z_intf -# define uIntf z_uIntf -# define uLongf z_uLongf -# define voidpf z_voidpf -# define voidp z_voidp -#endif - -#if defined(__MSDOS__) && !defined(MSDOS) -# define MSDOS -#endif -#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) -# define OS2 -#endif -#if defined(_WINDOWS) && !defined(WINDOWS) -# define WINDOWS -#endif -#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) -# ifndef WIN32 -# define WIN32 -# endif -#endif -#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) -# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) -# ifndef SYS16BIT -# define SYS16BIT -# endif -# endif -#endif - -/* - * Compile with -DMAXSEG_64K if the alloc function cannot allocate more - * than 64k bytes at a time (needed on systems with 16-bit int). - */ -#ifdef SYS16BIT -# define MAXSEG_64K -#endif -#ifdef MSDOS -# define UNALIGNED_OK -#endif - -#ifdef __STDC_VERSION__ -# ifndef STDC -# define STDC -# endif -# if __STDC_VERSION__ >= 199901L -# ifndef STDC99 -# define STDC99 -# endif -# endif -#endif -#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) -# define STDC -#endif -#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) -# define STDC -#endif -#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) -# define STDC -#endif -#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) -# define STDC -#endif - -#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ -# define STDC -#endif - -#ifndef STDC -# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ -# define const /* note: need a more gentle solution here */ -# endif -#endif - -/* Some Mac compilers merge all .h files incorrectly: */ -#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) -# define NO_DUMMY_DECL -#endif - -/* Maximum value for memLevel in deflateInit2 */ -#ifndef MAX_MEM_LEVEL -# ifdef MAXSEG_64K -# define MAX_MEM_LEVEL 8 -# else -# define MAX_MEM_LEVEL 9 -# endif -#endif - -/* Maximum value for windowBits in deflateInit2 and inflateInit2. - * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files - * created by gzip. 
(Files created by minigzip can still be extracted by - * gzip.) - */ -#ifndef MAX_WBITS -# define MAX_WBITS 15 /* 32K LZ77 window */ -#endif - -/* The memory requirements for deflate are (in bytes): - (1 << (windowBits+2)) + (1 << (memLevel+9)) - that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) - plus a few kilobytes for small objects. For example, if you want to reduce - the default memory requirements from 256K to 128K, compile with - make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" - Of course this will generally degrade compression (there's no free lunch). - - The memory requirements for inflate are (in bytes) 1 << windowBits - that is, 32K for windowBits=15 (default value) plus a few kilobytes - for small objects. -*/ - - /* Type declarations */ - -#ifndef OF /* function prototypes */ -# ifdef STDC -# define OF(args) args -# else -# define OF(args) () -# endif -#endif - -/* The following definitions for FAR are needed only for MSDOS mixed - * model programming (small or medium model with some far allocations). - * This was tested only with MSC; for other MSDOS compilers you may have - * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, - * just define FAR to be empty. - */ -#ifdef SYS16BIT -# if defined(M_I86SM) || defined(M_I86MM) - /* MSC small or medium model */ -# define SMALL_MEDIUM -# ifdef _MSC_VER -# define FAR _far -# else -# define FAR far -# endif -# endif -# if (defined(__SMALL__) || defined(__MEDIUM__)) - /* Turbo C small or medium model */ -# define SMALL_MEDIUM -# ifdef __BORLANDC__ -# define FAR _far -# else -# define FAR far -# endif -# endif -#endif - -#if defined(WINDOWS) || defined(WIN32) - /* If building or using zlib as a DLL, define ZLIB_DLL. - * This is not mandatory, but it offers a little performance increase. - */ -# ifdef ZLIB_DLL -# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) -# ifdef ZLIB_INTERNAL -# define ZEXTERN extern __declspec(dllexport) -# else -# define ZEXTERN extern __declspec(dllimport) -# endif -# endif -# endif /* ZLIB_DLL */ - /* If building or using zlib with the WINAPI/WINAPIV calling convention, - * define ZLIB_WINAPI. - * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. - */ -# ifdef ZLIB_WINAPI -# ifdef FAR -# undef FAR -# endif -# include - /* No need for _export, use ZLIB.DEF instead. */ - /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ -# define ZEXPORT WINAPI -# ifdef WIN32 -# define ZEXPORTVA WINAPIV -# else -# define ZEXPORTVA FAR CDECL -# endif -# endif -#endif - -#if defined (__BEOS__) -# ifdef ZLIB_DLL -# ifdef ZLIB_INTERNAL -# define ZEXPORT __declspec(dllexport) -# define ZEXPORTVA __declspec(dllexport) -# else -# define ZEXPORT __declspec(dllimport) -# define ZEXPORTVA __declspec(dllimport) -# endif -# endif -#endif - -#ifndef ZEXTERN -# define ZEXTERN extern -#endif -#ifndef ZEXPORT -# define ZEXPORT -#endif -#ifndef ZEXPORTVA -# define ZEXPORTVA -#endif - -#ifndef FAR -# define FAR -#endif - -#if !defined(__MACTYPES__) -typedef unsigned char Byte; /* 8 bits */ -#endif -typedef unsigned int uInt; /* 16 bits or more */ -typedef unsigned long uLong; /* 32 bits or more */ - -#ifdef SMALL_MEDIUM - /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ -# define Bytef Byte FAR -#else - typedef Byte FAR Bytef; -#endif -typedef char FAR charf; -typedef int FAR intf; -typedef uInt FAR uIntf; -typedef uLong FAR uLongf; - -#ifdef STDC - typedef void const *voidpc; - typedef void FAR *voidpf; - typedef void *voidp; -#else - typedef Byte const *voidpc; - typedef Byte FAR *voidpf; - typedef Byte *voidp; -#endif - -#if 0 /* HAVE_UNISTD_H -- this line is updated by ./configure */ -# include /* for off_t */ -# include /* for SEEK_* and off_t */ -# ifdef VMS -# include /* for off_t */ -# endif -# define z_off_t off_t -#endif -#ifndef SEEK_SET -# define SEEK_SET 0 /* Seek from beginning of file. */ -# define SEEK_CUR 1 /* Seek from current position. */ -# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ -#endif -#ifndef z_off_t -# define z_off_t long -#endif - -#if defined(__OS400__) -# define NO_vsnprintf -#endif - -#if defined(__MVS__) -# define NO_vsnprintf -# ifdef FAR -# undef FAR -# endif -#endif - -/* MVS linker does not support external names larger than 8 bytes */ -#if defined(__MVS__) -# pragma map(deflateInit_,"DEIN") -# pragma map(deflateInit2_,"DEIN2") -# pragma map(deflateEnd,"DEEND") -# pragma map(deflateBound,"DEBND") -# pragma map(inflateInit_,"ININ") -# pragma map(inflateInit2_,"ININ2") -# pragma map(inflateEnd,"INEND") -# pragma map(inflateSync,"INSY") -# pragma map(inflateSetDictionary,"INSEDI") -# pragma map(compressBound,"CMBND") -# pragma map(inflate_table,"INTABL") -# pragma map(inflate_fast,"INFA") -# pragma map(inflate_copyright,"INCOPY") -#endif - -#endif /* ZCONF_H */ diff --git a/Modules/zlib/zlib.3 b/Modules/zlib/zlib.3 --- a/Modules/zlib/zlib.3 +++ b/Modules/zlib/zlib.3 @@ -1,4 +1,4 @@ -.TH ZLIB 3 "18 July 2005" +.TH ZLIB 3 "28 Apr 2013" .SH NAME zlib \- compression/decompression library .SH SYNOPSIS @@ -9,15 +9,15 @@ The .I zlib library is a general purpose data compression library. -The code is thread safe. +The code is thread safe, assuming that the standard library functions +used are thread safe, such as memory allocation routines. It provides in-memory compression and decompression functions, including integrity checks of the uncompressed data. This version of the library supports only one compression method (deflation) -but other algorithms will be added later -and will have the same stream interface. +but other algorithms may be added later +with the same stream interface. .LP Compression can be done in a single step if the buffers are large enough -(for example if an input file is mmap'ed), or can be done by repeated calls of the compression function. 
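As a rough sketch of that single-step path (illustrative only: the
roundtrip() wrapper, its fixed-size buffers and the short test string are
made up here, and real code should size the output with compressBound()):

    #include <string.h>
    #include "zlib.h"

    int roundtrip(void)
    {
        const char src[] = "hello, hello, hello!";
        Bytef comp[128], plain[128];
        uLongf clen = sizeof comp, plen = sizeof plain;

        if (compress(comp, &clen, (const Bytef *)src, sizeof src) != Z_OK)
            return -1;                      /* compression failed */
        if (uncompress(plain, &plen, comp, clen) != Z_OK)
            return -1;                      /* decompression failed */
        return memcmp(src, plain, sizeof src) == 0 ? 0 : -1;
    }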
In the latter case, the application must provide more input and/or consume the output @@ -30,26 +30,27 @@ .LP The library does not install any signal handler. The decoder checks the consistency of the compressed data, -so the library should never crash even in case of corrupted input. +so the library should never crash even in the case of corrupted input. .LP All functions of the compression library are documented in the file .IR zlib.h . The distribution source includes examples of use of the library in the files -.I example.c +.I test/example.c and -.IR minigzip.c . +.IR test/minigzip.c, +as well as other examples in the +.IR examples/ +directory. .LP Changes to this version are documented in the file .I ChangeLog -that accompanies the source, -and are concerned primarily with bug fixes and portability enhancements. +that accompanies the source. .LP -A Java implementation of .I zlib -is available in the Java Development Kit 1.1: +is available in Java using the java.util.zip package: .IP -http://www.javasoft.com/products/JDK/1.1/docs/api/Package-java.util.zip.html +http://java.sun.com/developer/technicalArticles/Programming/compression/ .LP A Perl interface to .IR zlib , @@ -57,23 +58,20 @@ is available at CPAN (Comprehensive Perl Archive Network) sites, including: .IP -http://www.cpan.org/modules/by-module/Compress/ +http://search.cpan.org/~pmqs/IO-Compress-Zlib/ .LP A Python interface to .IR zlib , written by A.M. Kuchling (amk at magnet.com), is available in Python 1.5 and later versions: .IP -http://www.python.org/doc/lib/module-zlib.html +http://docs.python.org/library/zlib.html .LP -A .I zlib -binding for -.IR tcl (1), -written by Andreas Kupries (a.kupries at westend.com), -is availlable at: +is built into +.IR tcl: .IP -http://www.westend.com/~kupries/doc/trf/man/man.html +http://wiki.tcl.tk/4610 .LP An experimental package to read and write files in .zip format, written on top of @@ -81,40 +79,34 @@ by Gilles Vollant (info at winimage.com), is available at: .IP -http://www.winimage.com/zLibDll/unzip.html +http://www.winimage.com/zLibDll/minizip.html and also in the .I contrib/minizip directory of the main .I zlib -web site. +source distribution. .SH "SEE ALSO" The .I zlib -web site can be found at either of these locations: +web site can be found at: .IP -http://www.zlib.org -.br -http://www.gzip.org/zlib/ +http://zlib.net/ .LP The data format used by the zlib library is described by RFC (Request for Comments) 1950 to 1952 in the files: .IP -http://www.ietf.org/rfc/rfc1950.txt (concerning zlib format) +http://tools.ietf.org/html/rfc1950 (for the zlib header and trailer format) .br -http://www.ietf.org/rfc/rfc1951.txt (concerning deflate format) +http://tools.ietf.org/html/rfc1951 (for the deflate compressed data format) .br -http://www.ietf.org/rfc/rfc1952.txt (concerning gzip format) +http://tools.ietf.org/html/rfc1952 (for the gzip header and trailer format) .LP -These documents are also available in other formats from: -.IP -ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html -.LP -Mark Nelson (markn at ieee.org) wrote an article about +Mark Nelson wrote an article about .I zlib for the Jan. 1997 issue of Dr. 
Dobb's Journal; a copy of the article is available at: .IP -http://dogma.net/markn/articles/zlibtool/zlibtool.htm +http://marknelson.us/1997/01/01/zlib-engine/ .SH "REPORTING PROBLEMS" Before reporting a problem, please check the @@ -127,14 +119,14 @@ .I zlib FAQ at: .IP -http://www.gzip.org/zlib/zlib_faq.html +http://zlib.net/zlib_faq.html .LP before asking for help. Send questions and/or comments to zlib at gzip.org, or (for the Windows DLL version) to Gilles Vollant (info at winimage.com). .SH AUTHORS -Version 1.2.3 -Copyright (C) 1995-2005 Jean-loup Gailly (jloup at gzip.org) +Version 1.2.8 +Copyright (C) 1995-2013 Jean-loup Gailly (jloup at gzip.org) and Mark Adler (madler at alumni.caltech.edu). .LP This software is provided "as-is," diff --git a/Modules/zlib/zlib.h b/Modules/zlib/zlib.h --- a/Modules/zlib/zlib.h +++ b/Modules/zlib/zlib.h @@ -1,7 +1,7 @@ /* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.3, July 18th, 2005 + version 1.2.8, April 28th, 2013 - Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler + Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages @@ -24,8 +24,8 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt - (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). + Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 + (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). */ #ifndef ZLIB_H @@ -37,41 +37,44 @@ extern "C" { #endif -#define ZLIB_VERSION "1.2.3" -#define ZLIB_VERNUM 0x1230 +#define ZLIB_VERSION "1.2.8" +#define ZLIB_VERNUM 0x1280 +#define ZLIB_VER_MAJOR 1 +#define ZLIB_VER_MINOR 2 +#define ZLIB_VER_REVISION 8 +#define ZLIB_VER_SUBREVISION 0 /* - The 'zlib' compression library provides in-memory compression and - decompression functions, including integrity checks of the uncompressed - data. This version of the library supports only one compression method - (deflation) but other algorithms will be added later and will have the same - stream interface. + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed data. + This version of the library supports only one compression method (deflation) + but other algorithms will be added later and will have the same stream + interface. - Compression can be done in a single step if the buffers are large - enough (for example if an input file is mmap'ed), or can be done by - repeated calls of the compression function. In the latter case, the - application must provide more input and/or consume the output + Compression can be done in a single step if the buffers are large enough, + or can be done by repeated calls of the compression function. In the latter + case, the application must provide more input and/or consume the output (providing more output space) before each call. - The compressed data format used by default by the in-memory functions is + The compressed data format used by default by the in-memory functions is the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped around a deflate stream, which is itself documented in RFC 1951. 
- The library also supports reading and writing files in gzip (.gz) format + The library also supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio using the functions that start with "gz". The gzip format is different from the zlib format. gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. - This library can optionally read and write gzip streams in memory as well. + This library can optionally read and write gzip streams in memory as well. - The zlib format was designed to be compact and fast for use in memory + The zlib format was designed to be compact and fast for use in memory and on communications channels. The gzip format was designed for single- file compression on file systems, has a larger header than zlib to maintain directory information, and uses a different, slower check method than zlib. - The library does not install any signal handler. The decoder checks - the consistency of the compressed data, so the library should never - crash even in case of corrupted input. + The library does not install any signal handler. The decoder checks + the consistency of the compressed data, so the library should never crash + even in case of corrupted input. */ typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); @@ -80,15 +83,15 @@ struct internal_state; typedef struct z_stream_s { - Bytef *next_in; /* next input byte */ + z_const Bytef *next_in; /* next input byte */ uInt avail_in; /* number of bytes available at next_in */ - uLong total_in; /* total nb of input bytes read so far */ + uLong total_in; /* total number of input bytes read so far */ Bytef *next_out; /* next output byte should be put there */ uInt avail_out; /* remaining free space at next_out */ - uLong total_out; /* total nb of bytes output so far */ + uLong total_out; /* total number of bytes output so far */ - char *msg; /* last error message, NULL if no error */ + z_const char *msg; /* last error message, NULL if no error */ struct internal_state FAR *state; /* not visible by applications */ alloc_func zalloc; /* used to allocate the internal state */ @@ -126,45 +129,45 @@ typedef gz_header FAR *gz_headerp; /* - The application must update next_in and avail_in when avail_in has - dropped to zero. It must update next_out and avail_out when avail_out - has dropped to zero. The application must initialize zalloc, zfree and - opaque before calling the init function. All other fields are set by the - compression library and must not be updated by the application. + The application must update next_in and avail_in when avail_in has dropped + to zero. It must update next_out and avail_out when avail_out has dropped + to zero. The application must initialize zalloc, zfree and opaque before + calling the init function. All other fields are set by the compression + library and must not be updated by the application. - The opaque value provided by the application will be passed as the first - parameter for calls of zalloc and zfree. This can be useful for custom - memory management. The compression library attaches no meaning to the + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the opaque value. - zalloc must return Z_NULL if there is not enough memory for the object. + zalloc must return Z_NULL if there is not enough memory for the object. 
If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. - On 16-bit systems, the functions zalloc and zfree must be able to allocate - exactly 65536 bytes, but will not be required to allocate more than this - if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, - pointers returned by zalloc for objects of exactly 65536 bytes *must* - have their offset normalized to zero. The default allocation function - provided by this library ensures this (see zutil.c). To reduce memory - requirements and avoid any allocation of 64K objects, at the expense of - compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this if + the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers + returned by zalloc for objects of exactly 65536 bytes *must* have their + offset normalized to zero. The default allocation function provided by this + library ensures this (see zutil.c). To reduce memory requirements and avoid + any allocation of 64K objects, at the expense of compression ratio, compile + the library with -DMAX_WBITS=14 (see zconf.h). - The fields total_in and total_out can be used for statistics or - progress reports. After compression, total_in holds the total size of - the uncompressed data and may be saved for use in the decompressor - (particularly if the decompressor wants to decompress everything in - a single step). + The fields total_in and total_out can be used for statistics or progress + reports. After compression, total_in holds the total size of the + uncompressed data and may be saved for use in the decompressor (particularly + if the decompressor wants to decompress everything in a single step). */ /* constants */ #define Z_NO_FLUSH 0 -#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */ +#define Z_PARTIAL_FLUSH 1 #define Z_SYNC_FLUSH 2 #define Z_FULL_FLUSH 3 #define Z_FINISH 4 #define Z_BLOCK 5 +#define Z_TREES 6 /* Allowed flush values; see deflate() and inflate() below for details */ #define Z_OK 0 @@ -176,8 +179,8 @@ #define Z_MEM_ERROR (-4) #define Z_BUF_ERROR (-5) #define Z_VERSION_ERROR (-6) -/* Return codes for the compression/decompression functions. Negative - * values are errors, positive values are used for special but normal events. +/* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. */ #define Z_NO_COMPRESSION 0 @@ -207,119 +210,141 @@ #define zlib_version zlibVersion() /* for compatibility with versions < 1.0.2 */ + /* basic functions */ ZEXTERN const char * ZEXPORT zlibVersion OF((void)); /* The application can compare zlibVersion and ZLIB_VERSION for consistency. - If the first character differs, the library code actually used is - not compatible with the zlib.h header file used by the application. - This check is automatically made by deflateInit and inflateInit. + If the first character differs, the library code actually used is not + compatible with the zlib.h header file used by the application. This check + is automatically made by deflateInit and inflateInit. */ /* ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. 
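A minimal sketch of that set-up step, assuming the default allocators are
wanted and no input has been supplied yet (strm is the application's own
z_stream):

    z_stream strm;
    strm.zalloc = Z_NULL;      /* let zlib pick its default allocator */
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;      /* passed back to zalloc/zfree if set */
    strm.next_in  = Z_NULL;    /* no input provided yet */
    strm.avail_in = 0;
    /* then deflateInit(&strm, Z_DEFAULT_COMPRESSION) or inflateInit(&strm) */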
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to - use default allocation functions. + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, deflateInit updates them to use default + allocation functions. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at - all (the input data is simply copied a block at a time). - Z_DEFAULT_COMPRESSION requests a default compromise between speed and - compression (currently equivalent to level 6). + 1 gives best speed, 9 gives best compression, 0 gives no compression at all + (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION + requests a default compromise between speed and compression (currently + equivalent to level 6). - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if level is not a valid compression level, + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if level is not a valid compression level, or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). - msg is set to null if there is no error message. deflateInit does not - perform any compression: this will be done by deflate(). + with the version assumed by the caller (ZLIB_VERSION). msg is set to null + if there is no error message. deflateInit does not perform any compression: + this will be done by deflate(). */ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); /* deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce some - output latency (reading input without producing any output) except when + buffer becomes empty or the output buffer becomes full. It may introduce + some output latency (reading input without producing any output) except when forced to flush. - The detailed semantics are as follows. deflate performs one or both of the + The detailed semantics are as follows. deflate performs one or both of the following actions: - Compress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not + accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in and avail_in are updated and processing will resume at this point for the next call of deflate(). - Provide more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. + accordingly. This action is forced if the parameter flush is non zero. Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary (in interactive applications). - Some output may be provided even if flush is not set. + should be set only when necessary (in interactive applications). Some + output may be provided even if flush is not set. - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating avail_in or avail_out accordingly; avail_out - should never be zero before the call. 
The application can consume the - compressed output when it wants, for example when the output buffer is full - (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK - and with zero avail_out, it must be called again after making room in the - output buffer because there might be more output pending. + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating avail_in or avail_out accordingly; avail_out should + never be zero before the call. The application can consume the compressed + output when it wants, for example when the output buffer is full (avail_out + == 0), or after each call of deflate(). If deflate returns Z_OK and with + zero avail_out, it must be called again after making room in the output + buffer because there might be more output pending. Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumualte before producing output, in order to + decide how much data to accumulate before producing output, in order to maximize compression. If the parameter flush is set to Z_SYNC_FLUSH, all pending output is flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In particular - avail_in is zero after the call if enough output space has been provided - before the call.) Flushing may degrade compression for some compression - algorithms and so it should be used only when necessary. + that the decompressor can get all input data available so far. (In + particular avail_in is zero after the call if enough output space has been + provided before the call.) Flushing may degrade compression for some + compression algorithms and so it should be used only when necessary. This + completes the current deflate block and follows it with an empty stored block + that is three bits plus filler bits to the next byte, followed by four bytes + (00 00 ff ff). + + If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the + output buffer, but the output is not aligned to a byte boundary. All of the + input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. + This completes the current deflate block and follows it with an empty fixed + codes block that is 10 bits long. This assures that enough bytes are output + in order for the decompressor to finish the block before the empty fixed code + block. + + If flush is set to Z_BLOCK, a deflate block is completed and emitted, as + for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to + seven bits of the current block are held to be written as the next byte after + the next deflate block is completed. In this case, the decompressor may not + be provided enough bits at this point in order to complete decompression of + the data provided so far to the compressor. It may need to wait for the next + block to be emitted. This is for advanced applications that need to control + the emission of deflate blocks. If flush is set to Z_FULL_FLUSH, all output is flushed as with Z_SYNC_FLUSH, and the compression state is reset so that decompression can restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + random access is desired. Using Z_FULL_FLUSH too often can seriously degrade compression. 
If deflate returns with avail_out == 0, this function must be called again with the same value of the flush parameter and more output space (updated avail_out), until the flush is complete (deflate returns with non-zero - avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that avail_out is greater than six to avoid repeated flush markers due to avail_out == 0 on return. If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there - was enough output space; if deflate returns with Z_OK, this function must be + pending output is flushed and deflate returns with Z_STREAM_END if there was + enough output space; if deflate returns with Z_OK, this function must be called again with Z_FINISH and more output space (updated avail_out) but no - more input data, until it returns with Z_STREAM_END or an error. After - deflate has returned Z_STREAM_END, the only possible operations on the - stream are deflateReset or deflateEnd. + more input data, until it returns with Z_STREAM_END or an error. After + deflate has returned Z_STREAM_END, the only possible operations on the stream + are deflateReset or deflateEnd. Z_FINISH can be used immediately after deflateInit if all the compression - is to be done in a single step. In this case, avail_out must be at least - the value returned by deflateBound (see below). If deflate does not return - Z_STREAM_END, then it must be called again as described above. + is to be done in a single step. In this case, avail_out must be at least the + value returned by deflateBound (see below). Then deflate is guaranteed to + return Z_STREAM_END. If not enough output space is provided, deflate will + not return Z_STREAM_END, and it must be called again as described above. deflate() sets strm->adler to the adler32 checksum of all input read so far (that is, total_in bytes). deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered - binary. This field is only for information purposes and does not affect - the compression algorithm in any manner. + the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered + binary. This field is only for information purposes and does not affect the + compression algorithm in any manner. deflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if all input has been consumed and all output has been produced (only when flush is set to Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible - (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not + if next_in or next_out was Z_NULL), Z_BUF_ERROR if no progress is possible + (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and deflate() can be called again with more input and more output space to continue compressing. */ @@ -328,13 +353,13 @@ ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. + This function discards any unprocessed input and does not flush any pending + output. 
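A sketch of the single-step use described above (Z_FINISH immediately after
deflateInit), assuming strm was prepared with deflateInit() and that in,
in_len, out and out_len are the application's input buffer, its length, and
an output buffer sized via deflateBound(); use_output() is a stand-in for
whatever consumes the result:

    int ret;

    strm.next_in   = in;
    strm.avail_in  = in_len;
    strm.next_out  = out;
    strm.avail_out = out_len;           /* >= deflateBound(&strm, in_len) */
    ret = deflate(&strm, Z_FINISH);     /* compress everything in one call */
    if (ret == Z_STREAM_END)            /* guaranteed with enough out space */
        use_output(out, out_len - strm.avail_out);
    deflateEnd(&strm);                  /* free the internal state */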
deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent, Z_DATA_ERROR if the stream was freed - prematurely (some input or output was discarded). In the error case, - msg may be set but then points to a static string (which must not be + prematurely (some input or output was discarded). In the error case, msg + may be set but then points to a static string (which must not be deallocated). */ @@ -342,10 +367,10 @@ /* ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); - Initializes the internal stream state for decompression. The fields + Initializes the internal stream state for decompression. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. If next_in is not Z_NULL and avail_in is large enough (the exact - value depends on the compression method), inflateInit determines the + the caller. If next_in is not Z_NULL and avail_in is large enough (the + exact value depends on the compression method), inflateInit determines the compression method from the zlib header and allocates all data structures accordingly; otherwise the allocation will be deferred to the first call of inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to @@ -353,95 +378,116 @@ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller. msg is set to null if there is no error - message. inflateInit does not perform any decompression apart from reading - the zlib header if present: this will be done by inflate(). (So next_in and - avail_in may be modified, but next_out and avail_out are unchanged.) + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit() does not process any header information -- that is deferred + until inflate() is called. */ ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); /* inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce + buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. - The detailed semantics are as follows. inflate performs one or both of the + The detailed semantics are as follows. inflate performs one or both of the following actions: - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in is updated and processing - will resume at this point for the next call of inflate(). + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing will + resume at this point for the next call of inflate(). - Provide more output starting at next_out and update next_out and avail_out - accordingly. 
inflate() provides as much output as possible, until there - is no more input data or no more space in the output buffer (see below - about the flush parameter). + accordingly. inflate() provides as much output as possible, until there is + no more input data or no more space in the output buffer (see below about + the flush parameter). - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating the next_* and avail_* values accordingly. - The application can consume the uncompressed output when it wants, for - example when the output buffer is full (avail_out == 0), or after each - call of inflate(). If inflate returns Z_OK and with zero avail_out, it - must be called again after making room in the output buffer because there - might be more output pending. + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming more + output, and updating the next_* and avail_* values accordingly. The + application can consume the uncompressed output when it wants, for example + when the output buffer is full (avail_out == 0), or after each call of + inflate(). If inflate returns Z_OK and with zero avail_out, it must be + called again after making room in the output buffer because there might be + more output pending. - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, - Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. Z_BLOCK requests that inflate() stop - if and when it gets to the next deflate block boundary. When decoding the - zlib or gzip format, this will cause inflate() to return immediately after - the header and before the first block. When doing a raw inflate, inflate() - will go ahead and process the first block, and will return when it gets to - the end of that block, or when it runs out of data. + The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, + Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much + output as possible to the output buffer. Z_BLOCK requests that inflate() + stop if and when it gets to the next deflate block boundary. When decoding + the zlib or gzip format, this will cause inflate() to return immediately + after the header and before the first block. When doing a raw inflate, + inflate() will go ahead and process the first block, and will return when it + gets to the end of that block, or when it runs out of data. The Z_BLOCK option assists in appending to or combining deflate streams. Also to assist in this, on return inflate() will set strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 - if inflate() is currently decoding the last block in the deflate stream, - plus 128 if inflate() returned immediately after decoding an end-of-block - code or decoding the complete header up to just before the first byte of the - deflate stream. The end-of-block will not be indicated until all of the - uncompressed data from that block has been written to strm->next_out. The - number of unused bits may in general be greater than seven, except when - bit 7 of data_type is set, in which case the number of unused bits will be - less than eight. 
+ number of unused bits in the last byte taken from strm->next_in, plus 64 if + inflate() is currently decoding the last block in the deflate stream, plus + 128 if inflate() returned immediately after decoding an end-of-block code or + decoding the complete header up to just before the first byte of the deflate + stream. The end-of-block will not be indicated until all of the uncompressed + data from that block has been written to strm->next_out. The number of + unused bits may in general be greater than seven, except when bit 7 of + data_type is set, in which case the number of unused bits will be less than + eight. data_type is set as noted here every time inflate() returns for all + flush options, and so can be used to determine the amount of currently + consumed input in bits. + + The Z_TREES option behaves as Z_BLOCK does, but it also returns when the + end of each deflate block header is reached, before any actual data in that + block is decoded. This allows the caller to determine the length of the + deflate block header for later use in random access within a deflate block. + 256 is added to the value of strm->data_type when inflate() returns + immediately after reaching the end of the deflate block header. inflate() should normally be called until it returns Z_STREAM_END or an - error. However if all decompression is to be performed in a single step - (a single call of inflate), the parameter flush should be set to - Z_FINISH. In this case all pending input is processed and all pending - output is flushed; avail_out must be large enough to hold all the - uncompressed data. (The size of the uncompressed data may have been saved - by the compressor for this purpose.) The next operation on this stream must - be inflateEnd to deallocate the decompression state. The use of Z_FINISH - is never required, but can be used to inform inflate that a faster approach - may be used for the single inflate() call. + error. However if all decompression is to be performed in a single step (a + single call of inflate), the parameter flush should be set to Z_FINISH. In + this case all pending input is processed and all pending output is flushed; + avail_out must be large enough to hold all of the uncompressed data for the + operation to complete. (The size of the uncompressed data may have been + saved by the compressor for this purpose.) The use of Z_FINISH is not + required to perform an inflation in one step. However it may be used to + inform inflate that a faster approach can be used for the single inflate() + call. Z_FINISH also informs inflate to not maintain a sliding window if the + stream completes, which reduces inflate's memory footprint. If the stream + does not complete, either because not all of the stream is provided or not + enough output space is provided, then a sliding window will be allocated and + inflate() can be called again to continue the operation as if Z_NO_FLUSH had + been used. In this implementation, inflate() always flushes as much output as possible to the output buffer, and always uses the faster approach on the - first call. So the only effect of the flush parameter in this implementation - is on the return value of inflate(), as noted below, or when it returns early - because Z_BLOCK is used. + first call. 
So the effects of the flush parameter in this implementation are + on the return value of inflate() as noted below, when inflate() returns early + when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of + memory for a sliding window when Z_FINISH is used. If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the adler32 checksum of the dictionary + below), inflate sets strm->adler to the Adler-32 checksum of the dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the adler32 checksum of all output produced so far (that is, + strm->adler to the Adler-32 checksum of all output produced so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. At the end of the stream, inflate() checks that its computed adler32 + below. At the end of the stream, inflate() checks that its computed adler32 checksum is equal to that saved by the compressor and returns Z_STREAM_END only if the checksum is correct. - inflate() will decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically. Any information - contained in the gzip header is not retained, so applications that need that - information should instead use raw inflate, see inflateInit2() below, or - inflateBack() and perform their own processing of the gzip header and - trailer. + inflate() can decompress and check either zlib-wrapped or gzip-wrapped + deflate data. The header type is detected automatically, if requested when + initializing with inflateInit2(). Any information contained in the gzip + header is not retained, so applications that need that information should + instead use raw inflate, see inflateInit2() below, or inflateBack() and + perform their own processing of the gzip header and trailer. When processing + gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output + producted so far. The CRC-32 is checked against the gzip trailer. inflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if the end of the compressed data has @@ -449,27 +495,28 @@ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was corrupted (input stream not conforming to the zlib format or incorrect check value), Z_STREAM_ERROR if the stream structure was inconsistent (for example - if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, + next_in or next_out was Z_NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no progress is possible or if there was not enough room in the - output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may then - call inflateSync() to look for a good compression block if a partial recovery - of the data is desired. + continue decompressing. If Z_DATA_ERROR is returned, the application may + then call inflateSync() to look for a good compression block if a partial + recovery of the data is desired. */ ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. 
+ This function discards any unprocessed input and does not flush any pending + output. inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state - was inconsistent. In the error case, msg may be set but then points to a + was inconsistent. In the error case, msg may be set but then points to a static string (which must not be deallocated). */ + /* Advanced functions */ /* @@ -484,55 +531,57 @@ int memLevel, int strategy)); - This is another version of deflateInit with more compression options. The - fields next_in, zalloc, zfree and opaque must be initialized before by - the caller. + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by the + caller. - The method parameter is the compression method. It must be Z_DEFLATED in + The method parameter is the compression method. It must be Z_DEFLATED in this version of the library. The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if + (the size of the history buffer). It should be in the range 8..15 for this + version of the library. Larger values of this parameter result in better + compression at the expense of memory usage. The default value is 15 if deflateInit is used instead. - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. deflate() will then generate raw deflate data + windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + determines the window size. deflate() will then generate raw deflate data with no zlib header or trailer, and will not compute an adler32 check value. - windowBits can also be greater than 15 for optional gzip encoding. Add + windowBits can also be greater than 15 for optional gzip encoding. Add 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), - no header crc, and the operating system will be set to 255 (unknown). If a + compressed data instead of a zlib wrapper. The gzip header will have no + file name, no extra data, no comment, no modification time (set to zero), no + header crc, and the operating system will be set to 255 (unknown). If a gzip stream is being written, strm->adler is a crc32 instead of an adler32. The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but - is slow and reduces compression ratio; memLevel=9 uses maximum memory - for optimal speed. The default value is 8. See zconf.h for total memory - usage as a function of windowBits and memLevel. + for the internal compression state. memLevel=1 uses minimum memory but is + slow and reduces compression ratio; memLevel=9 uses maximum memory for + optimal speed. The default value is 8. See zconf.h for total memory usage + as a function of windowBits and memLevel. - The strategy parameter is used to tune the compression algorithm. Use the + The strategy parameter is used to tune the compression algorithm. 
Use the value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman + encoding). Filtered data consists mostly of small values with a somewhat + random distribution. In this case, the compression algorithm is tuned to + compress them better. The effect of Z_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between - Z_DEFAULT and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as - Z_HUFFMAN_ONLY, but give better compression for PNG image data. The strategy - parameter only affects the compression ratio but not the correctness of the - compressed output even if it is not set appropriately. Z_FIXED prevents the - use of dynamic Huffman codes, allowing for a simpler decoder for special - applications. + Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as + fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The + strategy parameter only affects the compression ratio but not the + correctness of the compressed output even if it is not set appropriately. + Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler + decoder for special applications. - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid - method). msg is set to null if there is no error message. deflateInit2 does - not perform any compression: this will be done by deflate(). + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid + method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is + incompatible with the version assumed by the caller (ZLIB_VERSION). msg is + set to null if there is no error message. deflateInit2 does not perform any + compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, @@ -540,38 +589,43 @@ uInt dictLength)); /* Initializes the compression dictionary from the given byte sequence - without producing any compressed output. This function must be called - immediately after deflateInit, deflateInit2 or deflateReset, before any - call of deflate. The compressor and decompressor must use exactly the same - dictionary (see inflateSetDictionary). + without producing any compressed output. When using the zlib format, this + function must be called immediately after deflateInit, deflateInit2 or + deflateReset, and before any call of deflate. When doing raw deflate, this + function must be called either before any call of deflate, or immediately + after the completion of a deflate block, i.e. after all input has been + consumed and all output has been delivered when using any of the flush + options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The + compressor and decompressor must use exactly the same dictionary (see + inflateSetDictionary). The dictionary should consist of strings (byte sequences) that are likely to be encountered later in the data to be compressed, with the most commonly - used strings preferably put towards the end of the dictionary. 
Using a + used strings preferably put towards the end of the dictionary. Using a dictionary is most useful when the data to be compressed is short and can be predicted with good accuracy; the data can then be compressed better than with the default empty dictionary. Depending on the size of the compression data structures selected by deflateInit or deflateInit2, a part of the dictionary may in effect be - discarded, for example if the dictionary is larger than the window size in - deflate or deflate2. Thus the strings most likely to be useful should be - put at the end of the dictionary, not at the front. In addition, the - current implementation of deflate will use at most the window size minus - 262 bytes of the provided dictionary. + discarded, for example if the dictionary is larger than the window size + provided in deflateInit or deflateInit2. Thus the strings most likely to be + useful should be put at the end of the dictionary, not at the front. In + addition, the current implementation of deflate will use at most the window + size minus 262 bytes of the provided dictionary. Upon return of this function, strm->adler is set to the adler32 value of the dictionary; the decompressor may later use this value to determine - which dictionary has been used by the compressor. (The adler32 value + which dictionary has been used by the compressor. (The adler32 value applies to the whole dictionary even if only a subset of the dictionary is actually used by the compressor.) If a raw deflate was requested, then the adler32 value is not computed and strm->adler is not set. deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent (for example if deflate has already been called for this stream - or if the compression method is bsort). deflateSetDictionary does not - perform any compression: this will be done by deflate(). + or if not at a block boundary for raw deflate). deflateSetDictionary does + not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, @@ -581,26 +635,26 @@ This function can be useful when several compression strategies will be tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed + data with a filter. The streams that will be discarded should then be freed by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and - can consume lots of memory. + compression state which can be quite large, so this strategy is slow and can + consume lots of memory. deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being NULL). msg is left unchanged in both source and + (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); /* This function is equivalent to deflateEnd followed by deflateInit, - but does not free and reallocate all the internal compression state. - The stream will keep the same compression level and any other attributes - that may have been set by deflateInit2. + but does not free and reallocate all the internal compression state. 
The + stream will keep the same compression level and any other attributes that + may have been set by deflateInit2. - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being NULL). + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, @@ -610,18 +664,18 @@ Dynamically update the compression level and compression strategy. The interpretation of level and strategy is as in deflateInit2. This can be used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different - strategy. If the compression level is changed, the input available so far - is compressed with the old level (and may be flushed); the new level will - take effect only at the next call of deflate(). + to switch to a different kind of input data requiring a different strategy. + If the compression level is changed, the input available so far is + compressed with the old level (and may be flushed); the new level will take + effect only at the next call of deflate(). Before the call of deflateParams, the stream state must be set as for - a call of deflate(), since the currently available input may have to - be compressed and flushed. In particular, strm->avail_out must be non-zero. + a call of deflate(), since the currently available input may have to be + compressed and flushed. In particular, strm->avail_out must be non-zero. deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source - stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR - if strm->avail_out was zero. + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR if + strm->avail_out was zero. */ ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, @@ -645,31 +699,53 @@ uLong sourceLen)); /* deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. It must be called after deflateInit() - or deflateInit2(). This would be used to allocate an output buffer - for deflation in a single pass, and so would be called before deflate(). + deflation of sourceLen bytes. It must be called after deflateInit() or + deflateInit2(), and after deflateSetHeader(), if used. This would be used + to allocate an output buffer for deflation in a single pass, and so would be + called before deflate(). If that first deflate() call is provided the + sourceLen input bytes, an output buffer allocated to the size returned by + deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed + to return Z_STREAM_END. Note that it is possible for the compressed size to + be larger than the value returned by deflateBound() if flush options other + than Z_FINISH or Z_NO_FLUSH are used. */ +ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, + unsigned *pending, + int *bits)); +/* + deflatePending() returns the number of bytes and bits of output that have + been generated, but not yet provided in the available output. The bytes not + provided would be due to the available output space having being consumed. + The number of bits of output not provided are between 0 and 7, where they + await more bits to join them in order to fill out a full byte. If pending + or bits are Z_NULL, then those values are not set. 
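(Illustrative sketch, not part of the patch: querying the output still buffered inside the deflate state with the new deflatePending() call described above. strm is assumed to be an active deflate stream set up elsewhere.)

        unsigned pending;
        int bits;

        if (deflatePending(&strm, &pending, &bits) == Z_OK) {
            /* 'pending' whole bytes plus 'bits' (0..7) extra bits are still
               held in the deflate state; they will appear in the output on a
               later deflate() call once avail_out permits */
        }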
+ + deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. + */ + ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, int bits, int value)); /* deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the - bits leftover from a previous deflate stream when appending to it. As such, - this function can only be used for raw deflate, and must be used before the - first deflate() call after a deflateInit2() or deflateReset(). bits must be - less than or equal to 16, and that many of the least significant bits of - value will be inserted in the output. + is that this function is used to start off the deflate output with the bits + leftover from a previous deflate stream when appending to it. As such, this + function can only be used for raw deflate, and must be used before the first + deflate() call after a deflateInit2() or deflateReset(). bits must be less + than or equal to 16, and that many of the least significant bits of value + will be inserted in the output. - deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. + deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough + room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the + source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, gz_headerp head)); /* - deflateSetHeader() provides gzip header information for when a gzip + deflateSetHeader() provides gzip header information for when a gzip stream is requested by deflateInit2(). deflateSetHeader() may be called after deflateInit2() or deflateReset() and before the first call of deflate(). The text, time, os, extra field, name, and comment information @@ -682,11 +758,11 @@ 1.3.x) do not support header crc's, and will report that it is a "multi-part gzip file" and give up. - If deflateSetHeader is not used, the default gzip header has text false, + If deflateSetHeader is not used, the default gzip header has text false, the time set to zero, and os set to 255, with no extra, name, or comment fields. The gzip header is returned to the default state by deflateReset(). - deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ @@ -694,43 +770,50 @@ ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, int windowBits)); - This is another version of inflateInit with an extra parameter. The + This is another version of inflateInit with an extra parameter. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. The windowBits parameter is the base two logarithm of the maximum window size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. windowBits must be greater than or equal to the windowBits value + this version of the library. The default value is 15 if inflateInit is used + instead. windowBits must be greater than or equal to the windowBits value provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window + deflateInit2() was not used. 
If a compressed stream with a larger window size is given as input, inflate() will return with the error code Z_DATA_ERROR instead of trying to allocate a larger window. - windowBits can also be -8..-15 for raw inflate. In this case, -windowBits - determines the window size. inflate() will then process raw deflate data, + windowBits can also be zero to request that inflate use the window size in + the zlib header of the compressed stream. + + windowBits can also be -8..-15 for raw inflate. In this case, -windowBits + determines the window size. inflate() will then process raw deflate data, not looking for a zlib or gzip header, not generating a check value, and not - looking for any check values for comparison at the end of the stream. This + looking for any check values for comparison at the end of the stream. This is for use with other formats that use the deflate compressed data format - such as zip. Those formats provide their own check values. If a custom + such as zip. Those formats provide their own check values. If a custom format is developed using the raw deflate format for compressed data, it is recommended that a check value such as an adler32 or a crc32 be applied to the uncompressed data as is done in the zlib, gzip, and zip formats. For - most applications, the zlib format should be used as is. Note that comments + most applications, the zlib format should be used as is. Note that comments above on the use in deflateInit2() applies to the magnitude of windowBits. - windowBits can also be greater than 15 for optional gzip decoding. Add + windowBits can also be greater than 15 for optional gzip decoding. Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection, or add 16 to decode only the gzip format (the zlib format will - return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is - a crc32 instead of an adler32. + return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a + crc32 instead of an adler32. inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg - is set to null if there is no error message. inflateInit2 does not perform - any decompression apart from reading the zlib header if present: this will - be done by inflate(). (So next_in and avail_in may be modified, but next_out - and avail_out are unchanged.) + memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + version assumed by the caller, or Z_STREAM_ERROR if the parameters are + invalid, such as a null pointer to the structure. msg is set to null if + there is no error message. inflateInit2 does not perform any decompression + apart from possibly reading the zlib header if present: actual decompression + will be done by inflate(). (So next_in and avail_in may be modified, but + next_out and avail_out are unused and unchanged.) The current implementation + of inflateInit2() does not process any header information -- that is + deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, @@ -738,36 +821,56 @@ uInt dictLength)); /* Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + sequence. This function must be called immediately after a call of inflate, + if that call returned Z_NEED_DICT. 
The dictionary chosen by the compressor can be determined from the adler32 value returned by that call of inflate. The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called - immediately after inflateInit2() or inflateReset() and before any call of - inflate() to set the dictionary. The application must insure that the - dictionary that was used for compression is provided. + deflateSetDictionary). For raw inflate, this function can be called at any + time to set the dictionary. If the provided dictionary is smaller than the + window and there is already data in the window, then the provided dictionary + will amend what's there. The application must insure that the dictionary + that was used for compression is provided. inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is + parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect adler32 value). inflateSetDictionary does not + expected one (incorrect adler32 value). inflateSetDictionary does not perform any decompression: this will be done by subsequent calls of inflate(). */ +ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, + Bytef *dictionary, + uInt *dictLength)); +/* + Returns the sliding dictionary being maintained by inflate. dictLength is + set to the number of bytes in the dictionary, and that many bytes are copied + to dictionary. dictionary must have enough space, where 32768 bytes is + always enough. If inflateGetDictionary() is called with dictionary equal to + Z_NULL, then only the dictionary length is returned, and nothing is copied. + Similary, if dictLength is Z_NULL, then it is not set. + + inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the + stream state is inconsistent. +*/ + ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); /* - Skips invalid compressed data until a full flush point (see above the - description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. + Skips invalid compressed data until a possible full flush point (see above + for the description of deflate with Z_FULL_FLUSH) can be found, or until all + available input is skipped. No output is provided. - inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR - if no more input was provided, Z_DATA_ERROR if no flush point has been found, - or Z_STREAM_ERROR if the stream structure was inconsistent. In the success - case, the application may save the current value of total_in which - indicates where valid compressed data was found. In the error case, the - application may repeatedly call inflateSync, providing more input each time, - until success or end of the input data. + inflateSync searches for a 00 00 FF FF pattern in the compressed data. + All full flush points have this pattern, but not all occurrences of this + pattern are full flush points. + + inflateSync returns Z_OK if a possible full flush point has been found, + Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point + has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. + In the success case, the application may save the current current value of + total_in which indicates where valid compressed data was found. 
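(Illustrative sketch, not part of the patch: saving the sliding window with the new inflateGetDictionary() call documented above. strm is assumed to be an active inflate stream set up elsewhere.)

        unsigned char window[32768];     /* 32768 bytes is always enough */
        uInt have = 0;

        if (inflateGetDictionary(&strm, window, &have) == Z_OK) {
            /* window[0..have-1] is the dictionary inflate currently maintains;
               it could later be restored with inflateSetDictionary() */
        }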
In the + error case, the application may repeatedly call inflateSync, providing more + input each time, until success or end of the input data. */ ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, @@ -782,18 +885,30 @@ inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being NULL). msg is left unchanged in both source and + (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); /* This function is equivalent to inflateEnd followed by inflateInit, - but does not free and reallocate all the internal decompression state. - The stream will keep attributes that may have been set by inflateInit2. + but does not free and reallocate all the internal decompression state. The + stream will keep attributes that may have been set by inflateInit2. - inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being NULL). + inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL). +*/ + +ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, + int windowBits)); +/* + This function is the same as inflateReset, but it also permits changing + the wrap and window size requests. The windowBits parameter is interpreted + the same as it is for inflateInit2. + + inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being Z_NULL), or if + the windowBits parameter is invalid. */ ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, @@ -801,54 +916,87 @@ int value)); /* This function inserts bits in the inflate input stream. The intent is - that this function is used to start inflating at a bit position in the - middle of a byte. The provided bits will be used before any bytes are used - from next_in. This function should only be used with raw inflate, and - should be used before the first inflate() call after inflateInit2() or - inflateReset(). bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. + that this function is used to start inflating at a bit position in the + middle of a byte. The provided bits will be used before any bytes are used + from next_in. This function should only be used with raw inflate, and + should be used before the first inflate() call after inflateInit2() or + inflateReset(). bits must be less than or equal to 16, and that many of the + least significant bits of value will be inserted in the input. - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + If bits is negative, then the input stream bit buffer is emptied. Then + inflatePrime() can be called again to put bits in the buffer. This is used + to clear out bits leftover after feeding inflate a block description prior + to feeding inflate codes. + + inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ +ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); +/* + This function returns two values, one in the lower 16 bits of the return + value, and the other in the remaining upper bits, obtained by shifting the + return value down 16 bits. If the upper value is -1 and the lower value is + zero, then inflate() is currently decoding information outside of a block. 
+ If the upper value is -1 and the lower value is non-zero, then inflate is in + the middle of a stored block, with the lower value equaling the number of + bytes from the input remaining to copy. If the upper value is not -1, then + it is the number of bits back from the current bit position in the input of + the code (literal or length/distance pair) currently being processed. In + that case the lower value is the number of bytes already emitted for that + code. + + A code is being processed if inflate is waiting for more input to complete + decoding of the code, or if it has completed decoding but is waiting for + more output space to write the literal or match data. + + inflateMark() is used to mark locations in the input data for random + access, which may be at bit positions, and to note those cases where the + output of a code may span boundaries of random access blocks. The current + location in the input stream can be determined from avail_in and data_type + as noted in the description for the Z_BLOCK flush parameter for inflate. + + inflateMark returns the value noted above or -1 << 16 if the provided + source stream state was inconsistent. +*/ + ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, gz_headerp head)); /* - inflateGetHeader() requests that gzip header information be stored in the + inflateGetHeader() requests that gzip header information be stored in the provided gz_header structure. inflateGetHeader() may be called after inflateInit2() or inflateReset(), and before the first call of inflate(). As inflate() processes the gzip stream, head->done is zero until the header is completed, at which time head->done is set to one. If a zlib stream is being decoded, then head->done is set to -1 to indicate that there will be - no gzip header information forthcoming. Note that Z_BLOCK can be used to - force inflate() to return immediately after header processing is complete - and before any actual data is decompressed. + no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be + used to force inflate() to return immediately after header processing is + complete and before any actual data is decompressed. - The text, time, xflags, and os fields are filled in with the gzip header + The text, time, xflags, and os fields are filled in with the gzip header contents. hcrc is set to true if there is a header CRC. (The header CRC - was valid if done is set to one.) If extra is not Z_NULL, then extra_max + was valid if done is set to one.) If extra is not Z_NULL, then extra_max contains the maximum number of bytes to write to extra. Once done is true, extra_len contains the actual extra field length, and extra contains the extra field, or that field truncated if extra_max is less than extra_len. If name is not Z_NULL, then up to name_max characters are written there, terminated with a zero unless the length is greater than name_max. If comment is not Z_NULL, then up to comm_max characters are written there, - terminated with a zero unless the length is greater than comm_max. When - any of extra, name, or comment are not Z_NULL and the respective field is - not present in the header, then that field is set to Z_NULL to signal its + terminated with a zero unless the length is greater than comm_max. When any + of extra, name, or comment are not Z_NULL and the respective field is not + present in the header, then that field is set to Z_NULL to signal its absence. This allows the use of deflateSetHeader() with the returned structure to duplicate the header. 
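(Illustrative sketch, not part of the patch: the inflateGetHeader() setup described above. strm is assumed to have been initialized for gzip or automatic header detection, e.g. inflateInit2(&strm, 16 + MAX_WBITS); memset comes from <string.h>; the buffer sizes are arbitrary.)

        gz_header head;
        char name[256], comment[256];
        unsigned char extra[1024];

        memset(&head, 0, sizeof head);
        head.name    = (Bytef *)name;      head.name_max  = (uInt)sizeof name;
        head.comment = (Bytef *)comment;   head.comm_max  = (uInt)sizeof comment;
        head.extra   = extra;              head.extra_max = (uInt)sizeof extra;
        inflateGetHeader(&strm, &head);    /* before the first inflate() call */

        /* after inflate() has consumed the header: head.done == 1 means the
           gzip header fields are filled in; head.done == -1 means a zlib
           stream with no gzip header */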
However if those fields are set to allocated memory, then the application will need to save those pointers elsewhere so that they can be eventually freed. - If inflateGetHeader is not used, then the header information is simply + If inflateGetHeader is not used, then the header information is simply discarded. The header is always checked for validity, including the header CRC if present. inflateReset() will reset the process to discard the header information. The application would need to call inflateGetHeader() again to retrieve the header from the next gzip stream. - inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ @@ -869,12 +1017,13 @@ See inflateBack() for the usage of these routines. inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of - the paramaters are invalid, Z_MEM_ERROR if the internal state could not - be allocated, or Z_VERSION_ERROR if the version of the library does not - match the version of the header file. + the parameters are invalid, Z_MEM_ERROR if the internal state could not be + allocated, or Z_VERSION_ERROR if the version of the library does not match + the version of the header file. */ -typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *)); +typedef unsigned (*in_func) OF((void FAR *, + z_const unsigned char FAR * FAR *)); typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, @@ -882,24 +1031,25 @@ out_func out, void FAR *out_desc)); /* inflateBack() does a raw inflate with a single call using a call-back - interface for input and output. This is more efficient than inflate() for - file i/o applications in that it avoids copying between the output and the - sliding window by simply making the window itself the output buffer. This - function trusts the application to not change the output buffer passed by - the output function, at least until inflateBack() returns. + interface for input and output. This is potentially more efficient than + inflate() for file i/o applications, in that it avoids copying between the + output and the sliding window by simply making the window itself the output + buffer. inflate() can be faster on modern CPUs when used with large + buffers. inflateBack() trusts the application to not change the output + buffer passed by the output function, at least until inflateBack() returns. inflateBackInit() must be called first to allocate the internal state and to initialize the state with the user-provided window buffer. inflateBack() may then be used multiple times to inflate a complete, raw - deflate stream with each call. inflateBackEnd() is then called to free - the allocated state. + deflate stream with each call. inflateBackEnd() is then called to free the + allocated state. A raw deflate stream is one with no zlib or gzip header or trailer. This routine would normally be used in a utility that reads zip or gzip files and writes out uncompressed files. The utility would decode the - header and process the trailer on its own, hence this routine expects - only the raw deflate stream to decompress. This is different from the - normal behavior of inflate(), which expects either a zlib or gzip header and + header and process the trailer on its own, hence this routine expects only + the raw deflate stream to decompress. 
This is different from the normal + behavior of inflate(), which expects either a zlib or gzip header and trailer around the deflate stream. inflateBack() uses two subroutines supplied by the caller that are then @@ -925,7 +1075,7 @@ calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. + initially be taken from strm->next_in[0 .. strm->avail_in - 1]. The in_desc and out_desc parameters of inflateBack() is passed as the first parameter of in() and out() respectively when they are called. These @@ -935,15 +1085,15 @@ On return, inflateBack() will set strm->next_in and strm->avail_in to pass back any unused input that was provided by the last in() call. The return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format - error in the deflate stream (in which case strm->msg is set to indicate the - nature of the error), or Z_STREAM_ERROR if the stream was not properly - initialized. In the case of Z_BUF_ERROR, an input or output error can be - distinguished using strm->next_in which will be Z_NULL only if in() returned - an error. If strm->next is not Z_NULL, then the Z_BUF_ERROR was due to - out() returning non-zero. (in() will always be called before out(), so - strm->next_in is assured to be defined if out() returns non-zero.) Note - that inflateBack() cannot return Z_OK. + if in() or out() returned an error, Z_DATA_ERROR if there was a format error + in the deflate stream (in which case strm->msg is set to indicate the nature + of the error), or Z_STREAM_ERROR if the stream was not properly initialized. + In the case of Z_BUF_ERROR, an input or output error can be distinguished + using strm->next_in which will be Z_NULL only if in() returned an error. If + strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning + non-zero. (in() will always be called before out(), so strm->next_in is + assured to be defined if out() returns non-zero.) Note that inflateBack() + cannot return Z_OK. */ ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); @@ -995,27 +1145,27 @@ 27-31: 0 (reserved) */ +#ifndef Z_SOLO /* utility functions */ /* - The following utility functions are implemented on top of the - basic stream-oriented functions. To simplify the interface, some - default options are assumed (compression level and memory usage, - standard memory allocation functions). The source code of these - utility functions can easily be modified if you need special options. + The following utility functions are implemented on top of the basic + stream-oriented functions. To simplify the interface, some default options + are assumed (compression level and memory usage, standard memory allocation + functions). The source code of these utility functions can be modified if + you need special options. */ ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Compresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be at least the value returned - by compressBound(sourceLen). Upon exit, destLen is the actual size of the + the byte length of the source buffer. 
Upon entry, destLen is the total size + of the destination buffer, which must be at least the value returned by + compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to compress a whole file at once if the - input file is mmap'ed. + compress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer. @@ -1025,11 +1175,11 @@ const Bytef *source, uLong sourceLen, int level)); /* - Compresses the source buffer into the destination buffer. The level + Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the + length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the + compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed buffer. compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough @@ -1040,159 +1190,255 @@ ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); /* compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before - a compress() or compress2() call to allocate the destination buffer. + compress() or compress2() on sourceLen bytes. It would be used before a + compress() or compress2() call to allocate the destination buffer. */ ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Decompresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be large enough to hold the - entire uncompressed data. (The size of the uncompressed data must have - been saved previously by the compressor and transmitted to the decompressor - by some mechanism outside the scope of this compression library.) - Upon exit, destLen is the actual size of the compressed buffer. - This function can be used to decompress a whole file at once if the - input file is mmap'ed. + the byte length of the source buffer. Upon entry, destLen is the total size + of the destination buffer, which must be large enough to hold the entire + uncompressed data. (The size of the uncompressed data must have been saved + previously by the compressor and transmitted to the decompressor by some + mechanism outside the scope of this compression library.) Upon exit, destLen + is the actual size of the uncompressed buffer. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. + buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In + the case where there is not enough room, uncompress() will fill the output + buffer with the uncompressed data up to that point. */ + /* gzip file access functions */ -typedef voidp gzFile; +/* + This library supports reading and writing files in gzip (.gz) format with + an interface similar to that of stdio, using the functions that start with + "gz". The gzip format is different from the zlib format. gzip is a gzip + wrapper, documented in RFC 1952, wrapped around a deflate stream. 
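(Illustrative sketch, not part of the patch: a round trip through the one-call helpers documented above -- compressBound(), compress() and uncompress(). It needs <stdlib.h> and <string.h> in addition to <zlib.h>; the malloc error checks are omitted for brevity.)

        const char *text = "hello, hello, hello, hello";
        uLong srclen = (uLong)strlen(text) + 1;          /* include the NUL */
        uLong bound  = compressBound(srclen);
        Bytef *packed = malloc(bound);
        Bytef *plain  = malloc(srclen);
        uLongf packedlen = bound;
        uLongf plainlen  = srclen;

        if (compress(packed, &packedlen, (const Bytef *)text, srclen) == Z_OK &&
            uncompress(plain, &plainlen, packed, packedlen) == Z_OK) {
            /* plainlen == srclen and plain matches the original bytes */
        }
        free(packed);
        free(plain);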
+*/ -ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); +typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ + /* - Opens a gzip (.gz) file for reading or writing. The mode parameter - is as in fopen ("rb" or "wb") but can also include a compression level - ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for - Huffman only compression as in "wb1h", or 'R' for run-length encoding - as in "wb1R". (See the description of deflateInit2 for more information - about the strategy parameter.) +ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); + + Opens a gzip (.gz) file for reading or writing. The mode parameter is as + in fopen ("rb" or "wb") but can also include a compression level ("wb9") or + a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only + compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' + for fixed code compression as in "wb9F". (See the description of + deflateInit2 for more information about the strategy parameter.) 'T' will + request transparent writing or appending with no compression and not using + the gzip format. + + "a" can be used instead of "w" to request that the gzip stream that will + be written be appended to the file. "+" will result in an error, since + reading and writing to the same gzip file is not supported. The addition of + "x" when writing will create the file exclusively, which fails if the file + already exists. On systems that support it, the addition of "e" when + reading or writing will set the flag to close the file on an execve() call. + + These functions, as well as gzip, will read and decode a sequence of gzip + streams in a file. The append function of gzopen() can be used to create + such a file. (Also see gzflush() for another way to do this.) When + appending, gzopen does not test whether the file begins with a gzip stream, + nor does it look for the end of the gzip streams to begin appending. gzopen + will simply append a gzip stream to the existing file. gzopen can be used to read a file which is not in gzip format; in this - case gzread will directly read from the file without decompression. + case gzread will directly read from the file without decompression. When + reading, this will be detected automatically by looking for the magic two- + byte gzip header. - gzopen returns NULL if the file could not be opened or if there was - insufficient memory to allocate the (de)compression state; errno - can be checked to distinguish the two cases (if errno is zero, the - zlib error is Z_MEM_ERROR). */ + gzopen returns NULL if the file could not be opened, if there was + insufficient memory to allocate the gzFile state, or if an invalid mode was + specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). + errno can be checked to determine if the reason gzopen failed was that the + file could not be opened. +*/ -ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); +ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); /* - gzdopen() associates a gzFile with the file descriptor fd. File - descriptors are obtained from calls like open, dup, creat, pipe or - fileno (in the file has been previously opened with fopen). - The mode parameter is as in gzopen. - The next call of gzclose on the returned gzFile will also close the - file descriptor fd, just like fclose(fdopen(fd), mode) closes the file - descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode). 
- gzdopen returns NULL if there was insufficient memory to allocate - the (de)compression state. + gzdopen associates a gzFile with the file descriptor fd. File descriptors + are obtained from calls like open, dup, creat, pipe or fileno (if the file + has been previously opened with fopen). The mode parameter is as in gzopen. + + The next call of gzclose on the returned gzFile will also close the file + descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor + fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, + mode);. The duplicated descriptor should be saved to avoid a leak, since + gzdopen does not close fd if it fails. If you are using fileno() to get the + file descriptor from a FILE *, then you will have to use dup() to avoid + double-close()ing the file descriptor. Both gzclose() and fclose() will + close the associated file descriptor, so they need to have different file + descriptors. + + gzdopen returns NULL if there was insufficient memory to allocate the + gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not + provided, or '+' was provided), or if fd is -1. The file descriptor is not + used until the next gz* read, write, seek, or close operation, so gzdopen + will not detect if fd is invalid (unless fd is -1). +*/ + +ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); +/* + Set the internal buffer size used by this library's functions. The + default buffer size is 8192 bytes. This function must be called after + gzopen() or gzdopen(), and before any other calls that read or write the + file. The buffer memory allocation is always deferred to the first read or + write. Two buffers are allocated, either both of the specified size when + writing, or one of the specified size and the other twice that size when + reading. A larger buffer size of, for example, 64K or 128K bytes will + noticeably increase the speed of decompression (reading). + + The new buffer size also affects the maximum length for gzprintf(). + + gzbuffer() returns 0 on success, or -1 on failure, such as being called + too late. */ ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); /* - Dynamically update the compression level or strategy. See the description + Dynamically update the compression level or strategy. See the description of deflateInit2 for the meaning of these parameters. + gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not opened for writing. */ -ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); +ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); /* - Reads the given number of uncompressed bytes from the compressed file. - If the input file was not in gzip format, gzread copies the given number - of bytes into the buffer. - gzread returns the number of uncompressed bytes actually read (0 for - end of file, -1 for error). */ + Reads the given number of uncompressed bytes from the compressed file. If + the input file is not in gzip format, gzread copies the given number of + bytes into the buffer directly from the file. -ZEXTERN int ZEXPORT gzwrite OF((gzFile file, - voidpc buf, unsigned len)); + After reaching the end of a gzip stream in the input, gzread will continue + to read, looking for another gzip stream. Any number of gzip streams may be + concatenated in the input file, and will all be decompressed by gzread(). 
+ If something other than a gzip stream is encountered after a gzip stream, + that remaining trailing garbage is ignored (and no error is returned). + + gzread can be used to read a gzip file that is being concurrently written. + Upon reaching the end of the input, gzread will return with the available + data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then + gzclearerr can be used to clear the end of file indicator in order to permit + gzread to be tried again. Z_OK indicates that a gzip stream was completed + on the last gzread. Z_BUF_ERROR indicates that the input file ended in the + middle of a gzip stream. Note that gzread does not return -1 in the event + of an incomplete gzip stream. This error is deferred until gzclose(), which + will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip + stream. Alternatively, gzerror can be used before gzclose to detect this + case. + + gzread returns the number of uncompressed bytes actually read, less than + len for end of file, or -1 for error. +*/ + +ZEXTERN int ZEXPORT gzwrite OF((gzFile file, + voidpc buf, unsigned len)); /* Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of uncompressed bytes actually written - (0 in case of error). + gzwrite returns the number of uncompressed bytes written or 0 in case of + error. */ -ZEXTERN int ZEXPORTVA gzprintf OF((gzFile file, const char *format, ...)); +ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); /* - Converts, formats, and writes the args to the compressed file under - control of the format string, as in fprintf. gzprintf returns the number of - uncompressed bytes actually written (0 in case of error). The number of - uncompressed bytes written is limited to 4095. The caller should assure that - this limit is not exceeded. If it is exceeded, then gzprintf() will return - return an error (0) with nothing written. In this case, there may also be a - buffer overflow with unpredictable consequences, which is possible only if - zlib was compiled with the insecure functions sprintf() or vsprintf() - because the secure snprintf() or vsnprintf() functions were not available. + Converts, formats, and writes the arguments to the compressed file under + control of the format string, as in fprintf. gzprintf returns the number of + uncompressed bytes actually written, or 0 in case of error. The number of + uncompressed bytes written is limited to 8191, or one less than the buffer + size given to gzbuffer(). The caller should assure that this limit is not + exceeded. If it is exceeded, then gzprintf() will return an error (0) with + nothing written. In this case, there may also be a buffer overflow with + unpredictable consequences, which is possible only if zlib was compiled with + the insecure functions sprintf() or vsprintf() because the secure snprintf() + or vsnprintf() functions were not available. This can be determined using + zlibCompileFlags(). */ ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); /* - Writes the given null-terminated string to the compressed file, excluding + Writes the given null-terminated string to the compressed file, excluding the terminating null character. - gzputs returns the number of characters written, or -1 in case of error. + + gzputs returns the number of characters written, or -1 in case of error. 
*/ ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); /* - Reads bytes from the compressed file until len-1 characters are read, or - a newline character is read and transferred to buf, or an end-of-file - condition is encountered. The string is then terminated with a null - character. - gzgets returns buf, or Z_NULL in case of error. + Reads bytes from the compressed file until len-1 characters are read, or a + newline character is read and transferred to buf, or an end-of-file + condition is encountered. If any characters are read or if len == 1, the + string is terminated with a null character. If no characters are read due + to an end-of-file or len < 1, then the buffer is left untouched. + + gzgets returns buf which is a null-terminated string, or it returns NULL + for end-of-file or in case of error. If there was an error, the contents at + buf are indeterminate. */ -ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); +ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); /* - Writes c, converted to an unsigned char, into the compressed file. - gzputc returns the value that was written, or -1 in case of error. + Writes c, converted to an unsigned char, into the compressed file. gzputc + returns the value that was written, or -1 in case of error. */ -ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); +ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); /* - Reads one byte from the compressed file. gzgetc returns this byte - or -1 in case of end of file or error. + Reads one byte from the compressed file. gzgetc returns this byte or -1 + in case of end of file or error. This is implemented as a macro for speed. + As such, it does not do all of the checking the other functions do. I.e. + it does not check to see if file is NULL, nor whether the structure file + points to has been clobbered or not. */ -ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); +ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); /* - Push one character back onto the stream to be read again later. - Only one character of push-back is allowed. gzungetc() returns the - character pushed, or -1 on failure. gzungetc() will fail if a - character has been pushed but not read yet, or if c is -1. The pushed - character will be discarded if the stream is repositioned with gzseek() - or gzrewind(). + Push one character back onto the stream to be read as the first character + on the next read. At least one character of push-back is allowed. + gzungetc() returns the character pushed, or -1 on failure. gzungetc() will + fail if c is -1, and may fail if a character has been pushed but not read + yet. If gzungetc is used immediately after gzopen or gzdopen, at least the + output buffer size of pushed characters is allowed. (See gzbuffer above.) + The pushed character will be discarded if the stream is repositioned with + gzseek() or gzrewind(). */ -ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); +ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); /* - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. The return value is the zlib - error number (see function gzerror below). gzflush returns Z_OK if - the flush parameter is Z_FINISH and all output could be flushed. - gzflush should be called only when strictly necessary because it can - degrade compression. + Flushes all pending output into the compressed file. The parameter flush + is as in the deflate() function. The return value is the zlib error number + (see function gzerror below). 
gzflush is only permitted when writing. + + If the flush parameter is Z_FINISH, the remaining data is written and the + gzip stream is completed in the output. If gzwrite() is called again, a new + gzip stream will be started in the output. gzread() is able to read such + concatented gzip streams. + + gzflush should be called only when strictly necessary because it will + degrade compression if called too often. */ -ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, - z_off_t offset, int whence)); /* - Sets the starting position for the next gzread or gzwrite on the - given compressed file. The offset represents a number of bytes in the - uncompressed data stream. The whence parameter is defined as in lseek(2); +ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, + z_off_t offset, int whence)); + + Sets the starting position for the next gzread or gzwrite on the given + compressed file. The offset represents a number of bytes in the + uncompressed data stream. The whence parameter is defined as in lseek(2); the value SEEK_END is not supported. + If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are + extremely slow. If the file is opened for writing, only forward seeks are supported; gzseek then compresses a sequence of zeroes up to the new starting position. - gzseek returns the resulting offset location as measured in bytes from + gzseek returns the resulting offset location as measured in bytes from the beginning of the uncompressed stream, or -1 in case of error, in particular if the file is opened for writing and the new starting position would be before the current position. @@ -1202,68 +1448,134 @@ /* Rewinds the given file. This function is supported only for reading. - gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) + gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) */ +/* ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); + + Returns the starting position for the next gzread or gzwrite on the given + compressed file. This position represents a number of bytes in the + uncompressed data stream, and is zero when starting, even if appending or + reading a gzip stream from the middle of a file using gzdopen(). + + gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) +*/ + /* - Returns the starting position for the next gzread or gzwrite on the - given compressed file. This position represents a number of bytes in the - uncompressed data stream. +ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); - gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) + Returns the current offset in the file being read or written. This offset + includes the count of bytes that precede the gzip stream, for example when + appending or when using gzdopen() for reading. When reading, the offset + does not include as yet unused buffered input. This information can be used + for a progress indicator. On error, gzoffset() returns -1. */ ZEXTERN int ZEXPORT gzeof OF((gzFile file)); /* - Returns 1 when EOF has previously been detected reading the given - input stream, otherwise zero. + Returns true (1) if the end-of-file indicator has been set while reading, + false (0) otherwise. Note that the end-of-file indicator is set only if the + read tried to go past the end of the input, but came up short. 
Therefore, + just like feof(), gzeof() may return false even if there is no more data to + read, in the event that the last read request was for the exact number of + bytes remaining in the input file. This will happen if the input file size + is an exact multiple of the buffer size. + + If gzeof() returns true, then the read functions will return no more data, + unless the end-of-file indicator is reset by gzclearerr() and the input file + has grown since the previous end of file was detected. */ ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); /* - Returns 1 if file is being read directly without decompression, otherwise - zero. + Returns true (1) if file is being copied directly while reading, or false + (0) if file is a gzip stream being decompressed. + + If the input file is empty, gzdirect() will return true, since the input + does not contain a gzip stream. + + If gzdirect() is used immediately after gzopen() or gzdopen() it will + cause buffers to be allocated to allow reading the file to determine if it + is a gzip file. Therefore if gzbuffer() is used, it should be called before + gzdirect(). + + When writing, gzdirect() returns true (1) if transparent writing was + requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: + gzdirect() is not needed when writing. Transparent writing must be + explicitly requested, so the application already knows the answer. When + linking statically, using gzdirect() will include all of the zlib code for + gzip file reading and decompression, which may not be desired.) */ ZEXTERN int ZEXPORT gzclose OF((gzFile file)); /* - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. The return value is the zlib - error number (see function gzerror below). + Flushes all pending output if necessary, closes the compressed file and + deallocates the (de)compression state. Note that once file is closed, you + cannot call gzerror with file, since its structures have been deallocated. + gzclose must not be called more than once on the same file, just as free + must not be called more than once on the same allocation. + + gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a + file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the + last read ended in the middle of a gzip stream, or Z_OK on success. +*/ + +ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); +ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); +/* + Same as gzclose(), but gzclose_r() is only for use when reading, and + gzclose_w() is only for use when writing or appending. The advantage to + using these instead of gzclose() is that they avoid linking in zlib + compression or decompression code that is not used when only reading or only + writing respectively. If gzclose() is used, then both compression and + decompression code will be included the application when linking to a static + zlib library. */ ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); /* - Returns the error message for the last error which occurred on the - given compressed file. errnum is set to zlib error number. If an - error occurred in the file system and not in the compression library, - errnum is set to Z_ERRNO and the application may consult errno - to get the exact error code. + Returns the error message for the last error which occurred on the given + compressed file. errnum is set to zlib error number. 
If an error occurred + in the file system and not in the compression library, errnum is set to + Z_ERRNO and the application may consult errno to get the exact error code. + + The application must not modify the returned string. Future calls to + this function may invalidate the previously returned string. If file is + closed, then the string previously returned by gzerror will no longer be + available. + + gzerror() should be used to distinguish errors from end-of-file for those + functions above that do not distinguish those cases in their return values. */ ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); /* - Clears the error and end-of-file flags for file. This is analogous to the - clearerr() function in stdio. This is useful for continuing to read a gzip + Clears the error and end-of-file flags for file. This is analogous to the + clearerr() function in stdio. This is useful for continuing to read a gzip file that is being written concurrently. */ +#endif /* !Z_SOLO */ + /* checksum functions */ /* These functions are not related to compression but are exported - anyway because they might be useful in applications using the - compression library. + anyway because they might be useful in applications using the compression + library. */ ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); /* Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. If buf is NULL, this function returns - the required initial value for the checksum. - An Adler-32 checksum is almost as reliable as a CRC32 but can be computed - much faster. Usage example: + return the updated checksum. If buf is Z_NULL, this function returns the + required initial value for the checksum. + + An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + much faster. + + Usage example: uLong adler = adler32(0L, Z_NULL, 0); @@ -1273,21 +1585,25 @@ if (adler != original_adler) error(); */ +/* ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, z_off_t len2)); -/* + Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of - seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. + seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note + that the z_off_t type (like off_t) is a signed integer. If len2 is + negative, the result has no meaning or utility. */ ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); /* Update a running CRC-32 with the bytes buf[0..len-1] and return the - updated CRC-32. If buf is NULL, this function returns the required initial - value for the for the crc. Pre- and post-conditioning (one's complement) is + updated CRC-32. If buf is Z_NULL, this function returns the required + initial value for the crc. Pre- and post-conditioning (one's complement) is performed within this function so it shouldn't be done by the application. + Usage example: uLong crc = crc32(0L, Z_NULL, 0); @@ -1298,9 +1614,9 @@ if (crc != original_crc) error(); */ +/* ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); -/* Combine two CRC-32 check values into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, CRC-32 check values were calculated for each, crc1 and crc2. 
crc32_combine() returns the CRC-32 @@ -1329,26 +1645,121 @@ const char *version, int stream_size)); #define deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream)) + deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) #define inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream)) + inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) #define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, sizeof(z_stream)) + (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) #define inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream)) + inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ + (int)sizeof(z_stream)) #define inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, sizeof(z_stream)) + ZLIB_VERSION, (int)sizeof(z_stream)) +#ifndef Z_SOLO -#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) - struct internal_state {int dummy;}; /* hack for buggy compilers */ +/* gzgetc() macro and its supporting function and exposed data structure. Note + * that the real internal state is much larger than the exposed structure. + * This abbreviated structure exposes just enough for the gzgetc() macro. The + * user should not mess with these exposed elements, since their names or + * behavior could change in the future, perhaps even capriciously. They can + * only be used by the gzgetc() macro. You have been warned. + */ +struct gzFile_s { + unsigned have; + unsigned char *next; + z_off64_t pos; +}; +ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ +#ifdef Z_PREFIX_SET +# undef z_gzgetc +# define z_gzgetc(g) \ + ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) +#else +# define gzgetc(g) \ + ((g)->have ? 
((g)->have--, (g)->pos++, *((g)->next)++) : gzgetc(g)) #endif +/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or + * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if + * both are true, the application gets the *64 functions, and the regular + * functions are changed to 64 bits) -- in case these are set on systems + * without large file support, _LFS64_LARGEFILE must also be true + */ +#ifdef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); + ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); +#endif + +#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) +# ifdef Z_PREFIX_SET +# define z_gzopen z_gzopen64 +# define z_gzseek z_gzseek64 +# define z_gztell z_gztell64 +# define z_gzoffset z_gzoffset64 +# define z_adler32_combine z_adler32_combine64 +# define z_crc32_combine z_crc32_combine64 +# else +# define gzopen gzopen64 +# define gzseek gzseek64 +# define gztell gztell64 +# define gzoffset gzoffset64 +# define adler32_combine adler32_combine64 +# define crc32_combine crc32_combine64 +# endif +# ifndef Z_LARGE64 + ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +# endif +#else + ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); + ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); + ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); + ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); +#endif + +#else /* Z_SOLO */ + + ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); + +#endif /* !Z_SOLO */ + +/* hack for buggy compilers */ +#if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) + struct internal_state {int dummy;}; +#endif + +/* undocumented functions */ ZEXTERN const char * ZEXPORT zError OF((int)); -ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp z)); -ZEXTERN const uLongf * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); +ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); +ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); +ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); +ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); +#if defined(_WIN32) && !defined(Z_SOLO) +ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, + const char *mode)); +#endif +#if defined(STDC) || defined(Z_HAVE_STDARG_H) +# ifndef Z_SOLO +ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, + const char *format, + va_list va)); +# endif +#endif #ifdef __cplusplus } diff --git a/Modules/zlib/zlib.map b/Modules/zlib/zlib.map new file mode 100644 --- /dev/null +++ b/Modules/zlib/zlib.map @@ -0,0 +1,83 @@ +ZLIB_1.2.0 { + global: + compressBound; + deflateBound; + inflateBack; + inflateBackEnd; + inflateBackInit_; + inflateCopy; + local: + deflate_copyright; + inflate_copyright; + 
inflate_fast; + inflate_table; + zcalloc; + zcfree; + z_errmsg; + gz_error; + gz_intmax; + _*; +}; + +ZLIB_1.2.0.2 { + gzclearerr; + gzungetc; + zlibCompileFlags; +} ZLIB_1.2.0; + +ZLIB_1.2.0.8 { + deflatePrime; +} ZLIB_1.2.0.2; + +ZLIB_1.2.2 { + adler32_combine; + crc32_combine; + deflateSetHeader; + inflateGetHeader; +} ZLIB_1.2.0.8; + +ZLIB_1.2.2.3 { + deflateTune; + gzdirect; +} ZLIB_1.2.2; + +ZLIB_1.2.2.4 { + inflatePrime; +} ZLIB_1.2.2.3; + +ZLIB_1.2.3.3 { + adler32_combine64; + crc32_combine64; + gzopen64; + gzseek64; + gztell64; + inflateUndermine; +} ZLIB_1.2.2.4; + +ZLIB_1.2.3.4 { + inflateReset2; + inflateMark; +} ZLIB_1.2.3.3; + +ZLIB_1.2.3.5 { + gzbuffer; + gzoffset; + gzoffset64; + gzclose_r; + gzclose_w; +} ZLIB_1.2.3.4; + +ZLIB_1.2.5.1 { + deflatePending; +} ZLIB_1.2.3.5; + +ZLIB_1.2.5.2 { + deflateResetKeep; + gzgetc_; + inflateResetKeep; +} ZLIB_1.2.5.1; + +ZLIB_1.2.7.1 { + inflateGetDictionary; + gzvprintf; +} ZLIB_1.2.5.2; diff --git a/Modules/zlib/zlib.pc.in b/Modules/zlib/zlib.pc.in new file mode 100644 --- /dev/null +++ b/Modules/zlib/zlib.pc.in @@ -0,0 +1,13 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +sharedlibdir=@sharedlibdir@ +includedir=@includedir@ + +Name: zlib +Description: zlib compression library +Version: @VERSION@ + +Requires: +Libs: -L${libdir} -L${sharedlibdir} -lz +Cflags: -I${includedir} diff --git a/Modules/zlib/zutil.c b/Modules/zlib/zutil.c --- a/Modules/zlib/zutil.c +++ b/Modules/zlib/zutil.c @@ -1,17 +1,20 @@ /* zutil.c -- target dependent utility functions for the compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2005, 2010, 2011, 2012 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" +#ifndef Z_SOLO +# include "gzguts.h" +#endif #ifndef NO_DUMMY_DECL struct internal_state {int dummy;}; /* for buggy compilers */ #endif -const char * const z_errmsg[10] = { +z_const char * const z_errmsg[10] = { "need dictionary", /* Z_NEED_DICT 2 */ "stream end", /* Z_STREAM_END 1 */ "", /* Z_OK 0 */ @@ -34,25 +37,25 @@ uLong flags; flags = 0; - switch (sizeof(uInt)) { + switch ((int)(sizeof(uInt))) { case 2: break; case 4: flags += 1; break; case 8: flags += 2; break; default: flags += 3; } - switch (sizeof(uLong)) { + switch ((int)(sizeof(uLong))) { case 2: break; case 4: flags += 1 << 2; break; case 8: flags += 2 << 2; break; default: flags += 3 << 2; } - switch (sizeof(voidpf)) { + switch ((int)(sizeof(voidpf))) { case 2: break; case 4: flags += 1 << 4; break; case 8: flags += 2 << 4; break; default: flags += 3 << 4; } - switch (sizeof(z_off_t)) { + switch ((int)(sizeof(z_off_t))) { case 2: break; case 4: flags += 1 << 6; break; case 8: flags += 2 << 6; break; @@ -85,27 +88,27 @@ #ifdef FASTEST flags += 1L << 21; #endif -#ifdef STDC +#if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifdef NO_vsnprintf - flags += 1L << 25; + flags += 1L << 25; # ifdef HAS_vsprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # else # ifdef HAS_vsnprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # endif #else - flags += 1L << 24; + flags += 1L << 24; # ifdef NO_snprintf - flags += 1L << 25; + flags += 1L << 25; # ifdef HAS_sprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # else # ifdef HAS_snprintf_void - flags += 1L << 26; + flags += 1L << 26; # endif # endif #endif @@ -117,9 +120,9 @@ # ifndef verbose # define verbose 0 # endif -int z_verbose = verbose; +int ZLIB_INTERNAL z_verbose = verbose; -void 
z_error (m) +void ZLIB_INTERNAL z_error (m) char *m; { fprintf(stderr, "%s\n", m); @@ -146,7 +149,7 @@ #ifndef HAVE_MEMCPY -void zmemcpy(dest, source, len) +void ZLIB_INTERNAL zmemcpy(dest, source, len) Bytef* dest; const Bytef* source; uInt len; @@ -157,7 +160,7 @@ } while (--len != 0); } -int zmemcmp(s1, s2, len) +int ZLIB_INTERNAL zmemcmp(s1, s2, len) const Bytef* s1; const Bytef* s2; uInt len; @@ -170,7 +173,7 @@ return 0; } -void zmemzero(dest, len) +void ZLIB_INTERNAL zmemzero(dest, len) Bytef* dest; uInt len; { @@ -181,6 +184,7 @@ } #endif +#ifndef Z_SOLO #ifdef SYS16BIT @@ -213,7 +217,7 @@ * a protected system like OS/2. Use Microsoft C instead. */ -voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) { voidpf buf = opaque; /* just to make some compilers happy */ ulg bsize = (ulg)items*size; @@ -237,7 +241,7 @@ return buf; } -void zcfree (voidpf opaque, voidpf ptr) +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { int n; if (*(ush*)&ptr != 0) { /* object < 64K */ @@ -272,13 +276,13 @@ # define _hfree hfree #endif -voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) { if (opaque) opaque = 0; /* to make compiler happy */ return _halloc((long)items, size); } -void zcfree (voidpf opaque, voidpf ptr) +void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { if (opaque) opaque = 0; /* to make compiler happy */ _hfree(ptr); @@ -297,7 +301,7 @@ extern void free OF((voidpf ptr)); #endif -voidpf zcalloc (opaque, items, size) +voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) voidpf opaque; unsigned items; unsigned size; @@ -307,7 +311,7 @@ (voidpf)calloc(items, size); } -void zcfree (opaque, ptr) +void ZLIB_INTERNAL zcfree (opaque, ptr) voidpf opaque; voidpf ptr; { @@ -316,3 +320,5 @@ } #endif /* MY_ZCALLOC */ + +#endif /* !Z_SOLO */ diff --git a/Modules/zlib/zutil.h b/Modules/zlib/zutil.h --- a/Modules/zlib/zutil.h +++ b/Modules/zlib/zutil.h @@ -1,5 +1,5 @@ /* zutil.h -- internal interface and configuration of the compression library - * Copyright (C) 1995-2005 Jean-loup Gailly. + * Copyright (C) 1995-2013 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h */ @@ -13,30 +13,24 @@ #ifndef ZUTIL_H #define ZUTIL_H -#define ZLIB_INTERNAL +#ifdef HAVE_HIDDEN +# define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) +#else +# define ZLIB_INTERNAL +#endif + #include "zlib.h" -#ifdef STDC -# ifndef _WIN32_WCE +#if defined(STDC) && !defined(Z_SOLO) +# if !(defined(_WIN32_WCE) && defined(_MSC_VER)) # include # endif # include # include #endif -#ifdef NO_ERRNO_H -# ifdef _WIN32_WCE - /* The Microsoft C Run-Time Library for Windows CE doesn't have - * errno. We define it as a global variable to simplify porting. - * Its value is always 0 and should not be used. We rename it to - * avoid conflict with other libraries that use the same workaround. 
- */ -# define errno z_errno -# endif - extern int errno; -#else -# ifndef _WIN32_WCE -# include -# endif + +#ifdef Z_SOLO + typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */ #endif #ifndef local @@ -50,13 +44,13 @@ typedef ush FAR ushf; typedef unsigned long ulg; -extern const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ +extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ /* (size given to avoid silly warnings with Visual C++) */ #define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] #define ERR_RETURN(strm,err) \ - return (strm->msg = (char*)ERR_MSG(err), (err)) + return (strm->msg = ERR_MSG(err), (err)) /* To be used only when the state is known to be valid */ /* common constants */ @@ -88,16 +82,18 @@ #if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) # define OS_CODE 0x00 -# if defined(__TURBOC__) || defined(__BORLANDC__) -# if(__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) - /* Allow compilation with ANSI keywords only enabled */ - void _Cdecl farfree( void *block ); - void *_Cdecl farmalloc( unsigned long nbytes ); -# else -# include +# ifndef Z_SOLO +# if defined(__TURBOC__) || defined(__BORLANDC__) +# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) + /* Allow compilation with ANSI keywords only enabled */ + void _Cdecl farfree( void *block ); + void *_Cdecl farmalloc( unsigned long nbytes ); +# else +# include +# endif +# else /* MSC or DJGPP */ +# include # endif -# else /* MSC or DJGPP */ -# include # endif #endif @@ -117,18 +113,20 @@ #ifdef OS2 # define OS_CODE 0x06 -# ifdef M_I86 - #include +# if defined(M_I86) && !defined(Z_SOLO) +# include # endif #endif #if defined(MACOS) || defined(TARGET_OS_MAC) # define OS_CODE 0x07 -# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os -# include /* for fdopen */ -# else -# ifndef fdopen -# define fdopen(fd,mode) NULL /* No fdopen() */ +# ifndef Z_SOLO +# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fdopen */ +# else +# ifndef fdopen +# define fdopen(fd,mode) NULL /* No fdopen() */ +# endif # endif # endif #endif @@ -151,7 +149,7 @@ # define fdopen(fd,mode) NULL /* No fdopen() */ #endif -#if (defined(_MSC_VER) && (_MSC_VER > 600)) +#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX # if defined(_WIN32_WCE) # define fdopen(fd,mode) NULL /* No fdopen() */ # ifndef _PTRDIFF_T_DEFINED @@ -163,6 +161,19 @@ # endif #endif +#if defined(__BORLANDC__) && !defined(MSDOS) + #pragma warn -8004 + #pragma warn -8008 + #pragma warn -8066 +#endif + +/* provide prototypes for these when building zlib without LFS */ +#if !defined(_WIN32) && \ + (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) + ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); +#endif + /* common defaults */ #ifndef OS_CODE @@ -175,40 +186,7 @@ /* functions */ -#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#if defined(__CYGWIN__) -# ifndef HAVE_VSNPRINTF -# define HAVE_VSNPRINTF -# endif -#endif -#ifndef HAVE_VSNPRINTF -# ifdef MSDOS - /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), - but for now we just assume it doesn't. */ -# define NO_vsnprintf -# endif -# ifdef __TURBOC__ -# define NO_vsnprintf -# endif -# ifdef WIN32 - /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ -# if !defined(vsnprintf) && !defined(NO_vsnprintf) -# define vsnprintf _vsnprintf -# endif -# endif -# ifdef __SASC -# define NO_vsnprintf -# endif -#endif -#ifdef VMS -# define NO_vsnprintf -#endif - -#if defined(pyr) +#if defined(pyr) || defined(Z_SOLO) # define NO_MEMCPY #endif #if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) @@ -232,16 +210,16 @@ # define zmemzero(dest, len) memset(dest, 0, len) # endif #else - extern void zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); - extern int zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); - extern void zmemzero OF((Bytef* dest, uInt len)); + void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); + int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); + void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); #endif /* Diagnostic functions */ #ifdef DEBUG # include - extern int z_verbose; - extern void z_error OF((char *m)); + extern int ZLIB_INTERNAL z_verbose; + extern void ZLIB_INTERNAL z_error OF((char *m)); # define Assert(cond,msg) {if(!(cond)) z_error(msg);} # define Trace(x) {if (z_verbose>=0) fprintf x ;} # define Tracev(x) {if (z_verbose>0) fprintf x ;} @@ -257,13 +235,19 @@ # define Tracecv(c,x) #endif - -voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); -void zcfree OF((voidpf opaque, voidpf ptr)); +#ifndef Z_SOLO + voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, + unsigned size)); + void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); +#endif #define ZALLOC(strm, items, size) \ (*((strm)->zalloc))((strm)->opaque, (items), (size)) #define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} +/* Reverse the bytes in a 32-bit value */ +#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ + (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) + #endif /* ZUTIL_H */ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 09:09:17 2014 From: python-checkins at python.org (matthias.klose) Date: Thu, 11 Sep 2014 09:09:17 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogLSBJc3N1ZSAjMjIz?= =?utf-8?q?81=3A_Update_zlib_to_1=2E2=2E8=2E?= Message-ID: <3htrn961GMz7Lk0@mail.python.org> http://hg.python.org/cpython/rev/769126143656 changeset: 92393:769126143656 branch: 2.7 user: doko at ubuntu.com date: Thu Sep 11 09:08:52 2014 +0200 summary: - Issue #22381: Update zlib to 1.2.8. 
Update zlib file names for the PC installers files: PC/VC6/pythoncore.dsp | 14 +++++++++++++- PC/VS7.1/pythoncore.vcproj | 11 ++++++++++- PC/VS8.0/pythoncore.vcproj | 14 +++++++++++++- PCbuild/pythoncore.vcproj | 16 ++++++++++++++-- 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/PC/VC6/pythoncore.dsp b/PC/VC6/pythoncore.dsp --- a/PC/VC6/pythoncore.dsp +++ b/PC/VC6/pythoncore.dsp @@ -458,7 +458,19 @@ # End Source File # Begin Source File -SOURCE=..\..\Modules\zlib\gzio.c +SOURCE=..\..\Modules\zlib\gzclose.c +# End Source File +# Begin Source File + +SOURCE=..\..\Modules\zlib\gzlib.c +# End Source File +# Begin Source File + +SOURCE=..\..\Modules\zlib\gzread.c +# End Source File +# Begin Source File + +SOURCE=..\..\Modules\zlib\gzwrite.c # End Source File # Begin Source File diff --git a/PC/VS7.1/pythoncore.vcproj b/PC/VS7.1/pythoncore.vcproj --- a/PC/VS7.1/pythoncore.vcproj +++ b/PC/VS7.1/pythoncore.vcproj @@ -289,7 +289,16 @@ RelativePath="..\..\Modules\zlib\deflate.c"> + RelativePath="..\..\Modules\zlib\gzclose.c"> + + + + + + diff --git a/PC/VS8.0/pythoncore.vcproj b/PC/VS8.0/pythoncore.vcproj --- a/PC/VS8.0/pythoncore.vcproj +++ b/PC/VS8.0/pythoncore.vcproj @@ -1218,7 +1218,19 @@ > + + + + + + + + + + + + - \ No newline at end of file + -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:31 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:31 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIxOTUx?= =?utf-8?q?=3A_Use_attemptckalloc=28=29_instead_of_ckalloc=28=29_in_Tkinte?= =?utf-8?q?r=2E?= Message-ID: <3htt0v4q3cz7Lk0@mail.python.org> http://hg.python.org/cpython/rev/ee969a717cb5 changeset: 92394:ee969a717cb5 branch: 2.7 user: Serhiy Storchaka date: Thu Sep 11 10:38:08 2014 +0300 summary: Issue #21951: Use attemptckalloc() instead of ckalloc() in Tkinter. ckalloc() causes the Tcl interpreter to panic, attemptckalloc() returns NULL if the memory allocation fails. files: Misc/NEWS | 3 ++ Modules/_tkinter.c | 40 +++++++++++++++++++++++---------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #21951: Tkinter now most likely raises MemoryError instead of crash + if the memory allocation fails. + - Issue #22226: First letter no longer is stripped from the "status" key in the result of Treeview.heading(). 
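The NEWS entry above describes the practical change visible from Python: when a memory
allocation inside _tkinter fails, the caller now gets a MemoryError instead of a Tcl panic
that aborts the process. A minimal, illustrative sketch of where that exception can now
surface (the calls below normally succeed; nothing here forces the allocation to fail, and
the variable names are made up for the example):

    import tkinter

    tcl = tkinter.Tcl()
    try:
        # Argument conversion and event setup in _tkinter now use
        # attemptckalloc(); a failed allocation is reported here as
        # MemoryError rather than panicking the Tcl interpreter.
        tcl.call('set', 'greeting', 'hello')
        tcl.setvar('answer', '42')
    except MemoryError:
        print('allocation failed inside _tkinter, reported cleanly')
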
diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -408,8 +408,8 @@ PyErr_SetString(PyExc_OverflowError, "tuple is too long"); goto finally; } - argv = (char **)ckalloc((size_t)argc * sizeof(char *)); - fv = (int *)ckalloc((size_t)argc * sizeof(int)); + argv = (char **)attemptckalloc((size_t)argc * sizeof(char *)); + fv = (int *)attemptckalloc((size_t)argc * sizeof(int)); if (argv == NULL || fv == NULL) { PyErr_NoMemory(); goto finally; @@ -754,7 +754,7 @@ Tcl_SetVar(v->interp, "tcl_interactive", "0", TCL_GLOBAL_ONLY); /* This is used to get the application class for Tk 4.1 and up */ - argv0 = (char*)ckalloc(strlen(className) + 1); + argv0 = (char*)attemptckalloc(strlen(className) + 1); if (!argv0) { PyErr_NoMemory(); Py_DECREF(v); @@ -788,7 +788,7 @@ if (use) len += strlen(use) + sizeof "-use "; - args = (char*)ckalloc(len); + args = (char*)attemptckalloc(len); if (!args) { PyErr_NoMemory(); Py_DECREF(v); @@ -1056,7 +1056,7 @@ PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; } - argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *)); + argv = (Tcl_Obj **) attemptckalloc(((size_t)size) * sizeof(Tcl_Obj *)); if(!argv) return 0; for (i = 0; i < size; i++) @@ -1083,7 +1083,7 @@ return Tcl_NewUnicodeObj(inbuf, size); allocsize = ((size_t)size) * sizeof(Tcl_UniChar); if (allocsize >= size) - outbuf = (Tcl_UniChar*)ckalloc(allocsize); + outbuf = (Tcl_UniChar*)attemptckalloc(allocsize); /* Else overflow occurred, and we take the next exit */ if (!outbuf) { PyErr_NoMemory(); @@ -1272,7 +1272,7 @@ PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; } - objv = (Tcl_Obj **)ckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); + objv = (Tcl_Obj **)attemptckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); if (objv == NULL) { PyErr_NoMemory(); objc = 0; @@ -1410,7 +1410,11 @@ PyObject *exc_type, *exc_value, *exc_tb; if (!WaitForMainloop(self)) return NULL; - ev = (Tkapp_CallEvent*)ckalloc(sizeof(Tkapp_CallEvent)); + ev = (Tkapp_CallEvent*)attemptckalloc(sizeof(Tkapp_CallEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CallProc; ev->self = self; ev->args = args; @@ -1700,8 +1704,11 @@ if (!WaitForMainloop(self)) return NULL; - ev = (VarEvent*)ckalloc(sizeof(VarEvent)); - + ev = (VarEvent*)attemptckalloc(sizeof(VarEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->self = selfptr; ev->args = args; ev->flags = flags; @@ -2312,7 +2319,12 @@ #ifdef WITH_THREAD if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; - CommandEvent *ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + CommandEvent *ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + PyMem_DEL(data); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 1; @@ -2359,7 +2371,11 @@ if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; CommandEvent *ev; - ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 0; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:33 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:33 
+0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxOTUx?= =?utf-8?q?=3A_Use_attemptckalloc=28=29_instead_of_ckalloc=28=29_in_Tkinte?= =?utf-8?q?r=2E?= Message-ID: <3htt0x0JYTz7LkP@mail.python.org> http://hg.python.org/cpython/rev/1223c882253f changeset: 92395:1223c882253f branch: 3.4 parent: 92390:3ac9f9576ce6 user: Serhiy Storchaka date: Thu Sep 11 10:38:54 2014 +0300 summary: Issue #21951: Use attemptckalloc() instead of ckalloc() in Tkinter. ckalloc() causes the Tcl interpreter to panic, attemptckalloc() returns NULL if the memory allocation fails. files: Misc/NEWS | 3 ++ Modules/_tkinter.c | 36 ++++++++++++++++++++++++--------- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #21951: Tkinter now most likely raises MemoryError instead of crash + if the memory allocation fails. + - Issue #22338: Fix a crash in the json module on memory allocation failure. - Issue #22226: First letter no longer is stripped from the "status" key in diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -598,7 +598,7 @@ Tcl_SetVar(v->interp, "tcl_interactive", "0", TCL_GLOBAL_ONLY); /* This is used to get the application class for Tk 4.1 and up */ - argv0 = (char*)ckalloc(strlen(className) + 1); + argv0 = (char*)attemptckalloc(strlen(className) + 1); if (!argv0) { PyErr_NoMemory(); Py_DECREF(v); @@ -632,7 +632,7 @@ if (use) len += strlen(use) + sizeof "-use "; - args = (char*)ckalloc(len); + args = (char*)attemptckalloc(len); if (!args) { PyErr_NoMemory(); Py_DECREF(v); @@ -903,7 +903,7 @@ PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; } - argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *)); + argv = (Tcl_Obj **) attemptckalloc(((size_t)size) * sizeof(Tcl_Obj *)); if(!argv) return 0; for (i = 0; i < size; i++) @@ -933,7 +933,7 @@ if (kind == sizeof(Tcl_UniChar)) return Tcl_NewUnicodeObj(inbuf, size); allocsize = ((size_t)size) * sizeof(Tcl_UniChar); - outbuf = (Tcl_UniChar*)ckalloc(allocsize); + outbuf = (Tcl_UniChar*)attemptckalloc(allocsize); /* Else overflow occurred, and we take the next exit */ if (!outbuf) { PyErr_NoMemory(); @@ -1098,7 +1098,7 @@ PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; } - objv = (Tcl_Obj **)ckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); + objv = (Tcl_Obj **)attemptckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); if (objv == NULL) { PyErr_NoMemory(); objc = 0; @@ -1234,7 +1234,11 @@ PyObject *exc_type, *exc_value, *exc_tb; if (!WaitForMainloop(self)) return NULL; - ev = (Tkapp_CallEvent*)ckalloc(sizeof(Tkapp_CallEvent)); + ev = (Tkapp_CallEvent*)attemptckalloc(sizeof(Tkapp_CallEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CallProc; ev->self = self; ev->args = args; @@ -1485,8 +1489,11 @@ if (!WaitForMainloop(self)) return NULL; - ev = (VarEvent*)ckalloc(sizeof(VarEvent)); - + ev = (VarEvent*)attemptckalloc(sizeof(VarEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->self = selfptr; ev->args = args; ev->flags = flags; @@ -2082,7 +2089,12 @@ #ifdef WITH_THREAD if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; - CommandEvent *ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + CommandEvent *ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + PyMem_DEL(data); + 
return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 1; @@ -2128,7 +2140,11 @@ if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; CommandEvent *ev; - ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 0; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:34 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:34 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321951=3A_Use_attemptckalloc=28=29_instead_of_ck?= =?utf-8?q?alloc=28=29_in_Tkinter=2E?= Message-ID: <3htt0y36G9z7Lk0@mail.python.org> http://hg.python.org/cpython/rev/499b60b7d067 changeset: 92396:499b60b7d067 parent: 92391:135fc23e475c parent: 92395:1223c882253f user: Serhiy Storchaka date: Thu Sep 11 10:40:44 2014 +0300 summary: Issue #21951: Use attemptckalloc() instead of ckalloc() in Tkinter. ckalloc() causes the Tcl interpreter to panic, attemptckalloc() returns NULL if the memory allocation fails. files: Misc/NEWS | 3 ++ Modules/_tkinter.c | 36 ++++++++++++++++++++++++--------- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #21951: Tkinter now most likely raises MemoryError instead of crash + if the memory allocation fails. + - Issue #22338: Fix a crash in the json module on memory allocation failure. - Issue #12410: imaplib.IMAP4 now supports the context management protocol. 
diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -605,7 +605,7 @@ Tcl_SetVar(v->interp, "tcl_interactive", "0", TCL_GLOBAL_ONLY); /* This is used to get the application class for Tk 4.1 and up */ - argv0 = (char*)ckalloc(strlen(className) + 1); + argv0 = (char*)attemptckalloc(strlen(className) + 1); if (!argv0) { PyErr_NoMemory(); Py_DECREF(v); @@ -639,7 +639,7 @@ if (use) len += strlen(use) + sizeof "-use "; - args = (char*)ckalloc(len); + args = (char*)attemptckalloc(len); if (!args) { PyErr_NoMemory(); Py_DECREF(v); @@ -912,7 +912,7 @@ "list is too long"); return NULL; } - argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *)); + argv = (Tcl_Obj **) attemptckalloc(((size_t)size) * sizeof(Tcl_Obj *)); if(!argv) { PyErr_NoMemory(); return NULL; @@ -944,7 +944,7 @@ if (kind == sizeof(Tcl_UniChar)) return Tcl_NewUnicodeObj(inbuf, size); allocsize = ((size_t)size) * sizeof(Tcl_UniChar); - outbuf = (Tcl_UniChar*)ckalloc(allocsize); + outbuf = (Tcl_UniChar*)attemptckalloc(allocsize); /* Else overflow occurred, and we take the next exit */ if (!outbuf) { PyErr_NoMemory(); @@ -1111,7 +1111,7 @@ "list is too long"); return NULL; } - objv = (Tcl_Obj **)ckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); + objv = (Tcl_Obj **)attemptckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); if (objv == NULL) { PyErr_NoMemory(); objc = 0; @@ -1247,7 +1247,11 @@ PyObject *exc_type, *exc_value, *exc_tb; if (!WaitForMainloop(self)) return NULL; - ev = (Tkapp_CallEvent*)ckalloc(sizeof(Tkapp_CallEvent)); + ev = (Tkapp_CallEvent*)attemptckalloc(sizeof(Tkapp_CallEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CallProc; ev->self = self; ev->args = args; @@ -1498,8 +1502,11 @@ if (!WaitForMainloop(self)) return NULL; - ev = (VarEvent*)ckalloc(sizeof(VarEvent)); - + ev = (VarEvent*)attemptckalloc(sizeof(VarEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->self = selfptr; ev->args = args; ev->flags = flags; @@ -2098,7 +2105,12 @@ #ifdef WITH_THREAD if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; - CommandEvent *ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + CommandEvent *ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + PyMem_DEL(data); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 1; @@ -2144,7 +2156,11 @@ if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) { Tcl_Condition cond = NULL; CommandEvent *ev; - ev = (CommandEvent*)ckalloc(sizeof(CommandEvent)); + ev = (CommandEvent*)attemptckalloc(sizeof(CommandEvent)); + if (ev == NULL) { + PyErr_NoMemory(); + return NULL; + } ev->ev.proc = (Tcl_EventProc*)Tkapp_CommandProc; ev->interp = self->interp; ev->create = 0; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:35 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:35 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIxOTUx?= =?utf-8?q?=3A_Fixed_a_crash_in_Tkinter_on_AIX_when_called_Tcl_command_wit?= =?utf-8?q?h?= Message-ID: <3htt0z56Wjz7Lkk@mail.python.org> http://hg.python.org/cpython/rev/d6c7ab5a2065 changeset: 92397:d6c7ab5a2065 branch: 2.7 parent: 92394:ee969a717cb5 user: Serhiy Storchaka date: Thu Sep 11 10:56:59 2014 +0300 summary: Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl 
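Before the file-by-file diff, a rough Python-level illustration of the case the new test
covers (this is not the test itself; the command name is made up): a Python function
registered as a Tcl command and invoked with empty arguments, which previously made the
argument conversion request a zero-sized block from the Tcl allocator.

    import tkinter

    tcl = tkinter.Tcl()
    seen = []
    tcl.createcommand('takeone', seen.append)

    # Converting '' or () to a Tcl value used to ask the allocator for a
    # zero-sized block, which returns NULL on some platforms (e.g. AIX)
    # and crashed the interpreter; both cases are now handled explicitly.
    tcl.call('takeone', '')
    tcl.call('takeone', ())
    print(seen)            # both calls should now complete without crashing
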
command with empty string or tuple argument. On some platforms Tcl memory allocator returns NULL when allocating zero-sized block of memory. files: Lib/test/test_tcl.py | 3 ++- Misc/NEWS | 3 +++ Modules/_tkinter.c | 4 ++++ 3 files changed, 9 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -429,7 +429,6 @@ self.assertEqual(passValue((1, '2', (3.4,))), (1, '2', (3.4,)) if self.wantobjects else '1 2 3.4') - @unittest.skipIf(sys.platform.startswith("aix"), 'Issue #21951: crashes on AIX') def test_user_command(self): result = [] def testfunc(arg): @@ -456,9 +455,11 @@ check('string') check('string\xbd') check('string\xe2\x82\xac', u'string\u20ac') + check('') check(u'string') check(u'string\xbd') check(u'string\u20ac') + check(u'') check('str\xc0\x80ing', u'str\x00ing') check('str\xc0\x80ing\xe2\x82\xac', u'str\x00ing\u20ac') check(u'str\x00ing') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with + empty string or tuple argument. + - Issue #21951: Tkinter now most likely raises MemoryError instead of crash if the memory allocation fails. diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -1052,6 +1052,8 @@ Py_ssize_t size, i; size = PyTuple_Size(value); + if (size == 0) + return Tcl_NewListObj(0, NULL); if (!CHECK_SIZE(size, sizeof(Tcl_Obj *))) { PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; @@ -1075,6 +1077,8 @@ Tcl_UniChar *outbuf = NULL; Py_ssize_t i; size_t allocsize; + if (size == 0) + return Tcl_NewUnicodeObj((const void *)"", 0); if (!CHECK_SIZE(size, sizeof(Tcl_UniChar))) { PyErr_SetString(PyExc_OverflowError, "string is too long"); return NULL; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:36 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:36 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxOTUx?= =?utf-8?q?=3A_Fixed_a_crash_in_Tkinter_on_AIX_when_called_Tcl_command_wit?= =?utf-8?q?h?= Message-ID: <3htt106zPCz7Ll8@mail.python.org> http://hg.python.org/cpython/rev/6a96c28f9474 changeset: 92398:6a96c28f9474 branch: 3.4 parent: 92395:1223c882253f user: Serhiy Storchaka date: Thu Sep 11 10:57:13 2014 +0300 summary: Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with empty string or tuple argument. On some platforms Tcl memory allocator returns NULL when allocating zero-sized block of memory. 
files: Lib/test/test_tcl.py | 3 ++- Misc/NEWS | 3 +++ Modules/_tkinter.c | 4 ++++ 3 files changed, 9 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -416,7 +416,6 @@ self.assertEqual(passValue((1, '2', (3.4,))), (1, '2', (3.4,)) if self.wantobjects else '1 2 3.4') - @unittest.skipIf(sys.platform.startswith("aix"), 'Issue #21951: crashes on AIX') def test_user_command(self): result = None def testfunc(arg): @@ -444,9 +443,11 @@ check('string') check('string\xbd') check('string\u20ac') + check('') check(b'string', 'string') check(b'string\xe2\x82\xac', 'string\xe2\x82\xac') check(b'string\xbd', 'string\xbd') + check(b'', '') check('str\x00ing') check('str\x00ing\xbd') check('str\x00ing\u20ac') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with + empty string or tuple argument. + - Issue #21951: Tkinter now most likely raises MemoryError instead of crash if the memory allocation fails. diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -899,6 +899,8 @@ Py_ssize_t size, i; size = PyTuple_Size(value); + if (size == 0) + return Tcl_NewListObj(0, NULL); if (!CHECK_SIZE(size, sizeof(Tcl_Obj *))) { PyErr_SetString(PyExc_OverflowError, "tuple is too long"); return NULL; @@ -925,6 +927,8 @@ inbuf = PyUnicode_DATA(value); size = PyUnicode_GET_LENGTH(value); + if (size == 0) + return Tcl_NewUnicodeObj((const void *)"", 0); if (!CHECK_SIZE(size, sizeof(Tcl_UniChar))) { PyErr_SetString(PyExc_OverflowError, "string is too long"); return NULL; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 10:04:38 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 10:04:38 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321951=3A_Fixed_a_crash_in_Tkinter_on_AIX_when_c?= =?utf-8?q?alled_Tcl_command_with?= Message-ID: <3htt121ddwz7Lkn@mail.python.org> http://hg.python.org/cpython/rev/7b7bae546959 changeset: 92399:7b7bae546959 parent: 92396:499b60b7d067 parent: 92398:6a96c28f9474 user: Serhiy Storchaka date: Thu Sep 11 10:58:02 2014 +0300 summary: Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with empty string or tuple argument. On some platforms Tcl memory allocator returns NULL when allocating zero-sized block of memory. 
files: Lib/test/test_tcl.py | 3 ++- Misc/NEWS | 3 +++ Modules/_tkinter.c | 4 ++++ 3 files changed, 9 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -418,7 +418,6 @@ self.assertEqual(passValue(['a', ['b', 'c']]), ('a', ('b', 'c')) if self.wantobjects else 'a {b c}') - @unittest.skipIf(sys.platform.startswith("aix"), 'Issue #21951: crashes on AIX') def test_user_command(self): result = None def testfunc(arg): @@ -446,9 +445,11 @@ check('string') check('string\xbd') check('string\u20ac') + check('') check(b'string', 'string') check(b'string\xe2\x82\xac', 'string\xe2\x82\xac') check(b'string\xbd', 'string\xbd') + check(b'', '') check('str\x00ing') check('str\x00ing\xbd') check('str\x00ing\u20ac') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with + empty string or tuple argument. + - Issue #21951: Tkinter now most likely raises MemoryError instead of crash if the memory allocation fails. diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -906,6 +906,8 @@ Py_ssize_t size, i; size = PySequence_Fast_GET_SIZE(value); + if (size == 0) + return Tcl_NewListObj(0, NULL); if (!CHECK_SIZE(size, sizeof(Tcl_Obj *))) { PyErr_SetString(PyExc_OverflowError, PyTuple_Check(value) ? "tuple is too long" : @@ -936,6 +938,8 @@ inbuf = PyUnicode_DATA(value); size = PyUnicode_GET_LENGTH(value); + if (size == 0) + return Tcl_NewUnicodeObj((const void *)"", 0); if (!CHECK_SIZE(size, sizeof(Tcl_UniChar))) { PyErr_SetString(PyExc_OverflowError, "string is too long"); return NULL; -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Thu Sep 11 10:42:17 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 11 Sep 2014 10:42:17 +0200 Subject: [Python-checkins] Daily reference leaks (135fc23e475c): sum=151924 Message-ID: results for 135fc23e475c on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [-4, 0, 0] references, sum=-4 test_collections leaked [-2, 0, 0] memory blocks, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 0] references, sum=-2 test_site leaked [-2, 0, 0] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogb29k3s', '-x'] From python-checkins at python.org Thu Sep 11 11:23:11 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 11:23:11 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2313968=3A_The_glob?= =?utf-8?q?_module_now_supports_recursive_search_in?= Message-ID: <3htvlg2FK2z7LjS@mail.python.org> http://hg.python.org/cpython/rev/ff4b9d654691 changeset: 92400:ff4b9d654691 user: Serhiy Storchaka date: Thu Sep 11 12:17:37 2014 +0300 summary: Issue #13968: The glob module now supports recursive search in subdirectories using the "**" pattern. 
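In practice the new pattern composes with the existing wildcards; a short sketch of typical usage of the feature as added here (what gets printed depends entirely on the directory you run it from):

    import glob
    import os

    # '**' with recursive=True matches files and directories at any depth.
    print(glob.glob(os.path.join('**', '*.py'), recursive=True))

    # A trailing separator restricts the matches to directories only.
    print(glob.glob('**' + os.sep, recursive=True))

    # iglob() yields results lazily, which helps with the large trees the
    # documentation warns about.
    for path in glob.iglob(os.path.join('**', '*.txt'), recursive=True):
        print(path)

Without recursive=True the '**' pattern keeps its old meaning and behaves like a plain '*'.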
files: Doc/library/glob.rst | 24 ++++- Doc/whatsnew/3.5.rst | 7 + Lib/glob.py | 56 +++++++++++- Lib/test/test_glob.py | 128 +++++++++++++++++++++++++++-- Misc/NEWS | 3 + 5 files changed, 199 insertions(+), 19 deletions(-) diff --git a/Doc/library/glob.rst b/Doc/library/glob.rst --- a/Doc/library/glob.rst +++ b/Doc/library/glob.rst @@ -29,7 +29,7 @@ The :mod:`pathlib` module offers high-level path objects. -.. function:: glob(pathname) +.. function:: glob(pathname, *, recursive=False) Return a possibly-empty list of path names that match *pathname*, which must be a string containing a path specification. *pathname* can be either absolute @@ -37,8 +37,19 @@ :file:`../../Tools/\*/\*.gif`), and can contain shell-style wildcards. Broken symlinks are included in the results (as in the shell). + If *recursive* is true, the pattern "``**``" will match any files and zero or + more directories and subdirectories. If the pattern is followed by a + ``os.sep``, only directories and subdirectories match. -.. function:: iglob(pathname) + .. note:: + Using the "``**``" pattern in large directory trees may consume + an inordinate amount of time. + + .. versionchanged:: 3.5 + Support for recursive globs using "``**``". + + +.. function:: iglob(pathname, recursive=False) Return an :term:`iterator` which yields the same values as :func:`glob` without actually storing them all simultaneously. @@ -55,8 +66,9 @@ .. versionadded:: 3.4 -For example, consider a directory containing only the following files: -:file:`1.gif`, :file:`2.txt`, and :file:`card.gif`. :func:`glob` will produce +For example, consider a directory containing the following files: +:file:`1.gif`, :file:`2.txt`, :file:`card.gif` and a subdirectory :file:`sub` +which contains only the file :file:`3.txt`. :func:`glob` will produce the following results. Notice how any leading components of the path are preserved. :: @@ -67,6 +79,10 @@ ['1.gif', 'card.gif'] >>> glob.glob('?.gif') ['1.gif'] + >>> glob.glob('**/*.txt', recursive=True) + ['2.txt', 'sub/3.txt'] + >>> glob.glob('./**/', recursive=True) + ['./', './sub/'] If the directory contains files starting with ``.`` they won't be matched by default. For example, consider a directory containing :file:`card.gif` and diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -141,6 +141,13 @@ *module* contains no docstrings instead of raising :exc:`ValueError` (contributed by Glenn Jones in :issue:`15916`). +glob +---- + +* :func:`~glob.iglob` and :func:`~glob.glob` now support recursive search in + subdirectories using the "``**``" pattern. + (Contributed by Serhiy Storchaka in :issue:`13968`.) + imaplib ------- diff --git a/Lib/glob.py b/Lib/glob.py --- a/Lib/glob.py +++ b/Lib/glob.py @@ -6,7 +6,7 @@ __all__ = ["glob", "iglob"] -def glob(pathname): +def glob(pathname, *, recursive=False): """Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la @@ -14,10 +14,12 @@ dot are special cases that are not matched by '*' and '?' patterns. + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. """ - return list(iglob(pathname)) + return list(iglob(pathname, recursive=recursive)) -def iglob(pathname): +def iglob(pathname, *, recursive=False): """Return an iterator which yields the paths matching a pathname pattern. 
The pattern may contain simple shell-style wildcards a la @@ -25,6 +27,8 @@ dot are special cases that are not matched by '*' and '?' patterns. + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. """ dirname, basename = os.path.split(pathname) if not has_magic(pathname): @@ -37,17 +41,23 @@ yield pathname return if not dirname: - yield from glob1(None, basename) + if recursive and _isrecursive(basename): + yield from glob2(dirname, basename) + else: + yield from glob1(dirname, basename) return # `os.path.split()` returns the argument itself as a dirname if it is a # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). if dirname != pathname and has_magic(dirname): - dirs = iglob(dirname) + dirs = iglob(dirname, recursive=recursive) else: dirs = [dirname] if has_magic(basename): - glob_in_dir = glob1 + if recursive and _isrecursive(basename): + glob_in_dir = glob2 + else: + glob_in_dir = glob1 else: glob_in_dir = glob0 for dirname in dirs: @@ -83,6 +93,34 @@ return [basename] return [] +# This helper function recursively yields relative pathnames inside a literal +# directory. + +def glob2(dirname, pattern): + assert _isrecursive(pattern) + if dirname: + yield pattern[:0] + yield from _rlistdir(dirname) + +# Recursively yields relative pathnames inside a literal directory. + +def _rlistdir(dirname): + if not dirname: + if isinstance(dirname, bytes): + dirname = bytes(os.curdir, 'ASCII') + else: + dirname = os.curdir + try: + names = os.listdir(dirname) + except os.error: + return + for x in names: + if not _ishidden(x): + yield x + path = os.path.join(dirname, x) if dirname else x + for y in _rlistdir(path): + yield os.path.join(x, y) + magic_check = re.compile('([*?[])') magic_check_bytes = re.compile(b'([*?[])') @@ -97,6 +135,12 @@ def _ishidden(path): return path[0] in ('.', b'.'[0]) +def _isrecursive(pattern): + if isinstance(pattern, bytes): + return pattern == b'**' + else: + return pattern == '**' + def escape(pathname): """Escape all special characters. 
""" diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py --- a/Lib/test/test_glob.py +++ b/Lib/test/test_glob.py @@ -4,7 +4,7 @@ import sys import unittest -from test.support import (run_unittest, TESTFN, skip_unless_symlink, +from test.support import (TESTFN, skip_unless_symlink, can_symlink, create_empty_file) @@ -13,6 +13,9 @@ def norm(self, *parts): return os.path.normpath(os.path.join(self.tempdir, *parts)) + def joins(self, *tuples): + return [os.path.join(self.tempdir, *parts) for parts in tuples] + def mktemp(self, *parts): filename = self.norm(*parts) base, file = os.path.split(filename) @@ -38,17 +41,17 @@ def tearDown(self): shutil.rmtree(self.tempdir) - def glob(self, *parts): + def glob(self, *parts, **kwargs): if len(parts) == 1: pattern = parts[0] else: pattern = os.path.join(*parts) p = os.path.join(self.tempdir, pattern) - res = glob.glob(p) - self.assertEqual(list(glob.iglob(p)), res) + res = glob.glob(p, **kwargs) + self.assertEqual(list(glob.iglob(p, **kwargs)), res) bres = [os.fsencode(x) for x in res] - self.assertEqual(glob.glob(os.fsencode(p)), bres) - self.assertEqual(list(glob.iglob(os.fsencode(p))), bres) + self.assertEqual(glob.glob(os.fsencode(p), **kwargs), bres) + self.assertEqual(list(glob.iglob(os.fsencode(p), **kwargs)), bres) return res def assertSequencesEqual_noorder(self, l1, l2): @@ -192,9 +195,116 @@ check('//?/c:/?', '//?/c:/[?]') check('//*/*/*', '//*/*/[*]') -def test_main(): - run_unittest(GlobTests) + def rglob(self, *parts, **kwargs): + return self.glob(*parts, recursive=True, **kwargs) + + def test_recursive_glob(self): + eq = self.assertSequencesEqual_noorder + full = [('ZZZ',), + ('a',), ('a', 'D'), + ('a', 'bcd'), + ('a', 'bcd', 'EF'), + ('a', 'bcd', 'efg'), + ('a', 'bcd', 'efg', 'ha'), + ('aaa',), ('aaa', 'zzzF'), + ('aab',), ('aab', 'F'), + ] + if can_symlink(): + full += [('sym1',), ('sym2',), + ('sym3',), + ('sym3', 'EF'), + ('sym3', 'efg'), + ('sym3', 'efg', 'ha'), + ] + eq(self.rglob('**'), self.joins(('',), *full)) + eq(self.rglob('.', '**'), self.joins(('.',''), + *(('.',) + i for i in full))) + dirs = [('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''), + ('aaa', ''), ('aab', '')] + if can_symlink(): + dirs += [('sym3', ''), ('sym3', 'efg', '')] + eq(self.rglob('**', ''), self.joins(('',), *dirs)) + + eq(self.rglob('a', '**'), self.joins( + ('a', ''), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), + ('a', 'bcd', 'efg'), ('a', 'bcd', 'efg', 'ha'))) + eq(self.rglob('a**'), self.joins(('a',), ('aaa',), ('aab',))) + expect = [('a', 'bcd', 'EF')] + if can_symlink(): + expect += [('sym3', 'EF')] + eq(self.rglob('**', 'EF'), self.joins(*expect)) + expect = [('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F')] + if can_symlink(): + expect += [('sym3', 'EF')] + eq(self.rglob('**', '*F'), self.joins(*expect)) + eq(self.rglob('**', '*F', ''), []) + eq(self.rglob('**', 'bcd', '*'), self.joins( + ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'))) + eq(self.rglob('a', '**', 'bcd'), self.joins(('a', 'bcd'))) + + predir = os.path.abspath(os.curdir) + try: + os.chdir(self.tempdir) + join = os.path.join + eq(glob.glob('**', recursive=True), [join(*i) for i in full]) + eq(glob.glob(join('**', ''), recursive=True), + [join(*i) for i in dirs]) + eq(glob.glob(join('**','zz*F'), recursive=True), + [join('aaa', 'zzzF')]) + eq(glob.glob('**zz*F', recursive=True), []) + expect = [join('a', 'bcd', 'EF')] + if can_symlink(): + expect += [join('sym3', 'EF')] + eq(glob.glob(join('**', 'EF'), recursive=True), expect) + finally: + os.chdir(predir) + + + at 
skip_unless_symlink +class SymlinkLoopGlobTests(unittest.TestCase): + + def test_selflink(self): + tempdir = TESTFN + "_dir" + os.makedirs(tempdir) + create_empty_file(os.path.join(tempdir, 'file')) + os.symlink(os.curdir, os.path.join(tempdir, 'link')) + self.addCleanup(shutil.rmtree, tempdir) + + results = glob.glob('**', recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*([tempdir] + ['link'] * depth)) + self.assertIn(path, results) + results.remove(path) + if not results: + break + path = os.path.join(path, 'file') + self.assertIn(path, results) + results.remove(path) + depth += 1 + + results = glob.glob(os.path.join('**', 'file'), recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*([tempdir] + ['link'] * depth + ['file'])) + self.assertIn(path, results) + results.remove(path) + depth += 1 + + results = glob.glob(os.path.join('**', ''), recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*([tempdir] + ['link'] * depth + [''])) + self.assertIn(path, results) + results.remove(path) + depth += 1 if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #13968: The glob module now supports recursive search in + subdirectories using the "**" pattern. + - Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with empty string or tuple argument. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 12:33:39 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 12:33:39 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIxMTQ3?= =?utf-8?q?=3A_sqlite3_now_raises_an_exception_if_the_request_contains_a_n?= =?utf-8?q?ull?= Message-ID: <3htxJz6wSbz7LjY@mail.python.org> http://hg.python.org/cpython/rev/430865e9ea9f changeset: 92401:430865e9ea9f branch: 2.7 parent: 92397:d6c7ab5a2065 user: Serhiy Storchaka date: Thu Sep 11 13:27:19 2014 +0300 summary: Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. files: Lib/sqlite3/test/regression.py | 10 ++++++++++ Misc/NEWS | 3 +++ Modules/_sqlite/connection.c | 3 ++- Modules/_sqlite/statement.c | 7 +++++-- 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/Lib/sqlite3/test/regression.py b/Lib/sqlite3/test/regression.py --- a/Lib/sqlite3/test/regression.py +++ b/Lib/sqlite3/test/regression.py @@ -319,6 +319,16 @@ sqlite.connect, ":memory:", isolation_level=123) + def CheckNullCharacter(self): + # Issue #21147 + con = sqlite.connect(":memory:") + self.assertRaises(ValueError, con, "\0select 1") + self.assertRaises(ValueError, con, "select 1\0") + cur = con.cursor() + self.assertRaises(ValueError, cur.execute, " \0select 2") + self.assertRaises(ValueError, cur.execute, "select 2\0") + + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #21147: sqlite3 now raises an exception if the request contains a null + character instead of truncate it. Based on patch by Victor Stinner. 
+ - Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with empty string or tuple argument. diff --git a/Modules/_sqlite/connection.c b/Modules/_sqlite/connection.c --- a/Modules/_sqlite/connection.c +++ b/Modules/_sqlite/connection.c @@ -1215,7 +1215,8 @@ if (rc == PYSQLITE_TOO_MUCH_SQL) { PyErr_SetString(pysqlite_Warning, "You can only execute one statement at a time."); } else if (rc == PYSQLITE_SQL_WRONG_TYPE) { - PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string or unicode."); + if (!PyErr_Occurred() || PyErr_ExceptionMatches(PyExc_TypeError)) + PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string or unicode."); } else { (void)pysqlite_statement_reset(statement); _pysqlite_seterror(self->db, NULL); diff --git a/Modules/_sqlite/statement.c b/Modules/_sqlite/statement.c --- a/Modules/_sqlite/statement.c +++ b/Modules/_sqlite/statement.c @@ -74,12 +74,15 @@ rc = PYSQLITE_SQL_WRONG_TYPE; return rc; } + sql_cstr = PyString_AsString(sql_str); + if (strlen(sql_cstr) != (size_t)PyString_GET_SIZE(sql_str)) { + PyErr_SetString(PyExc_ValueError, "the query contains a null character"); + return PYSQLITE_SQL_WRONG_TYPE; + } self->in_weakreflist = NULL; self->sql = sql_str; - sql_cstr = PyString_AsString(sql_str); - Py_BEGIN_ALLOW_THREADS rc = sqlite3_prepare(connection->db, sql_cstr, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 12:33:41 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 12:33:41 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxMTQ3?= =?utf-8?q?=3A_sqlite3_now_raises_an_exception_if_the_request_contains_a_n?= =?utf-8?q?ull?= Message-ID: <3htxK11gWHz7Ljn@mail.python.org> http://hg.python.org/cpython/rev/517f216d45ea changeset: 92402:517f216d45ea branch: 3.4 parent: 92398:6a96c28f9474 user: Serhiy Storchaka date: Thu Sep 11 13:29:05 2014 +0300 summary: Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. files: Lib/sqlite3/test/regression.py | 10 ++++++++++ Misc/NEWS | 3 +++ Modules/_sqlite/connection.c | 3 ++- Modules/_sqlite/statement.c | 4 ++++ 4 files changed, 19 insertions(+), 1 deletions(-) diff --git a/Lib/sqlite3/test/regression.py b/Lib/sqlite3/test/regression.py --- a/Lib/sqlite3/test/regression.py +++ b/Lib/sqlite3/test/regression.py @@ -336,6 +336,16 @@ sqlite.connect, ":memory:", isolation_level=123) + def CheckNullCharacter(self): + # Issue #21147 + con = sqlite.connect(":memory:") + self.assertRaises(ValueError, con, "\0select 1") + self.assertRaises(ValueError, con, "select 1\0") + cur = con.cursor() + self.assertRaises(ValueError, cur.execute, " \0select 2") + self.assertRaises(ValueError, cur.execute, "select 2\0") + + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #21147: sqlite3 now raises an exception if the request contains a null + character instead of truncate it. Based on patch by Victor Stinner. + - Issue #21951: Fixed a crash in Tkinter on AIX when called Tcl command with empty string or tuple argument. 
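The behaviour change is visible directly at the DB-API level; a minimal sketch of what the patched module does (an in-memory database is used so the snippet has no side effects):

    import sqlite3

    con = sqlite3.connect(':memory:')
    try:
        # The embedded NUL byte used to truncate the statement silently;
        # after this change it is rejected up front.
        con.execute('select 1\0select 2')
    except ValueError as exc:
        print(exc)   # the query contains a null character

The C-level part of the fix follows below: statement.c now compares strlen() of the SQL text with the length of the Python string object and raises ValueError before the statement ever reaches SQLite.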
diff --git a/Modules/_sqlite/connection.c b/Modules/_sqlite/connection.c --- a/Modules/_sqlite/connection.c +++ b/Modules/_sqlite/connection.c @@ -1261,7 +1261,8 @@ if (rc == PYSQLITE_TOO_MUCH_SQL) { PyErr_SetString(pysqlite_Warning, "You can only execute one statement at a time."); } else if (rc == PYSQLITE_SQL_WRONG_TYPE) { - PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string or unicode."); + if (PyErr_ExceptionMatches(PyExc_TypeError)) + PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string."); } else { (void)pysqlite_statement_reset(statement); _pysqlite_seterror(self->db, NULL); diff --git a/Modules/_sqlite/statement.c b/Modules/_sqlite/statement.c --- a/Modules/_sqlite/statement.c +++ b/Modules/_sqlite/statement.c @@ -63,6 +63,10 @@ rc = PYSQLITE_SQL_WRONG_TYPE; return rc; } + if (strlen(sql_cstr) != (size_t)sql_cstr_len) { + PyErr_SetString(PyExc_ValueError, "the query contains a null character"); + return PYSQLITE_SQL_WRONG_TYPE; + } self->in_weakreflist = NULL; Py_INCREF(sql); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 12:33:42 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 12:33:42 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321147=3A_sqlite3_now_raises_an_exception_if_the?= =?utf-8?q?_request_contains_a_null?= Message-ID: <3htxK23mgZz7Lk2@mail.python.org> http://hg.python.org/cpython/rev/b81f5652c2d7 changeset: 92403:b81f5652c2d7 parent: 92400:ff4b9d654691 parent: 92402:517f216d45ea user: Serhiy Storchaka date: Thu Sep 11 13:30:48 2014 +0300 summary: Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. files: Lib/sqlite3/test/regression.py | 10 ++++++++++ Misc/NEWS | 3 +++ Modules/_sqlite/connection.c | 3 ++- Modules/_sqlite/statement.c | 4 ++++ 4 files changed, 19 insertions(+), 1 deletions(-) diff --git a/Lib/sqlite3/test/regression.py b/Lib/sqlite3/test/regression.py --- a/Lib/sqlite3/test/regression.py +++ b/Lib/sqlite3/test/regression.py @@ -336,6 +336,16 @@ sqlite.connect, ":memory:", isolation_level=123) + def CheckNullCharacter(self): + # Issue #21147 + con = sqlite.connect(":memory:") + self.assertRaises(ValueError, con, "\0select 1") + self.assertRaises(ValueError, con, "select 1\0") + cur = con.cursor() + self.assertRaises(ValueError, cur.execute, " \0select 2") + self.assertRaises(ValueError, cur.execute, "select 2\0") + + def suite(): regression_suite = unittest.makeSuite(RegressionTests, "Check") return unittest.TestSuite((regression_suite,)) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #21147: sqlite3 now raises an exception if the request contains a null + character instead of truncate it. Based on patch by Victor Stinner. + - Issue #13968: The glob module now supports recursive search in subdirectories using the "**" pattern. diff --git a/Modules/_sqlite/connection.c b/Modules/_sqlite/connection.c --- a/Modules/_sqlite/connection.c +++ b/Modules/_sqlite/connection.c @@ -1261,7 +1261,8 @@ if (rc == PYSQLITE_TOO_MUCH_SQL) { PyErr_SetString(pysqlite_Warning, "You can only execute one statement at a time."); } else if (rc == PYSQLITE_SQL_WRONG_TYPE) { - PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. 
Must be string or unicode."); + if (PyErr_ExceptionMatches(PyExc_TypeError)) + PyErr_SetString(pysqlite_Warning, "SQL is of wrong type. Must be string."); } else { (void)pysqlite_statement_reset(statement); _pysqlite_seterror(self->db, NULL); diff --git a/Modules/_sqlite/statement.c b/Modules/_sqlite/statement.c --- a/Modules/_sqlite/statement.c +++ b/Modules/_sqlite/statement.c @@ -63,6 +63,10 @@ rc = PYSQLITE_SQL_WRONG_TYPE; return rc; } + if (strlen(sql_cstr) != (size_t)sql_cstr_len) { + PyErr_SetString(PyExc_ValueError, "the query contains a null character"); + return PYSQLITE_SQL_WRONG_TYPE; + } self->in_weakreflist = NULL; Py_INCREF(sql); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 14:34:48 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 11 Sep 2014 14:34:48 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2313968=3A_Fixed_ne?= =?utf-8?q?wly_added_recursive_glob_test=2E?= Message-ID: <3hv00m3m8Kz7Ljl@mail.python.org> http://hg.python.org/cpython/rev/180f5bf7d1b9 changeset: 92404:180f5bf7d1b9 user: Serhiy Storchaka date: Thu Sep 11 14:33:02 2014 +0300 summary: Issue #13968: Fixed newly added recursive glob test. It was failed when run with non-empty current directory. files: Lib/test/test_glob.py | 72 +++++++++++++++--------------- 1 files changed, 37 insertions(+), 35 deletions(-) diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py --- a/Lib/test/test_glob.py +++ b/Lib/test/test_glob.py @@ -5,7 +5,7 @@ import unittest from test.support import (TESTFN, skip_unless_symlink, - can_symlink, create_empty_file) + can_symlink, create_empty_file, change_cwd) class GlobTests(unittest.TestCase): @@ -266,44 +266,46 @@ def test_selflink(self): tempdir = TESTFN + "_dir" os.makedirs(tempdir) - create_empty_file(os.path.join(tempdir, 'file')) - os.symlink(os.curdir, os.path.join(tempdir, 'link')) self.addCleanup(shutil.rmtree, tempdir) + with change_cwd(tempdir): + os.makedirs('dir') + create_empty_file(os.path.join('dir', 'file')) + os.symlink(os.curdir, os.path.join('dir', 'link')) - results = glob.glob('**', recursive=True) - self.assertEqual(len(results), len(set(results))) - results = set(results) - depth = 0 - while results: - path = os.path.join(*([tempdir] + ['link'] * depth)) - self.assertIn(path, results) - results.remove(path) - if not results: - break - path = os.path.join(path, 'file') - self.assertIn(path, results) - results.remove(path) - depth += 1 + results = glob.glob('**', recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*(['dir'] + ['link'] * depth)) + self.assertIn(path, results) + results.remove(path) + if not results: + break + path = os.path.join(path, 'file') + self.assertIn(path, results) + results.remove(path) + depth += 1 - results = glob.glob(os.path.join('**', 'file'), recursive=True) - self.assertEqual(len(results), len(set(results))) - results = set(results) - depth = 0 - while results: - path = os.path.join(*([tempdir] + ['link'] * depth + ['file'])) - self.assertIn(path, results) - results.remove(path) - depth += 1 + results = glob.glob(os.path.join('**', 'file'), recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*(['dir'] + ['link'] * depth + ['file'])) + self.assertIn(path, results) + results.remove(path) + depth += 1 - results = glob.glob(os.path.join('**', ''), recursive=True) - 
self.assertEqual(len(results), len(set(results))) - results = set(results) - depth = 0 - while results: - path = os.path.join(*([tempdir] + ['link'] * depth + [''])) - self.assertIn(path, results) - results.remove(path) - depth += 1 + results = glob.glob(os.path.join('**', ''), recursive=True) + self.assertEqual(len(results), len(set(results))) + results = set(results) + depth = 0 + while results: + path = os.path.join(*(['dir'] + ['link'] * depth + [''])) + self.assertIn(path, results) + results.remove(path) + depth += 1 if __name__ == "__main__": -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 17:52:37 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 11 Sep 2014 17:52:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2322336=3A_attempt?= =?utf-8?q?ckalloc=28=29_with_PyMem=5FMalloc=28=29_in_=5Ftkinter?= Message-ID: <3hv4P10mvsz7Ljc@mail.python.org> http://hg.python.org/cpython/rev/9f1d3e6e6ce6 changeset: 92405:9f1d3e6e6ce6 user: Victor Stinner date: Thu Sep 11 17:50:21 2014 +0200 summary: Closes #22336: attemptckalloc() with PyMem_Malloc() in _tkinter The PyMem_Malloc(size) function has a well defined behaviour: if size is 0, a pointer different than NULL is returned. PyMem_Malloc() allocations are tracked by tracemalloc, attemptckalloc() allocations are not tracked. files: Modules/_tkinter.c | 24 ++++++++++++------------ 1 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -605,7 +605,7 @@ Tcl_SetVar(v->interp, "tcl_interactive", "0", TCL_GLOBAL_ONLY); /* This is used to get the application class for Tk 4.1 and up */ - argv0 = (char*)attemptckalloc(strlen(className) + 1); + argv0 = (char*)PyMem_Malloc(strlen(className) + 1); if (!argv0) { PyErr_NoMemory(); Py_DECREF(v); @@ -616,7 +616,7 @@ if (Py_ISUPPER(Py_CHARMASK(argv0[0]))) argv0[0] = Py_TOLOWER(Py_CHARMASK(argv0[0])); Tcl_SetVar(v->interp, "argv0", argv0, TCL_GLOBAL_ONLY); - ckfree(argv0); + PyMem_Free(argv0); if (! 
wantTk) { Tcl_SetVar(v->interp, @@ -639,7 +639,7 @@ if (use) len += strlen(use) + sizeof "-use "; - args = (char*)attemptckalloc(len); + args = (char*)PyMem_Malloc(len); if (!args) { PyErr_NoMemory(); Py_DECREF(v); @@ -657,7 +657,7 @@ } Tcl_SetVar(v->interp, "argv", args, TCL_GLOBAL_ONLY); - ckfree(args); + PyMem_Free(args); } if (Tcl_AppInit(v->interp) != TCL_OK) { @@ -914,15 +914,15 @@ "list is too long"); return NULL; } - argv = (Tcl_Obj **) attemptckalloc(((size_t)size) * sizeof(Tcl_Obj *)); - if(!argv) { + argv = (Tcl_Obj **) PyMem_Malloc(((size_t)size) * sizeof(Tcl_Obj *)); + if (!argv) { PyErr_NoMemory(); return NULL; } for (i = 0; i < size; i++) argv[i] = AsObj(PySequence_Fast_GET_ITEM(value,i)); result = Tcl_NewListObj(size, argv); - ckfree(FREECAST argv); + PyMem_Free(argv); return result; } else if (PyUnicode_Check(value)) { @@ -948,7 +948,7 @@ if (kind == sizeof(Tcl_UniChar)) return Tcl_NewUnicodeObj(inbuf, size); allocsize = ((size_t)size) * sizeof(Tcl_UniChar); - outbuf = (Tcl_UniChar*)attemptckalloc(allocsize); + outbuf = (Tcl_UniChar*)PyMem_Malloc(allocsize); /* Else overflow occurred, and we take the next exit */ if (!outbuf) { PyErr_NoMemory(); @@ -965,14 +965,14 @@ "character U+%x is above the range " "(U+0000-U+FFFF) allowed by Tcl", ch); - ckfree(FREECAST outbuf); + PyMem_Free(outbuf); return NULL; } #endif outbuf[i] = ch; } result = Tcl_NewUnicodeObj(outbuf, size); - ckfree(FREECAST outbuf); + PyMem_Free(outbuf); return result; } else if(PyTclObject_Check(value)) { @@ -1084,7 +1084,7 @@ for (i = 0; i < objc; i++) Tcl_DecrRefCount(objv[i]); if (objv != objStore) - ckfree(FREECAST objv); + PyMem_Free(objv); } /* Convert Python objects to Tcl objects. This must happen in the @@ -1115,7 +1115,7 @@ "list is too long"); return NULL; } - objv = (Tcl_Obj **)attemptckalloc(((size_t)objc) * sizeof(Tcl_Obj *)); + objv = (Tcl_Obj **)PyMem_Malloc(((size_t)objc) * sizeof(Tcl_Obj *)); if (objv == NULL) { PyErr_NoMemory(); objc = 0; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 11 17:53:30 2014 From: python-checkins at python.org (victor.stinner) Date: Thu, 11 Sep 2014 17:53:30 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogdGVzdF9kaXN0dXRp?= =?utf-8?q?ls=3A_remove_the_test_file_in_DistributionTestCase=2Etest=5Fdeb?= =?utf-8?b?dWdfbW9kZSgp?= Message-ID: <3hv4Q216TQz7Ljc@mail.python.org> http://hg.python.org/cpython/rev/97f1ee2264bb changeset: 92406:97f1ee2264bb branch: 2.7 parent: 92401:430865e9ea9f user: Victor Stinner date: Thu Sep 11 17:52:58 2014 +0200 summary: test_distutils: remove the test file in DistributionTestCase.test_debug_mode() Fix the warning: "test_support.TESTFN was modified by test_distutils". 
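The pattern used in this fix, registering the cleanup as soon as the scratch file is written, is worth copying in other tests; a minimal sketch against the 2.7 test helpers (the test name is invented):

    import unittest
    from test.test_support import TESTFN, unlink

    class ExampleTest(unittest.TestCase):
        def test_uses_a_scratch_file(self):
            with open(TESTFN, 'w') as f:
                f.write('[global]\n')
            # addCleanup() runs even when the assertions below fail, so
            # TESTFN never leaks into later tests in the same run.
            self.addCleanup(unlink, TESTFN)
            with open(TESTFN) as f:
                self.assertTrue(f.read().startswith('[global]'))

Because addCleanup() callbacks run after tearDown(), in reverse order of registration, the file is removed no matter how the test exits.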
files: Lib/distutils/tests/test_dist.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/distutils/tests/test_dist.py b/Lib/distutils/tests/test_dist.py --- a/Lib/distutils/tests/test_dist.py +++ b/Lib/distutils/tests/test_dist.py @@ -11,7 +11,7 @@ from distutils.dist import Distribution, fix_help_options from distutils.cmd import Command import distutils.dist -from test.test_support import TESTFN, captured_stdout, run_unittest +from test.test_support import TESTFN, captured_stdout, run_unittest, unlink from distutils.tests import support @@ -64,6 +64,7 @@ with open(TESTFN, "w") as f: f.write("[global]\n") f.write("command_packages = foo.bar, splat") + self.addCleanup(unlink, TESTFN) files = [TESTFN] sys.argv.append("build") -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 00:09:01 2014 From: python-checkins at python.org (vinay.sajip) Date: Fri, 12 Sep 2014 00:09:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzg2?= =?utf-8?q?=3A_fixed_regression=2E?= Message-ID: <3hvDlK3G4nz7LjQ@mail.python.org> http://hg.python.org/cpython/rev/a4c5effb8698 changeset: 92407:a4c5effb8698 branch: 3.4 parent: 92402:517f216d45ea user: Vinay Sajip date: Thu Sep 11 23:06:09 2014 +0100 summary: Issue #22386: fixed regression. files: Doc/library/logging.rst | 4 ++-- Lib/logging/__init__.py | 3 ++- Lib/test/test_logging.py | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -1052,8 +1052,8 @@ .. versionchanged:: 3.4 In Python versions earlier than 3.4, this function could also be passed a text level, and would return the corresponding numeric value of the level. - This undocumented behaviour was a mistake, and has been removed in Python - 3.4. + This undocumented behaviour was considered a mistake, and was removed in + Python 3.4, but reinstated in 3.4.2 due to retain backward compatibility. .. function:: makeLogRecord(attrdict) diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -129,7 +129,8 @@ Otherwise, the string "Level %s" % level is returned. 
""" - return _levelToName.get(level, ("Level %s" % level)) + # See Issue #22386 for the reason for this convoluted expression + return _levelToName.get(level, _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -313,6 +313,10 @@ ('INF.BADPARENT', 'INFO', '4'), ]) + def test_regression_22386(self): + """See issue #22386 for more information.""" + self.assertEqual(logging.getLevelName('INFO'), logging.INFO) + self.assertEqual(logging.getLevelName(logging.INFO), 'INFO') class BasicFilterTest(BaseTest): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 00:09:02 2014 From: python-checkins at python.org (vinay.sajip) Date: Fri, 12 Sep 2014 00:09:02 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2322386=3A_merged_fix_from_3=2E4=2E?= Message-ID: <3hvDlL51Mbz7LjQ@mail.python.org> http://hg.python.org/cpython/rev/070fed5b7b9d changeset: 92408:070fed5b7b9d parent: 92405:9f1d3e6e6ce6 parent: 92407:a4c5effb8698 user: Vinay Sajip date: Thu Sep 11 23:08:48 2014 +0100 summary: Closes #22386: merged fix from 3.4. files: Doc/library/logging.rst | 4 ++-- Lib/logging/__init__.py | 3 ++- Lib/test/test_logging.py | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -1052,8 +1052,8 @@ .. versionchanged:: 3.4 In Python versions earlier than 3.4, this function could also be passed a text level, and would return the corresponding numeric value of the level. - This undocumented behaviour was a mistake, and has been removed in Python - 3.4. + This undocumented behaviour was considered a mistake, and was removed in + Python 3.4, but reinstated in 3.4.2 due to retain backward compatibility. .. function:: makeLogRecord(attrdict) diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -129,7 +129,8 @@ Otherwise, the string "Level %s" % level is returned. 
""" - return _levelToName.get(level, ("Level %s" % level)) + # See Issue #22386 for the reason for this convoluted expression + return _levelToName.get(level, _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -307,6 +307,10 @@ ('INF.BADPARENT', 'INFO', '4'), ]) + def test_regression_22386(self): + """See issue #22386 for more information.""" + self.assertEqual(logging.getLevelName('INFO'), logging.INFO) + self.assertEqual(logging.getLevelName(logging.INFO), 'INFO') class BasicFilterTest(BaseTest): -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Fri Sep 12 10:37:05 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 12 Sep 2014 10:37:05 +0200 Subject: [Python-checkins] Daily reference leaks (070fed5b7b9d): sum=151989 Message-ID: results for 070fed5b7b9d on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_multiprocessing_spawn leaked [0, 0, 38] references, sum=38 test_multiprocessing_spawn leaked [0, 0, 17] memory blocks, sum=17 test_site leaked [-2, 0, 2] references, sum=0 test_site leaked [-2, 0, 2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogPI4Ne1', '-x'] From python-checkins at python.org Fri Sep 12 16:40:06 2014 From: python-checkins at python.org (brett.cannon) Date: Fri, 12 Sep 2014 16:40:06 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2316104=3A_Allow_co?= =?utf-8?q?mpileall_to_do_parallel_bytecode_compilation=2E?= Message-ID: <3hvfkt6rw2z7LjX@mail.python.org> http://hg.python.org/cpython/rev/9efefcab817e changeset: 92409:9efefcab817e user: Brett Cannon date: Fri Sep 12 10:39:48 2014 -0400 summary: Issue #16104: Allow compileall to do parallel bytecode compilation. Both compileall.compile_dir() and the CLI for compileall now allow for specifying how many workers to use (or 0 to use all CPUs). Thanks to Claudiu Popa for the patch. files: Doc/library/compileall.rst | 19 +++++- Doc/whatsnew/3.5.rst | 7 ++ Lib/compileall.py | 85 +++++++++++++++++------- Lib/test/test_compileall.py | 57 ++++++++++++++++ 4 files changed, 140 insertions(+), 28 deletions(-) diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst --- a/Doc/library/compileall.rst +++ b/Doc/library/compileall.rst @@ -73,12 +73,18 @@ :program:`python -m compileall -r 0` is equivalent to :program:`python -m compileall -l`. +.. cmdoption:: -j N + + Use *N* workers to compile the files within the given directory. + If ``0`` is used, then the result of :func:`os.cpu_count()` + will be used. .. versionchanged:: 3.2 Added the ``-i``, ``-b`` and ``-h`` options. .. versionchanged:: 3.5 - Added the ``-r`` option. + Added the ``-j`` and ``-r`` options. + There is no command-line option to control the optimization level used by the :func:`compile` function, because the Python interpreter itself already @@ -87,7 +93,7 @@ Public functions ---------------- -.. function:: compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, quiet=False, legacy=False, optimize=-1) +.. 
function:: compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, quiet=False, legacy=False, optimize=-1, workers=1) Recursively descend the directory tree named by *dir*, compiling all :file:`.py` files along the way. @@ -120,9 +126,18 @@ *optimize* specifies the optimization level for the compiler. It is passed to the built-in :func:`compile` function. + The argument *workers* specifies how many workers are used to + compile files in parallel. The default is to not use multiple workers. + If the platform can't use multiple workers and *workers* argument is given, + then a :exc:`NotImplementedError` will be raised. + If *workers* is lower than ``0``, a :exc:`ValueError` will be raised. + .. versionchanged:: 3.2 Added the *legacy* and *optimize* parameter. + .. versionchanged:: 3.5 + Added the *workers* parameter. + .. function:: compile_file(fullname, ddir=None, force=False, rx=None, quiet=False, legacy=False, optimize=-1) diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -134,6 +134,13 @@ Improved Modules ================ +compileall +---------- + +* :func:`compileall.compile_dir` and :mod:`compileall`'s command-line interface + can now do parallel bytecode compilation. + (Contributed by Claudiu Popa in :issue:`16104`). + doctest ------- diff --git a/Lib/compileall.py b/Lib/compileall.py --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -16,10 +16,40 @@ import py_compile import struct +try: + from concurrent.futures import ProcessPoolExecutor +except ImportError: + ProcessPoolExecutor = None +from functools import partial + __all__ = ["compile_dir","compile_file","compile_path"] +def _walk_dir(dir, ddir=None, maxlevels=10, quiet=False): + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if ddir is not None: + dfile = os.path.join(ddir, name) + else: + dfile = None + if not os.path.isdir(fullname): + yield fullname + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, ddir=dfile, + maxlevels=maxlevels - 1, quiet=quiet) + def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, - quiet=False, legacy=False, optimize=-1): + quiet=False, legacy=False, optimize=-1, workers=1): """Byte-compile all modules in the given directory tree. 
Arguments (only dir is required): @@ -32,33 +62,31 @@ quiet: if True, be quiet during compilation legacy: if True, produce legacy pyc paths instead of PEP 3147 paths optimize: optimization level or -1 for level of the interpreter + workers: maximum number of parallel workers """ - if not quiet: - print('Listing {!r}...'.format(dir)) - try: - names = os.listdir(dir) - except OSError: - print("Can't list {!r}".format(dir)) - names = [] - names.sort() + files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels, + ddir=ddir) success = 1 - for name in names: - if name == '__pycache__': - continue - fullname = os.path.join(dir, name) - if ddir is not None: - dfile = os.path.join(ddir, name) - else: - dfile = None - if not os.path.isdir(fullname): - if not compile_file(fullname, ddir, force, rx, quiet, + if workers is not None and workers != 1: + if workers < 0: + raise ValueError('workers must be greater or equal to 0') + if ProcessPoolExecutor is None: + raise NotImplementedError('multiprocessing support not available') + + workers = workers or None + with ProcessPoolExecutor(max_workers=workers) as executor: + results = executor.map(partial(compile_file, + ddir=ddir, force=force, + rx=rx, quiet=quiet, + legacy=legacy, + optimize=optimize), + files) + success = min(results, default=1) + else: + for file in files: + if not compile_file(file, ddir, force, rx, quiet, legacy, optimize): success = 0 - elif (maxlevels > 0 and name != os.curdir and name != os.pardir and - os.path.isdir(fullname) and not os.path.islink(fullname)): - if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, - quiet, legacy, optimize): - success = 0 return success def compile_file(fullname, ddir=None, force=False, rx=None, quiet=False, @@ -196,8 +224,10 @@ help=('zero or more file and directory names ' 'to compile; if no arguments given, defaults ' 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + args = parser.parse_args() - compile_dests = args.compile_dest if (args.ddir and (len(compile_dests) != 1 @@ -223,6 +253,9 @@ print("Error reading file list {}".format(args.flist)) return False + if args.workers is not None: + args.workers = args.workers or None + success = True try: if compile_dests: @@ -234,7 +267,7 @@ else: if not compile_dir(dest, maxlevels, args.ddir, args.force, args.rx, args.quiet, - args.legacy): + args.legacy, workers=args.workers): success = False return success else: diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -10,6 +10,13 @@ import unittest import io +from unittest import mock, skipUnless +try: + from concurrent.futures import ProcessPoolExecutor + _have_multiprocessing = True +except ImportError: + _have_multiprocessing = False + from test import support, script_helper class CompileallTests(unittest.TestCase): @@ -106,6 +113,33 @@ debug_override=not optimize) self.assertTrue(os.path.isfile(cached3)) + @mock.patch('compileall.ProcessPoolExecutor') + def test_compile_pool_called(self, pool_mock): + compileall.compile_dir(self.directory, quiet=True, workers=5) + self.assertTrue(pool_mock.called) + + def test_compile_workers_non_positive(self): + with self.assertRaisesRegex(ValueError, + "workers must be greater or equal to 0"): + compileall.compile_dir(self.directory, workers=-1) + + @mock.patch('compileall.ProcessPoolExecutor') + def test_compile_workers_cpu_count(self, pool_mock): + 
compileall.compile_dir(self.directory, quiet=True, workers=0) + self.assertEqual(pool_mock.call_args[1]['max_workers'], None) + + @mock.patch('compileall.ProcessPoolExecutor') + @mock.patch('compileall.compile_file') + def test_compile_one_worker(self, compile_file_mock, pool_mock): + compileall.compile_dir(self.directory, quiet=True) + self.assertFalse(pool_mock.called) + self.assertTrue(compile_file_mock.called) + + @mock.patch('compileall.ProcessPoolExecutor', new=None) + def test_compile_missing_multiprocessing(self): + with self.assertRaisesRegex(NotImplementedError, + "multiprocessing support not available"): + compileall.compile_dir(self.directory, quiet=True, workers=5) class EncodingTest(unittest.TestCase): """Issue 6716: compileall should escape source code when printing errors @@ -413,6 +447,29 @@ out = self.assertRunOK('badfilename') self.assertRegex(out, b"Can't list 'badfilename'") + @skipUnless(_have_multiprocessing, "requires multiprocessing") + def test_workers(self): + bar2fn = script_helper.make_script(self.directory, 'bar2', '') + files = [] + for suffix in range(5): + pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix)) + os.mkdir(pkgdir) + fn = script_helper.make_script(pkgdir, '__init__', '') + files.append(script_helper.make_script(pkgdir, 'bar2', '')) + + self.assertRunOK(self.directory, '-j', '0') + self.assertCompiled(bar2fn) + for file in files: + self.assertCompiled(file) + + @mock.patch('compileall.compile_dir') + def test_workers_available_cores(self, compile_dir): + with mock.patch("sys.argv", + new=[sys.executable, self.directory, "-j0"]): + compileall.main() + self.assertTrue(compile_dir.called) + self.assertEqual(compile_dir.call_args[-1]['workers'], None) + if __name__ == "__main__": unittest.main() -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 16:41:26 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 16:41:26 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?hooks=3A_remove_trailing_whitespace?= Message-ID: <3hvfmQ0bm9z7Lkj@mail.python.org> http://hg.python.org/hooks/rev/f329305f5d37 changeset: 89:f329305f5d37 user: Benjamin Peterson date: Fri Sep 12 10:40:23 2014 -0400 summary: remove trailing whitespace files: hgbuildbot.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/hgbuildbot.py b/hgbuildbot.py --- a/hgbuildbot.py +++ b/hgbuildbot.py @@ -48,7 +48,7 @@ s = sendchange.Sender(master) d = defer.Deferred() reactor.callLater(0, d.callback, None) - + def send(res, c): return s.send(**c) for change in changes: -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Fri Sep 12 16:41:27 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 16:41:27 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?hooks=3A_buildbot_is_now_installed_in?= =?utf-8?q?_the_system_python?= Message-ID: <3hvfmR2Zhwz7Llf@mail.python.org> http://hg.python.org/hooks/rev/1d809ed028de changeset: 90:1d809ed028de user: Benjamin Peterson date: Fri Sep 12 10:41:21 2014 -0400 summary: buildbot is now installed in the system python files: hgbuildbot.py | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/hgbuildbot.py b/hgbuildbot.py --- a/hgbuildbot.py +++ b/hgbuildbot.py @@ -38,8 +38,6 @@ from twisted.internet import defer, reactor -sys.path.append('/data/buildbot/lib/python') - def sendchanges(ui, master, changes): # send change information to one master -- Repository URL: http://hg.python.org/hooks From 
python-checkins at python.org Fri Sep 12 20:13:37 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 20:13:37 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?hooks=3A_add_support_for_logging_in?= Message-ID: <3hvlTF23Bfz7Ljg@mail.python.org> http://hg.python.org/hooks/rev/2a814d66b1e9 changeset: 91:2a814d66b1e9 user: Benjamin Peterson date: Fri Sep 12 17:56:46 2014 +0000 summary: add support for logging in files: mail.py | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/mail.py b/mail.py --- a/mail.py +++ b/mail.py @@ -151,6 +151,9 @@ host = ui.config('smtp', 'host', '') port = int(ui.config('smtp', 'port', 0)) smtp = smtplib.SMTP(host, port) + username = ui.config('smtp', 'username', '') + if username: + smtp.login(username, ui.config('smtp', 'password', '')) send(smtp, subj, sender, to, '\n'.join(body) + '\n') smtp.close() -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Fri Sep 12 21:33:56 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 19:33:56 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_difficult_choices?= Message-ID: <20140912183308.117577.1337@mail.hg.python.org> http://hg.python.org/test/rev/3145d7515eac changeset: 211:3145d7515eac user: Benjamin Peterson date: Fri Sep 12 14:33:05 2014 -0400 summary: difficult choices files: a | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/a b/a --- a/a +++ b/a @@ -1,4 +1,4 @@ -aaaaaaaaaaa +aaaaaaaaaa aabaa baaah (said the sheep) a2aa -- Repository URL: http://hg.python.org/test From python-checkins at python.org Fri Sep 12 21:35:39 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 19:35:39 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_this_file_is_boring?= Message-ID: <20140912193537.116507.62424@mail.hg.python.org> http://hg.python.org/test/rev/9043cf080599 changeset: 214:9043cf080599 user: Benjamin Peterson date: Fri Sep 12 15:35:35 2014 -0400 summary: this file is boring files: cc | 9 --------- 1 files changed, 0 insertions(+), 9 deletions(-) diff --git a/cc b/cc deleted file mode 100644 --- a/cc +++ /dev/null @@ -1,9 +0,0 @@ -ccc -c -c -c -c -c -c -c -c -- Repository URL: http://hg.python.org/test From python-checkins at python.org Fri Sep 12 21:48:12 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 12 Sep 2014 21:48:12 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?cpython=3A_inspect=2ESignature=3A_Fix?= =?utf-8?b?IGRpc2NyZXBhbmN5IGJldHdlZW4gX19lcV9fIGFuZCBfX2hhc2hfXy4=?= Message-ID: <3hvnZN1qw4z7Ljf@mail.python.org> http://hg.python.org/cpython/rev/3b974b61e74d changeset: 92410:3b974b61e74d user: Yury Selivanov date: Fri Sep 12 15:48:02 2014 -0400 summary: inspect.Signature: Fix discrepancy between __eq__ and __hash__. Issue #20334. Thanks to Antony Lee for bug report & initial patch. 
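The invariant being restored is the usual hash contract: signatures that compare equal must hash equal, even though __eq__ deliberately ignores the order of keyword-only parameters. A minimal sketch of the guaranteed behaviour (the function names are invented):

    import inspect

    def foo(pos, *, a, b, c=1):
        pass

    def bar(pos, *, c=1, b, a):   # same keyword-only names, different order
        pass

    sig_foo = inspect.signature(foo)
    sig_bar = inspect.signature(bar)
    assert sig_foo == sig_bar                # keyword-only order is ignored
    assert hash(sig_foo) == hash(sig_bar)    # ...and the hashes now agree
    print(len({sig_foo, sig_bar}))           # 1

The new _hash_basis() helper in the diff below achieves this by hashing the keyword-only parameters as a frozenset instead of as part of the ordered parameter tuple.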
files: Lib/inspect.py | 55 +++++++-------------------- Lib/test/test_inspect.py | 24 ++++++++++++ Misc/NEWS | 1 + 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/Lib/inspect.py b/Lib/inspect.py --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -2239,14 +2239,7 @@ id(self), self) def __hash__(self): - hash_tuple = (self.name, int(self.kind)) - - if self._annotation is not _empty: - hash_tuple += (self._annotation,) - if self._default is not _empty: - hash_tuple += (self._default,) - - return hash(hash_tuple) + return hash((self.name, self.kind, self.annotation, self.default)) def __eq__(self, other): return (issubclass(other.__class__, Parameter) and @@ -2541,41 +2534,23 @@ return type(self)(parameters, return_annotation=return_annotation) + def _hash_basis(self): + params = tuple(param for param in self.parameters.values() + if param.kind != _KEYWORD_ONLY) + + kwo_params = {param.name: param for param in self.parameters.values() + if param.kind == _KEYWORD_ONLY} + + return params, kwo_params, self.return_annotation + def __hash__(self): - hash_tuple = tuple(self.parameters.values()) - if self._return_annotation is not _empty: - hash_tuple += (self._return_annotation,) - return hash(hash_tuple) + params, kwo_params, return_annotation = self._hash_basis() + kwo_params = frozenset(kwo_params.values()) + return hash((params, kwo_params, return_annotation)) def __eq__(self, other): - if (not issubclass(type(other), Signature) or - self.return_annotation != other.return_annotation or - len(self.parameters) != len(other.parameters)): - return False - - other_positions = {param: idx - for idx, param in enumerate(other.parameters.keys())} - - for idx, (param_name, param) in enumerate(self.parameters.items()): - if param.kind == _KEYWORD_ONLY: - try: - other_param = other.parameters[param_name] - except KeyError: - return False - else: - if param != other_param: - return False - else: - try: - other_idx = other_positions[param_name] - except KeyError: - return False - else: - if (idx != other_idx or - param != other.parameters[param_name]): - return False - - return True + return (isinstance(other, Signature) and + self._hash_basis() == other._hash_basis()) def __ne__(self, other): return not self.__eq__(other) diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py --- a/Lib/test/test_inspect.py +++ b/Lib/test/test_inspect.py @@ -2535,43 +2535,67 @@ def bar(a, *, b:int) -> float: pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int) -> int: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int): pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int=42) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, c) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, b:int) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def spam(b:int, a) -> float: pass 
self.assertNotEqual(inspect.signature(spam), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(spam)), hash(inspect.signature(bar))) def foo(*, a, b, c): pass def bar(*, c, b, a): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(*, a=1, b, c): pass def bar(*, c, b, a=1): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *, a=1, b, c): pass def bar(pos, *, c, b, a=1): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *, a, b, c): pass def bar(pos, *, c, b, a=1): pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *args, a=42, b, c, **kwargs:int): pass def bar(pos, *args, c, b, a=42, **kwargs:int): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def test_signature_hashable(self): S = inspect.Signature diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -814,6 +814,7 @@ keyword-only. - Issue #20334: inspect.Signature and inspect.Parameter are now hashable. + Thanks to Antony Lee for bug reports and suggestions. - Issue #15916: doctest.DocTestSuite returns an empty unittest.TestSuite instead of raising ValueError if it finds no tests -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 22:19:01 2014 From: python-checkins at python.org (steve.dower) Date: Fri, 12 Sep 2014 22:19:01 +0200 (CEST) Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogIzIyMzk4IFRvb2xz?= =?utf-8?q?/msi_enhancements_for_2=2E7?= Message-ID: <3hvpFx0KDTz7Ljg@mail.python.org> http://hg.python.org/cpython/rev/5c55a7bfec0c changeset: 92411:5c55a7bfec0c branch: 2.7 parent: 92406:97f1ee2264bb user: Steve Dower date: Fri Sep 12 11:48:13 2014 -0700 summary: #22398 Tools/msi enhancements for 2.7 Fix build_tkinter.py. Update msi.py to use environment vars and correct tcl/tk paths. Update msilib.py to generate short names for files with multiple dots in the name. files: PCbuild/build_tkinter.py | 2 +- Tools/msi/msi.py | 14 +++++++------- Tools/msi/msilib.py | 7 +------ 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/PCbuild/build_tkinter.py b/PCbuild/build_tkinter.py --- a/PCbuild/build_tkinter.py +++ b/PCbuild/build_tkinter.py @@ -23,7 +23,7 @@ '%s %s') def nmake(makefile, command="", **kw): - defines = ' '.join(k+'='+v for k, v in kw.items()) + defines = ' '.join('%s=%s' % i for i in kw.items()) cmd = NMAKE % (makefile, defines, command) print("\n\n"+cmd+"\n") if os.system(cmd) != 0: diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py --- a/Tools/msi/msi.py +++ b/Tools/msi/msi.py @@ -13,7 +13,7 @@ # 0 for official python.org releases # 1 for intermediate releases by anybody, with # a new product code for every package. -snapshot = 1 +snapshot = int(os.environ.get("SNAPSHOT", "1")) # 1 means that file extension is px, not py, # and binaries start with x testpackage = 0 @@ -22,15 +22,15 @@ # Text to be displayed as the version in dialogs etc. # goes into file name and ProductCode. 
Defaults to # current_version.day for Snapshot, current_version otherwise -full_current_version = None +full_current_version = os.environ.get("CURRENT_VERSION") # Is Tcl available at all? have_tcl = True # path to PCbuild directory -PCBUILD="PCbuild" +PCBUILD=os.environ.get("PCBUILD", "PCbuild") # msvcrt version MSVCR = "90" # Name of certificate in default store to sign MSI with -certname = None +certname = os.environ.get("CERTNAME", None) # Make a zip file containing the PDB files for this build? pdbzip = True @@ -894,8 +894,8 @@ for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), ("Berkeley DB", "db-*", "LICENSE"), ("openssl", "openssl-*", "LICENSE"), - ("Tcl", "tcl8*", "license.terms"), - ("Tk", "tk8*", "license.terms"), + ("Tcl", "tcl-8*", "license.terms"), + ("Tk", "tk-8*", "license.terms"), ("Tix", "tix-*", "license.terms")): out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name) dirs = glob.glob(srcdir+"/../"+pat) @@ -946,7 +946,7 @@ if not snapshot: # For releases, the Python DLL has the same version as the # installer package. - assert pyversion.split(".")[:3] == current_version.split(".") + assert pyversion.split(".")[:3] == current_version.split("."), "%s != %s" % (pyversion, current_version) dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor), version=pyversion, language=installer.FileVersion(pydllsrc, 1)) diff --git a/Tools/msi/msilib.py b/Tools/msi/msilib.py --- a/Tools/msi/msilib.py +++ b/Tools/msi/msilib.py @@ -484,12 +484,7 @@ def make_short(self, file): file = re.sub(r'[\?|><:/*"+,;=\[\]]', '_', file) # restrictions on short names - parts = file.split(".") - if len(parts)>1: - suffix = parts[-1].upper() - else: - suffix = None - prefix = parts[0].upper() + prefix, _, suffix = file.upper().rpartition(".") if len(prefix) <= 8 and (not suffix or len(suffix)<=3): if suffix: file = prefix+"."+suffix -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 22:20:05 2014 From: python-checkins at python.org (local-hg) Date: Fri, 12 Sep 2014 20:20:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogIzIyMzk4IFRvb2xz?= =?utf-8?q?/msi_enhancements_for_2=2E7?= Message-ID: <20140912202003.117591.91060@mail.hg.python.org> http://hg.python.org/cpython/rev/5c55a7bfec0c changeset: 92411:5c55a7bfec0c branch: 2.7 parent: 92406:97f1ee2264bb user: Steve Dower date: Fri Sep 12 11:48:13 2014 -0700 summary: #22398 Tools/msi enhancements for 2.7 Fix build_tkinter.py. Update msi.py to use environment vars and correct tcl/tk paths. Update msilib.py to generate short names for files with multiple dots in the name. files: PCbuild/build_tkinter.py | 2 +- Tools/msi/msi.py | 14 +++++++------- Tools/msi/msilib.py | 7 +------ 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/PCbuild/build_tkinter.py b/PCbuild/build_tkinter.py --- a/PCbuild/build_tkinter.py +++ b/PCbuild/build_tkinter.py @@ -23,7 +23,7 @@ '%s %s') def nmake(makefile, command="", **kw): - defines = ' '.join(k+'='+v for k, v in kw.items()) + defines = ' '.join('%s=%s' % i for i in kw.items()) cmd = NMAKE % (makefile, defines, command) print("\n\n"+cmd+"\n") if os.system(cmd) != 0: diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py --- a/Tools/msi/msi.py +++ b/Tools/msi/msi.py @@ -13,7 +13,7 @@ # 0 for official python.org releases # 1 for intermediate releases by anybody, with # a new product code for every package. 
-snapshot = 1 +snapshot = int(os.environ.get("SNAPSHOT", "1")) # 1 means that file extension is px, not py, # and binaries start with x testpackage = 0 @@ -22,15 +22,15 @@ # Text to be displayed as the version in dialogs etc. # goes into file name and ProductCode. Defaults to # current_version.day for Snapshot, current_version otherwise -full_current_version = None +full_current_version = os.environ.get("CURRENT_VERSION") # Is Tcl available at all? have_tcl = True # path to PCbuild directory -PCBUILD="PCbuild" +PCBUILD=os.environ.get("PCBUILD", "PCbuild") # msvcrt version MSVCR = "90" # Name of certificate in default store to sign MSI with -certname = None +certname = os.environ.get("CERTNAME", None) # Make a zip file containing the PDB files for this build? pdbzip = True @@ -894,8 +894,8 @@ for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), ("Berkeley DB", "db-*", "LICENSE"), ("openssl", "openssl-*", "LICENSE"), - ("Tcl", "tcl8*", "license.terms"), - ("Tk", "tk8*", "license.terms"), + ("Tcl", "tcl-8*", "license.terms"), + ("Tk", "tk-8*", "license.terms"), ("Tix", "tix-*", "license.terms")): out.write("\nThis copy of Python includes a copy of %s, which is licensed under the following terms:\n\n" % name) dirs = glob.glob(srcdir+"/../"+pat) @@ -946,7 +946,7 @@ if not snapshot: # For releases, the Python DLL has the same version as the # installer package. - assert pyversion.split(".")[:3] == current_version.split(".") + assert pyversion.split(".")[:3] == current_version.split("."), "%s != %s" % (pyversion, current_version) dlldir.add_file("%s/python%s%s.dll" % (PCBUILD, major, minor), version=pyversion, language=installer.FileVersion(pydllsrc, 1)) diff --git a/Tools/msi/msilib.py b/Tools/msi/msilib.py --- a/Tools/msi/msilib.py +++ b/Tools/msi/msilib.py @@ -484,12 +484,7 @@ def make_short(self, file): file = re.sub(r'[\?|><:/*"+,;=\[\]]', '_', file) # restrictions on short names - parts = file.split(".") - if len(parts)>1: - suffix = parts[-1].upper() - else: - suffix = None - prefix = parts[0].upper() + prefix, _, suffix = file.upper().rpartition(".") if len(prefix) <= 8 and (not suffix or len(suffix)<=3): if suffix: file = prefix+"."+suffix -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 22:20:05 2014 From: python-checkins at python.org (local-hg) Date: Fri, 12 Sep 2014 20:20:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_inspect=2ESignature=3A_Fix?= =?utf-8?b?IGRpc2NyZXBhbmN5IGJldHdlZW4gX19lcV9fIGFuZCBfX2hhc2hfXy4=?= Message-ID: <20140912202002.2954.38926@mail.hg.python.org> http://hg.python.org/cpython/rev/3b974b61e74d changeset: 92410:3b974b61e74d user: Yury Selivanov date: Fri Sep 12 15:48:02 2014 -0400 summary: inspect.Signature: Fix discrepancy between __eq__ and __hash__. Issue #20334. Thanks to Antony Lee for bug report & initial patch. 
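To illustrate the behaviour this checkin pins down (a rough sketch, not part of the patch; foo and bar are throwaway example functions): keyword-only parameters are compared as a set, so reordering them leaves both equality and, after this fix, the hash unchanged.

    import inspect

    def foo(pos, *, a, b, c): pass
    def bar(pos, *, c, b, a): pass

    s1, s2 = inspect.signature(foo), inspect.signature(bar)
    assert s1 == s2              # keyword-only order never mattered for __eq__
    assert hash(s1) == hash(s2)  # ...and now __hash__ agrees with __eq__

Before the fix, __hash__ hashed the parameters as an ordered tuple, so the two signatures above compared equal but could hash differently, breaking the usual hash/eq contract.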
files: Lib/inspect.py | 55 +++++++-------------------- Lib/test/test_inspect.py | 24 ++++++++++++ Misc/NEWS | 1 + 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/Lib/inspect.py b/Lib/inspect.py --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -2239,14 +2239,7 @@ id(self), self) def __hash__(self): - hash_tuple = (self.name, int(self.kind)) - - if self._annotation is not _empty: - hash_tuple += (self._annotation,) - if self._default is not _empty: - hash_tuple += (self._default,) - - return hash(hash_tuple) + return hash((self.name, self.kind, self.annotation, self.default)) def __eq__(self, other): return (issubclass(other.__class__, Parameter) and @@ -2541,41 +2534,23 @@ return type(self)(parameters, return_annotation=return_annotation) + def _hash_basis(self): + params = tuple(param for param in self.parameters.values() + if param.kind != _KEYWORD_ONLY) + + kwo_params = {param.name: param for param in self.parameters.values() + if param.kind == _KEYWORD_ONLY} + + return params, kwo_params, self.return_annotation + def __hash__(self): - hash_tuple = tuple(self.parameters.values()) - if self._return_annotation is not _empty: - hash_tuple += (self._return_annotation,) - return hash(hash_tuple) + params, kwo_params, return_annotation = self._hash_basis() + kwo_params = frozenset(kwo_params.values()) + return hash((params, kwo_params, return_annotation)) def __eq__(self, other): - if (not issubclass(type(other), Signature) or - self.return_annotation != other.return_annotation or - len(self.parameters) != len(other.parameters)): - return False - - other_positions = {param: idx - for idx, param in enumerate(other.parameters.keys())} - - for idx, (param_name, param) in enumerate(self.parameters.items()): - if param.kind == _KEYWORD_ONLY: - try: - other_param = other.parameters[param_name] - except KeyError: - return False - else: - if param != other_param: - return False - else: - try: - other_idx = other_positions[param_name] - except KeyError: - return False - else: - if (idx != other_idx or - param != other.parameters[param_name]): - return False - - return True + return (isinstance(other, Signature) and + self._hash_basis() == other._hash_basis()) def __ne__(self, other): return not self.__eq__(other) diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py --- a/Lib/test/test_inspect.py +++ b/Lib/test/test_inspect.py @@ -2535,43 +2535,67 @@ def bar(a, *, b:int) -> float: pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int) -> int: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int): pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, b:int=42) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, *, c) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def bar(a, b:int) -> float: pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def spam(b:int, a) -> float: pass 
self.assertNotEqual(inspect.signature(spam), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(spam)), hash(inspect.signature(bar))) def foo(*, a, b, c): pass def bar(*, c, b, a): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(*, a=1, b, c): pass def bar(*, c, b, a=1): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *, a=1, b, c): pass def bar(pos, *, c, b, a=1): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *, a, b, c): pass def bar(pos, *, c, b, a=1): pass self.assertNotEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertNotEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def foo(pos, *args, a=42, b, c, **kwargs:int): pass def bar(pos, *args, c, b, a=42, **kwargs:int): pass self.assertEqual(inspect.signature(foo), inspect.signature(bar)) + self.assertEqual( + hash(inspect.signature(foo)), hash(inspect.signature(bar))) def test_signature_hashable(self): S = inspect.Signature diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -814,6 +814,7 @@ keyword-only. - Issue #20334: inspect.Signature and inspect.Parameter are now hashable. + Thanks to Antony Lee for bug reports and suggestions. - Issue #15916: doctest.DocTestSuite returns an empty unittest.TestSuite instead of raising ValueError if it finds no tests -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 12 22:30:51 2014 From: python-checkins at python.org (local-hg) Date: Fri, 12 Sep 2014 20:30:51 +0000 Subject: [Python-checkins] =?utf-8?q?hooks=3A_allow_smtp_username_and_pass?= =?utf-8?q?word?= Message-ID: <20140912203047.117581.20279@mail.hg.python.org> http://hg.python.org/hooks/rev/181e6ea1e92a changeset: 92:181e6ea1e92a user: Benjamin Peterson date: Fri Sep 12 20:30:47 2014 +0000 summary: allow smtp username and password files: hgroundup.py | 14 ++++++++------ 1 files changed, 8 insertions(+), 6 deletions(-) diff --git a/hgroundup.py b/hgroundup.py --- a/hgroundup.py +++ b/hgroundup.py @@ -22,7 +22,6 @@ [hgroundup] fromaddr = roundup-user at example.com toaddr = roundup-admin at example.com - mailrelay = 127.0.0.1 `fromaddr` must be registered as the address of an existing Roundup user, otherwise Roundup will refuse and bounce the message. 
@@ -69,9 +68,6 @@ repourl = posixpath.join(ui.config('web', 'baseurl'), 'rev/') fromaddr = ui.config('hgroundup', 'fromaddr') toaddr = ui.config('hgroundup', 'toaddr') - mailrelay = ui.config('hgroundup', 'mailrelay', default='') - if not mailrelay: - mailrelay = ui.config('smtp', 'host', default='') for var in ('repourl', 'fromaddr', 'toaddr'): if not locals()[var]: raise RuntimeError( @@ -103,6 +99,13 @@ }) add_comment(issues, data, comment) if issues: + smtp_host = ui.config('smtp', 'host', default='localhost') + smtp_port = int(ui.config('smtp', 'port', 25)) + s = smtplib.SMTP(smtp_host, smtp_port) + username = ui.config('smtp', 'username', '') + if username: + password = ui.config('smtp', 'password', '') + s.login(username, password) try: send_comments(mailrelay, fromaddr, toaddr, issues) ui.status("sent email to roundup at " + toaddr + '\n') @@ -129,9 +132,8 @@ 'stage': 'resolved', }) -def send_comments(mailrelay, fromaddr, toaddr, issues): +def send_comments(s, fromaddr, toaddr, issues): """Update the Roundup issue with a comment and changeset link.""" - s = smtplib.SMTP(mailrelay) try: for issue_id, data in issues.iteritems(): props = '' -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Fri Sep 12 22:32:28 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 12 Sep 2014 22:32:28 +0200 (CEST) Subject: [Python-checkins] =?utf-8?q?hooks=3A_allow_smtp_username_and_pass?= =?utf-8?q?word?= Message-ID: <3hvpYS5dphz7LjR@mail.python.org> http://hg.python.org/hooks/rev/181e6ea1e92a changeset: 92:181e6ea1e92a user: Benjamin Peterson date: Fri Sep 12 20:30:47 2014 +0000 summary: allow smtp username and password files: hgroundup.py | 14 ++++++++------ 1 files changed, 8 insertions(+), 6 deletions(-) diff --git a/hgroundup.py b/hgroundup.py --- a/hgroundup.py +++ b/hgroundup.py @@ -22,7 +22,6 @@ [hgroundup] fromaddr = roundup-user at example.com toaddr = roundup-admin at example.com - mailrelay = 127.0.0.1 `fromaddr` must be registered as the address of an existing Roundup user, otherwise Roundup will refuse and bounce the message. 
@@ -69,9 +68,6 @@ repourl = posixpath.join(ui.config('web', 'baseurl'), 'rev/') fromaddr = ui.config('hgroundup', 'fromaddr') toaddr = ui.config('hgroundup', 'toaddr') - mailrelay = ui.config('hgroundup', 'mailrelay', default='') - if not mailrelay: - mailrelay = ui.config('smtp', 'host', default='') for var in ('repourl', 'fromaddr', 'toaddr'): if not locals()[var]: raise RuntimeError( @@ -103,6 +99,13 @@ }) add_comment(issues, data, comment) if issues: + smtp_host = ui.config('smtp', 'host', default='localhost') + smtp_port = int(ui.config('smtp', 'port', 25)) + s = smtplib.SMTP(smtp_host, smtp_port) + username = ui.config('smtp', 'username', '') + if username: + password = ui.config('smtp', 'password', '') + s.login(username, password) try: send_comments(mailrelay, fromaddr, toaddr, issues) ui.status("sent email to roundup at " + toaddr + '\n') @@ -129,9 +132,8 @@ 'stage': 'resolved', }) -def send_comments(mailrelay, fromaddr, toaddr, issues): +def send_comments(s, fromaddr, toaddr, issues): """Update the Roundup issue with a comment and changeset link.""" - s = smtplib.SMTP(mailrelay) try: for issue_id, data in issues.iteritems(): props = '' -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Sat Sep 13 03:19:54 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 13 Sep 2014 01:19:54 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_suggest_checking_out_sour?= =?utf-8?q?ce_over_https?= Message-ID: <20140913011952.2942.89926@mail.hg.python.org> http://hg.python.org/devguide/rev/2bd115e03a1b changeset: 712:2bd115e03a1b user: Benjamin Peterson date: Fri Sep 12 21:19:49 2014 -0400 summary: suggest checking out source over https files: setup.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/setup.rst b/setup.rst --- a/setup.rst +++ b/setup.rst @@ -46,7 +46,7 @@ CPython (core developers use a different URL as outlined in :ref:`coredev`), run:: - hg clone http://hg.python.org/cpython + hg clone https://hg.python.org/cpython If you want a working copy of an already-released version of Python, i.e., a version in :ref:`maintenance mode `, you can update your -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Sat Sep 13 07:46:00 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 13 Sep 2014 05:46:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_make_hg_link_s?= =?utf-8?q?ecure?= Message-ID: <20140913054600.2952.54511@mail.hg.python.org> http://hg.python.org/cpython/rev/429acfbef89c changeset: 92413:429acfbef89c branch: 3.4 parent: 92407:a4c5effb8698 user: Benjamin Peterson date: Sat Sep 13 01:44:34 2014 -0400 summary: make hg link secure files: Doc/tools/sphinxext/pyspecific.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -10,7 +10,7 @@ """ ISSUE_URI = 'http://bugs.python.org/issue%s' -SOURCE_URI = 'http://hg.python.org/cpython/file/3.4/%s' +SOURCE_URI = 'https://hg.python.org/cpython/file/3.4/%s' from docutils import nodes, utils -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 13 07:46:00 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 13 Sep 2014 05:46:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_make_hg_link_s?= =?utf-8?q?ecure?= Message-ID: 
<20140913054600.126873.4569@mail.hg.python.org> http://hg.python.org/cpython/rev/301c672ceca6 changeset: 92412:301c672ceca6 branch: 2.7 user: Benjamin Peterson date: Sat Sep 13 01:44:34 2014 -0400 summary: make hg link secure files: Doc/tools/sphinxext/pyspecific.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -10,7 +10,7 @@ """ ISSUE_URI = 'http://bugs.python.org/issue%s' -SOURCE_URI = 'http://hg.python.org/cpython/file/2.7/%s' +SOURCE_URI = 'https://hg.python.org/cpython/file/2.7/%s' from docutils import nodes, utils -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sat Sep 13 07:46:00 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 13 Sep 2014 05:46:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140913054600.116483.43017@mail.hg.python.org> http://hg.python.org/cpython/rev/f5cde9c5ef60 changeset: 92414:f5cde9c5ef60 parent: 92410:3b974b61e74d parent: 92413:429acfbef89c user: Benjamin Peterson date: Sat Sep 13 01:45:50 2014 -0400 summary: merge 3.4 files: Doc/tools/sphinxext/pyspecific.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -10,7 +10,7 @@ """ ISSUE_URI = 'http://bugs.python.org/issue%s' -SOURCE_URI = 'http://hg.python.org/cpython/file/default/%s' +SOURCE_URI = 'https://hg.python.org/cpython/file/default/%s' from docutils import nodes, utils -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Sat Sep 13 10:41:59 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 13 Sep 2014 10:41:59 +0200 Subject: [Python-checkins] Daily reference leaks (3b974b61e74d): sum=151936 Message-ID: results for 3b974b61e74d on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [-2, 0, 0] references, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 2] references, sum=2 test_site leaked [2, -2, 2] memory blocks, sum=2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/refloghzn2ST', '-x'] From python-checkins at python.org Sun Sep 14 08:41:14 2014 From: python-checkins at python.org (ned.deily) Date: Sun, 14 Sep 2014 06:41:14 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMTY4?= =?utf-8?q?=3A_Prevent_turtle_AttributeError_with_non-default_Canvas_on_OS?= =?utf-8?b?IFgu?= Message-ID: <20140914064112.2946.10107@mail.hg.python.org> http://hg.python.org/cpython/rev/fac17d06e01d changeset: 92415:fac17d06e01d branch: 3.4 parent: 92413:429acfbef89c user: Ned Deily date: Sat Sep 13 23:39:16 2014 -0700 summary: Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. 
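The shape of the fix, sketched outside the patch (assumes a Tk display is available; the widget names here are made up): winfo_toplevel() resolves the containing window for any Canvas, whereas the old code reached for _rootwindow, a private attribute that a user-supplied Canvas does not have.

    import tkinter

    root = tkinter.Tk()
    canvas = tkinter.Canvas(root)      # a plain, non-default Canvas
    canvas.pack()
    window = canvas.winfo_toplevel()   # works whether or not turtle created the canvas
    window.wm_attributes('-topmost', 1)
    window.wm_attributes('-topmost', 0)
    root.destroy()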
files: Lib/turtle.py | 5 +++-- Misc/NEWS | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/turtle.py b/Lib/turtle.py --- a/Lib/turtle.py +++ b/Lib/turtle.py @@ -997,8 +997,9 @@ # Force Turtle window to the front on OS X. This is needed because # the Turtle window will show behind the Terminal window when you # start the demo from the command line. - cv._rootwindow.call('wm', 'attributes', '.', '-topmost', '1') - cv._rootwindow.call('wm', 'attributes', '.', '-topmost', '0') + rootwindow = cv.winfo_toplevel() + rootwindow.call('wm', 'attributes', '.', '-topmost', '1') + rootwindow.call('wm', 'attributes', '.', '-topmost', '0') def clear(self): """Delete all drawings and all turtles from the TurtleScreen. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,8 @@ Library ------- +- Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. + - Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 08:41:14 2014 From: python-checkins at python.org (ned.deily) Date: Sun, 14 Sep 2014 06:41:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322168=3A_Prevent_turtle_AttributeError_with_non?= =?utf-8?q?-default_Canvas_on_OS_X=2E?= Message-ID: <20140914064112.72566.75580@mail.hg.python.org> http://hg.python.org/cpython/rev/775453a7b85d changeset: 92416:775453a7b85d parent: 92414:f5cde9c5ef60 parent: 92415:fac17d06e01d user: Ned Deily date: Sat Sep 13 23:40:27 2014 -0700 summary: Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. files: Lib/turtle.py | 5 +++-- Misc/NEWS | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/turtle.py b/Lib/turtle.py --- a/Lib/turtle.py +++ b/Lib/turtle.py @@ -997,8 +997,9 @@ # Force Turtle window to the front on OS X. This is needed because # the Turtle window will show behind the Terminal window when you # start the demo from the command line. - cv._rootwindow.call('wm', 'attributes', '.', '-topmost', '1') - cv._rootwindow.call('wm', 'attributes', '.', '-topmost', '0') + rootwindow = cv.winfo_toplevel() + rootwindow.call('wm', 'attributes', '.', '-topmost', '1') + rootwindow.call('wm', 'attributes', '.', '-topmost', '0') def clear(self): """Delete all drawings and all turtles from the TurtleScreen. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,8 @@ Library ------- +- Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. + - Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. 
-- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Sun Sep 14 09:46:42 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 14 Sep 2014 09:46:42 +0200 Subject: [Python-checkins] Daily reference leaks (f5cde9c5ef60): sum=151934 Message-ID: results for f5cde9c5ef60 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 0] references, sum=0 test_site leaked [2, -2, 0] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogOwoOZH', '-x'] From python-checkins at python.org Sun Sep 14 12:29:42 2014 From: python-checkins at python.org (georg.brandl) Date: Sun, 14 Sep 2014 10:29:42 +0000 Subject: [Python-checkins] =?utf-8?q?hooks=3A_Fix_refactoring_oversight=2E?= Message-ID: <20140914102941.2946.92914@mail.hg.python.org> http://hg.python.org/hooks/rev/48575ba56bb3 changeset: 93:48575ba56bb3 user: Georg Brandl date: Sun Sep 14 12:29:37 2014 +0200 summary: Fix refactoring oversight. files: hgroundup.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/hgroundup.py b/hgroundup.py --- a/hgroundup.py +++ b/hgroundup.py @@ -107,7 +107,7 @@ password = ui.config('smtp', 'password', '') s.login(username, password) try: - send_comments(mailrelay, fromaddr, toaddr, issues) + send_comments(s, fromaddr, toaddr, issues) ui.status("sent email to roundup at " + toaddr + '\n') except Exception, err: # make sure an issue updating roundup does not prevent an -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Sun Sep 14 12:34:12 2014 From: python-checkins at python.org (georg.brandl) Date: Sun, 14 Sep 2014 10:34:12 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_Closes_=232771=3A_test=2E?= Message-ID: <20140914103410.116507.86698@mail.hg.python.org> http://hg.python.org/test/rev/8510224e05dc changeset: 215:8510224e05dc user: Georg Brandl date: Sun Sep 14 12:32:47 2014 +0200 summary: Closes #2771: test. files: n | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/n b/n --- a/n +++ b/n @@ -1,1 +1,1 @@ -n +nnnnoooooooooo!!! -- Repository URL: http://hg.python.org/test From python-checkins at python.org Sun Sep 14 12:38:25 2014 From: python-checkins at python.org (georg.brandl) Date: Sun, 14 Sep 2014 10:38:25 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_=232771=3A_test_baseurl_chang?= =?utf-8?q?e=2E?= Message-ID: <20140914103824.116505.20126@mail.hg.python.org> http://hg.python.org/test/rev/e79d1244d887 changeset: 216:e79d1244d887 user: Georg Brandl date: Sun Sep 14 12:38:20 2014 +0200 summary: #2771: test baseurl change. files: n | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/n b/n --- a/n +++ b/n @@ -1,1 +1,2 @@ nnnnoooooooooo!!! +yes. 
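A quick sketch (not from the patch) of the 2.x-to-3.x difference the corrected tests now exercise: for str patterns Unicode matching is already the default in Python 3, so passing re.UNICODE changes nothing, and the meaningful variants are re.ASCII and bytes patterns.

    import re

    assert re.search(r"\w", "\xe9")                    # Unicode matching by default
    assert re.search(r"\w", "\xe9", re.UNICODE)        # identical to the line above
    assert re.search(r"\w", "\xe9", re.ASCII) is None  # ASCII-only word characters
    assert re.search(rb"\w", b"e")                     # bytes patterns match ASCII \w
    assert re.search(rb"\w", b"\xe9") is None          # ...so \xe9 is not \w here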
-- Repository URL: http://hg.python.org/test From python-checkins at python.org Sun Sep 14 15:44:01 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 13:44:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fixed_re_tests?= =?utf-8?q?_incorrectly_ported_from_2=2Ex_to_3=2Ex=2E?= Message-ID: <20140914134401.117593.33408@mail.hg.python.org> http://hg.python.org/cpython/rev/16636d6ffc25 changeset: 92417:16636d6ffc25 branch: 3.4 parent: 92415:fac17d06e01d user: Serhiy Storchaka date: Sun Sep 14 15:56:27 2014 +0300 summary: Fixed re tests incorrectly ported from 2.x to 3.x. files: Lib/test/test_re.py | 49 ++++++++++++++++++-------------- 1 files changed, 27 insertions(+), 22 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -480,29 +480,37 @@ self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", + "abcd abc bcd bx", re.ASCII).group(1), "bx") + self.assertEqual(re.search(r"\B(b.)\B", + "abc bcd bc abxd", re.ASCII).group(1), "bx") + self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") - self.assertEqual(re.search(r"\b(b.)\b", - "abcd abc bcd bx", re.UNICODE).group(1), "bx") - self.assertEqual(re.search(r"\B(b.)\B", - "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) - self.assertEqual(re.search(r"\b(b.)\b", - "abcd abc bcd bx").group(1), "bx") - self.assertEqual(re.search(r"\B(b.)\B", - "abc bcd bc abxd").group(1), "bx") - self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) + self.assertEqual(re.search(br"\b(b.)\b", + b"abcd abc bcd bx").group(1), b"bx") + self.assertEqual(re.search(br"\B(b.)\B", + b"abc bcd bc abxd").group(1), b"bx") + self.assertEqual(re.search(br"\b(b.)\b", + b"abcd abc bcd bx", re.LOCALE).group(1), b"bx") + self.assertEqual(re.search(br"\B(b.)\B", + b"abc bcd bc abxd", re.LOCALE).group(1), b"bx") + self.assertEqual(re.search(br"^abc$", b"\nabc\n", re.M).group(0), b"abc") + self.assertEqual(re.search(br"^\Aabc\Z$", b"abc", re.M).group(0), b"abc") + self.assertEqual(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M), None) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") + self.assertEqual(re.search(br"\d\D\w\W\s\S", + b"1aa! a").group(0), b"1aa! a") + self.assertEqual(re.search(r"\d\D\w\W\s\S", + "1aa! a", re.ASCII).group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") - self.assertEqual(re.search(r"\d\D\w\W\s\S", - "1aa! a", re.UNICODE).group(0), "1aa! a") + self.assertEqual(re.search(br"\d\D\w\W\s\S", + b"1aa! a", re.LOCALE).group(0), b"1aa! 
a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 @@ -533,11 +541,8 @@ def test_bigcharset(self): self.assertEqual(re.match("([\u2222\u2223])", "\u2222").group(1), "\u2222") - self.assertEqual(re.match("([\u2222\u2223])", - "\u2222", re.UNICODE).group(1), "\u2222") r = '[%s]' % ''.join(map(chr, range(256, 2**16, 255))) - self.assertEqual(re.match(r, - "\uff01", re.UNICODE).group(), "\uff01") + self.assertEqual(re.match(r, "\uff01").group(), "\uff01") def test_big_codesize(self): # Issue #1160 @@ -567,7 +572,7 @@ def test_ignore_case(self): self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") - self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") + self.assertEqual(re.match(b"abc", b"ABC", re.I).group(0), b"ABC") self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b") @@ -587,7 +592,7 @@ self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") - self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") + self.assertEqual(re.match(b"abc", b"ABC", re.I).group(0), b"ABC") def test_not_literal(self): self.assertEqual(re.search("\s([^a])", " b").group(1), "b") @@ -901,7 +906,7 @@ re.compile(b'bug_926075')) def test_bug_931848(self): - pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"') + pattern = "[\u002E\u3002\uFF0E\uFF61]" self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) @@ -1020,9 +1025,9 @@ self.assertEqual(pat.match('\xe0'), None) # Bytes patterns for flags in (0, re.ASCII): - pat = re.compile(b'\xc0', re.IGNORECASE) + pat = re.compile(b'\xc0', flags | re.IGNORECASE) self.assertEqual(pat.match(b'\xe0'), None) - pat = re.compile(b'\w') + pat = re.compile(b'\w', flags) self.assertEqual(pat.match(b'\xe0'), None) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 15:44:03 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 13:44:03 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Use_more_appropriate_asserts_in_re_tests=2E?= Message-ID: <20140914134402.2936.24378@mail.hg.python.org> http://hg.python.org/cpython/rev/6cdb7981eb0f changeset: 92421:6cdb7981eb0f parent: 92418:1d73a0d1ce02 parent: 92420:13acbb8939a8 user: Serhiy Storchaka date: Sun Sep 14 16:21:27 2014 +0300 summary: Use more appropriate asserts in re tests. 
files: Lib/test/test_re.py | 230 ++++++++++++++++---------------- 1 files changed, 115 insertions(+), 115 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -59,12 +59,12 @@ self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) - self.assertEqual(re.search('x', 'aaa'), None) + self.assertIsNone(re.search('x', 'aaa')) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) - self.assertEqual(re.match('a+', 'xxx'), None) + self.assertIsNone(re.match('a+', 'xxx')) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) @@ -384,8 +384,8 @@ ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', 'a)')) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', '(a')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), @@ -401,8 +401,8 @@ ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) - self.assertEqual(p.match('abd'), None) - self.assertEqual(p.match('ac'), None) + self.assertIsNone(p.match('abd')) + self.assertIsNone(p.match('ac')) def test_re_groupref(self): @@ -410,8 +410,8 @@ ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', 'a|')) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', '|a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), @@ -429,10 +429,10 @@ "second first second first") def test_repeat_minmax(self): - self.assertEqual(re.match("^(\w){1}$", "abc"), None) - self.assertEqual(re.match("^(\w){1}?$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) + self.assertIsNone(re.match("^(\w){1}$", "abc")) + self.assertIsNone(re.match("^(\w){1}?$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}?$", "abc")) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") @@ -443,22 +443,22 @@ self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") - self.assertEqual(re.match("^x{1}$", "xxx"), None) - self.assertEqual(re.match("^x{1}?$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) + self.assertIsNone(re.match("^x{1}$", "xxx")) + self.assertIsNone(re.match("^x{1}?$", "xxx")) + self.assertIsNone(re.match("^x{1,2}$", "xxx")) + self.assertIsNone(re.match("^x{1,2}?$", "xxx")) - self.assertNotEqual(re.match("^x{3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) - 
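The pattern being applied throughout the file, reduced to a minimal made-up example: unittest's dedicated assertions report failures more usefully than hand-rolled comparisons against None, and assertIsNone/assertTrue also state what the tests actually mean, since a successful match object is truthy and a failed match returns None.

    import re
    import unittest

    class ExampleAsserts(unittest.TestCase):
        def test_match(self):
            self.assertIsNone(re.match('a+', 'xxx'))    # was: assertEqual(..., None)
            self.assertTrue(re.match('^x{3}$', 'xxx'))  # was: assertNotEqual(..., None)

    if __name__ == '__main__':
        unittest.main()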
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) + self.assertTrue(re.match("^x{3}$", "xxx")) + self.assertTrue(re.match("^x{1,3}$", "xxx")) + self.assertTrue(re.match("^x{1,4}$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) + self.assertTrue(re.match("^x{3}?$", "xxx")) + self.assertTrue(re.match("^x{1,3}?$", "xxx")) + self.assertTrue(re.match("^x{1,4}?$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) - self.assertEqual(re.match("^x{}$", "xxx"), None) - self.assertNotEqual(re.match("^x{}$", "x{}"), None) + self.assertIsNone(re.match("^x{}$", "xxx")) + self.assertTrue(re.match("^x{}$", "x{}")) def test_getattr(self): self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)") @@ -472,7 +472,7 @@ self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) - self.assertNotEqual(re.match("(a)", "a").re, None) + self.assertTrue(re.match("(a)", "a").re) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", @@ -489,7 +489,7 @@ "abc bcd bc abxd", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) + self.assertIsNone(re.search(r"^\Aabc\Z$", "\nabc\n", re.M)) self.assertEqual(re.search(br"\b(b.)\b", b"abcd abc bcd bx").group(1), b"bx") self.assertEqual(re.search(br"\B(b.)\B", @@ -500,7 +500,7 @@ b"abc bcd bc abxd", re.LOCALE).group(1), b"bx") self.assertEqual(re.search(br"^abc$", b"\nabc\n", re.M).group(0), b"abc") self.assertEqual(re.search(br"^\Aabc\Z$", b"abc", re.M).group(0), b"abc") - self.assertEqual(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M), None) + self.assertIsNone(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M)) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(br"\d\D\w\W\s\S", @@ -524,10 +524,10 @@ self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. - self.assertEqual(re.search(r"\B", ""), None) + self.assertIsNone(re.search(r"\B", "")) # This one is questionable and different from the perlre behaviour, # but describes current behavior. - self.assertEqual(re.search(r"\b", ""), None) + self.assertIsNone(re.search(r"\b", "")) # A single word-character string has two boundaries, but no # non-boundary gaps. 
self.assertEqual(len(re.findall(r"\b", "a")), 2) @@ -547,8 +547,8 @@ def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) - self.assertIsNotNone(r.match('1000')) - self.assertIsNotNone(r.match('9999')) + self.assertTrue(r.match('1000')) + self.assertTrue(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), @@ -674,29 +674,29 @@ def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: - self.assertNotEqual(re.compile('^pattern$', flag), None) + self.assertTrue(re.compile('^pattern$', flag)) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: - self.assertIsNotNone(re.match(r"\%03o" % i, chr(i))) - self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8")) - self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\%03o" % i, chr(i))) + self.assertTrue(re.match(r"\%03o0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\%03o8" % i, chr(i)+"8")) + self.assertTrue(re.match(r"\x%02x" % i, chr(i))) + self.assertTrue(re.match(r"\x%02x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\x%02xz" % i, chr(i)+"z")) if i < 0x10000: - self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"\0", "\000")) - self.assertIsNotNone(re.match(r"\08", "\0008")) - self.assertIsNotNone(re.match(r"\01", "\001")) - self.assertIsNotNone(re.match(r"\018", "\0018")) - self.assertIsNotNone(re.match(r"\567", chr(0o167))) + self.assertTrue(re.match(r"\u%04x" % i, chr(i))) + self.assertTrue(re.match(r"\u%04x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\u%04xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\U%08x" % i, chr(i))) + self.assertTrue(re.match(r"\U%08x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\U%08xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\0", "\000")) + self.assertTrue(re.match(r"\08", "\0008")) + self.assertTrue(re.match(r"\01", "\001")) + self.assertTrue(re.match(r"\018", "\0018")) + self.assertTrue(re.match(r"\567", chr(0o167))) self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") @@ -709,22 +709,22 @@ def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: - self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i))) + self.assertTrue(re.match(r"[\%o]" % i, chr(i))) + self.assertTrue(re.match(r"[\%o8]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o0]" % i, chr(i))) + 
self.assertTrue(re.match(r"[\%03o8]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x0]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02xz]" % i, chr(i))) if i < 0x10000: - self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) + self.assertTrue(re.match(r"[\u%04x]" % i, chr(i))) + self.assertTrue(re.match(r"[\u%04x0]" % i, chr(i))) + self.assertTrue(re.match(r"[\u%04xz]" % i, chr(i))) + self.assertTrue(re.match(r"[\U%08x]" % i, chr(i))) + self.assertTrue(re.match(r"[\U%08x0]" % i, chr(i)+"0")) + self.assertTrue(re.match(r"[\U%08xz]" % i, chr(i)+"z")) + self.assertTrue(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") @@ -733,35 +733,35 @@ def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) - self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) - self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) - self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) - self.assertIsNotNone(re.match(br"\u", b'u')) - self.assertIsNotNone(re.match(br"\U", b'U')) - self.assertIsNotNone(re.match(br"\0", b"\000")) - self.assertIsNotNone(re.match(br"\08", b"\0008")) - self.assertIsNotNone(re.match(br"\01", b"\001")) - self.assertIsNotNone(re.match(br"\018", b"\0018")) - self.assertIsNotNone(re.match(br"\567", bytes([0o167]))) + self.assertTrue(re.match((r"\%03o" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) + self.assertTrue(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) + self.assertTrue(re.match((r"\x%02x" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) + self.assertTrue(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) + self.assertTrue(re.match(br"\u", b'u')) + self.assertTrue(re.match(br"\U", b'U')) + self.assertTrue(re.match(br"\0", b"\000")) + self.assertTrue(re.match(br"\08", b"\0008")) + self.assertTrue(re.match(br"\01", b"\001")) + self.assertTrue(re.match(br"\018", b"\0018")) + self.assertTrue(re.match(br"\567", bytes([0o167]))) self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") def test_sre_byte_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) - 
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match(br"[\u]", b'u')) - self.assertIsNotNone(re.match(br"[\U]", b'U')) + self.assertTrue(re.match((r"[\%o]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%o8]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) + self.assertTrue(re.match(br"[\u]", b'u')) + self.assertTrue(re.match(br"[\U]", b'U')) self.assertRaises(re.error, re.match, br"[\911]", "") self.assertRaises(re.error, re.match, br"[\x1z]", "") @@ -772,7 +772,7 @@ def test_bug_527371(self): # bug described in patches 527371/672491 - self.assertEqual(re.match(r'(a)?a','a').lastindex, None) + self.assertIsNone(re.match(r'(a)?a','a').lastindex) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?Pa)(?Pb)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?Pa(b))", "ab").lastgroup, 'a') @@ -829,7 +829,7 @@ (r"\s+", None), ]) - self.assertNotEqual(scanner.scanner.scanner("").pattern, None) + self.assertTrue(scanner.scanner.scanner("").pattern) self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, @@ -874,7 +874,7 @@ # bug 764548, re.compile() barfs on str/unicode subclasses class my_unicode(str): pass pat = re.compile(my_unicode("abc")) - self.assertEqual(pat.match("xyz"), None) + self.assertIsNone(pat.match("xyz")) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") @@ -902,8 +902,8 @@ ["::", "::"]) def test_bug_926075(self): - self.assertTrue(re.compile('bug_926075') is not - re.compile(b'bug_926075')) + self.assertIsNot(re.compile('bug_926075'), + re.compile(b'bug_926075')) def test_bug_931848(self): pattern = "[\u002E\u3002\uFF0E\uFF61]" @@ -917,7 +917,7 @@ scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) - self.assertEqual(scanner.search(), None) + self.assertIsNone(scanner.search()) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") @@ -951,7 +951,7 @@ import array for typecode in 'bBuhHiIlLfd': a = array.array(typecode) - self.assertEqual(re.compile(b"bla").match(a), None) + self.assertIsNone(re.compile(b"bla").match(a)) self.assertEqual(re.compile(b"").match(a).groups(), ()) def test_inline_flags(self): @@ -961,27 +961,27 @@ p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" @@ 
-1012,23 +1012,23 @@ # String patterns for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) - self.assertNotEqual(pat.match('\xe0'), None) + self.assertTrue(pat.match('\xe0')) pat = re.compile('\w', flags) - self.assertNotEqual(pat.match('\xe0'), None) + self.assertTrue(pat.match('\xe0')) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('(?a)\xc0', re.IGNORECASE) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('\w', re.ASCII) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('(?a)\w') - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) # Bytes patterns for flags in (0, re.ASCII): pat = re.compile(b'\xc0', flags | re.IGNORECASE) - self.assertEqual(pat.match(b'\xe0'), None) + self.assertIsNone(pat.match(b'\xe0')) pat = re.compile(b'\w', flags) - self.assertEqual(pat.match(b'\xe0'), None) + self.assertIsNone(pat.match(b'\xe0')) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) self.assertRaises(ValueError, re.compile, b'(?u)\w') @@ -1068,11 +1068,11 @@ self.assertRaises(TypeError, _sre.compile, {}, 0, []) def test_search_dot_unicode(self): - self.assertIsNotNone(re.search("123.*-", '123abc-')) - self.assertIsNotNone(re.search("123.*-", '123\xe9-')) - self.assertIsNotNone(re.search("123.*-", '123\u20ac-')) - self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-')) - self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) + self.assertTrue(re.search("123.*-", '123abc-')) + self.assertTrue(re.search("123.*-", '123\xe9-')) + self.assertTrue(re.search("123.*-", '123\u20ac-')) + self.assertTrue(re.search("123.*-", '123\U0010ffff-')) + self.assertTrue(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) def test_compile(self): # Test return value when given string and pattern as parameter -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 15:44:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 13:44:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Use_more_appro?= =?utf-8?q?priate_asserts_in_re_tests=2E?= Message-ID: <20140914134402.50646.67752@mail.hg.python.org> http://hg.python.org/cpython/rev/fe8ba0ade9a3 changeset: 92419:fe8ba0ade9a3 branch: 2.7 parent: 92412:301c672ceca6 user: Serhiy Storchaka date: Sun Sep 14 16:19:37 2014 +0300 summary: Use more appropriate asserts in re tests. 
files: Lib/test/test_re.py | 118 ++++++++++++++++---------------- 1 files changed, 59 insertions(+), 59 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -31,12 +31,12 @@ self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) - self.assertEqual(re.search('x', 'aaa'), None) + self.assertIsNone(re.search('x', 'aaa')) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) - self.assertEqual(re.match('a+', 'xxx'), None) + self.assertIsNone(re.match('a+', 'xxx')) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) @@ -284,8 +284,8 @@ ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', 'a)')) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', '(a')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), @@ -301,8 +301,8 @@ ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) - self.assertEqual(p.match('abd'), None) - self.assertEqual(p.match('ac'), None) + self.assertIsNone(p.match('abd')) + self.assertIsNone(p.match('ac')) def test_re_groupref(self): @@ -310,8 +310,8 @@ ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', 'a|')) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', '|a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), @@ -329,10 +329,10 @@ "second first second first") def test_repeat_minmax(self): - self.assertEqual(re.match("^(\w){1}$", "abc"), None) - self.assertEqual(re.match("^(\w){1}?$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) + self.assertIsNone(re.match("^(\w){1}$", "abc")) + self.assertIsNone(re.match("^(\w){1}?$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}?$", "abc")) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") @@ -343,29 +343,29 @@ self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") - self.assertEqual(re.match("^x{1}$", "xxx"), None) - self.assertEqual(re.match("^x{1}?$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) + self.assertIsNone(re.match("^x{1}$", "xxx")) + self.assertIsNone(re.match("^x{1}?$", "xxx")) + self.assertIsNone(re.match("^x{1,2}$", "xxx")) + self.assertIsNone(re.match("^x{1,2}?$", "xxx")) - self.assertNotEqual(re.match("^x{3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) - 
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) + self.assertTrue(re.match("^x{3}$", "xxx")) + self.assertTrue(re.match("^x{1,3}$", "xxx")) + self.assertTrue(re.match("^x{1,4}$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) + self.assertTrue(re.match("^x{3}?$", "xxx")) + self.assertTrue(re.match("^x{1,3}?$", "xxx")) + self.assertTrue(re.match("^x{1,4}?$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) - self.assertEqual(re.match("^x{}$", "xxx"), None) - self.assertNotEqual(re.match("^x{}$", "x{}"), None) + self.assertIsNone(re.match("^x{}$", "xxx")) + self.assertTrue(re.match("^x{}$", "x{}")) def test_getattr(self): self.assertEqual(re.match("(a)", "a").pos, 0) self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) - self.assertNotEqual(re.match("(a)", "a").re, None) + self.assertTrue(re.match("(a)", "a").re) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", @@ -382,14 +382,14 @@ "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) + self.assertIsNone(re.search(r"^\Aabc\Z$", "\nabc\n", re.M)) self.assertEqual(re.search(r"\b(b.)\b", u"abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", u"abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None) + self.assertIsNone(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M)) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", @@ -409,10 +409,10 @@ self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. - self.assertEqual(re.search(r"\B", ""), None) + self.assertIsNone(re.search(r"\B", "")) # This one is questionable and different from the perlre behaviour, # but describes current behavior. - self.assertEqual(re.search(r"\b", ""), None) + self.assertIsNone(re.search(r"\b", "")) # A single word-character string has two boundaries, but no # non-boundary gaps. 
self.assertEqual(len(re.findall(r"\b", "a")), 2) @@ -434,8 +434,8 @@ def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) - self.assertIsNotNone(r.match('1000')) - self.assertIsNotNone(r.match('9999')) + self.assertTrue(r.match('1000')) + self.assertTrue(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), @@ -569,26 +569,26 @@ def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: - self.assertNotEqual(re.compile('^pattern$', flag), None) + self.assertTrue(re.compile('^pattern$', flag)) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None) - self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None) - self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None) - self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None) - self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None) - self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None) + self.assertTrue(re.match(r"\%03o" % i, chr(i))) + self.assertTrue(re.match(r"\%03o0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\%03o8" % i, chr(i)+"8")) + self.assertTrue(re.match(r"\x%02x" % i, chr(i))) + self.assertTrue(re.match(r"\x%02x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\x%02xz" % i, chr(i)+"z")) self.assertRaises(re.error, re.match, "\911", "") def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None) - self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None) - self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None) - self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None) - self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None) - self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None) + self.assertTrue(re.match(r"[\%03o]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o0]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o8]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x0]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02xz]" % i, chr(i))) self.assertRaises(re.error, re.match, "[\911]", "") def test_bug_113254(self): @@ -598,7 +598,7 @@ def test_bug_527371(self): # bug described in patches 527371/672491 - self.assertEqual(re.match(r'(a)?a','a').lastindex, None) + self.assertIsNone(re.match(r'(a)?a','a').lastindex) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?Pa)(?Pb)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?Pa(b))", "ab").lastgroup, 'a') @@ -655,7 +655,7 @@ (r"\s+", None), ]) - self.assertNotEqual(scanner.scanner.scanner("").pattern, None) + self.assertTrue(scanner.scanner.scanner("").pattern) self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, @@ -704,7 +704,7 @@ self.skipTest('no problem if we have no unicode') class my_unicode(unicode): pass pat = re.compile(my_unicode("abc")) - self.assertEqual(pat.match("xyz"), None) + self.assertIsNone(pat.match("xyz")) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") @@ -716,8 +716,8 @@ unicode except NameError: self.skipTest('no problem if we have no unicode') - self.assertTrue(re.compile('bug_926075') is not - re.compile(eval("u'bug_926075'"))) + self.assertIsNot(re.compile('bug_926075'), + re.compile(eval("u'bug_926075'"))) def test_bug_931848(self): try: @@ -735,7 +735,7 
@@ scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) - self.assertEqual(scanner.search(), None) + self.assertIsNone(scanner.search()) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") @@ -769,7 +769,7 @@ import array for typecode in 'cbBuhHiIlLfd': a = array.array(typecode) - self.assertEqual(re.compile("bla").match(a), None) + self.assertIsNone(re.compile("bla").match(a)) self.assertEqual(re.compile("").match(a).groups(), ()) def test_inline_flags(self): @@ -779,27 +779,27 @@ p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 15:44:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 13:44:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fixed_re_tests_incorrectly_ported_from_2=2Ex_to_3=2Ex=2E?= Message-ID: <20140914134401.77017.5616@mail.hg.python.org> http://hg.python.org/cpython/rev/1d73a0d1ce02 changeset: 92418:1d73a0d1ce02 parent: 92416:775453a7b85d parent: 92417:16636d6ffc25 user: Serhiy Storchaka date: Sun Sep 14 15:57:01 2014 +0300 summary: Fixed re tests incorrectly ported from 2.x to 3.x. 
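The porting mistake being fixed is easy to see outside the test suite: in Python 3, str patterns are Unicode-aware by default, so a test that merely adds re.UNICODE duplicates the unflagged case, while the variants that actually behave differently are re.ASCII and bytes patterns. A minimal, illustrative sketch of that distinction (plain Python 3, not part of the patch):

    import re

    # str patterns: Unicode \w by default, ASCII-only with re.ASCII
    assert re.search(r"\w", "\xe9")                    # U+00E9 is a word character
    assert re.search(r"\w", "\xe9", re.ASCII) is None  # ASCII \w excludes it
    # bytes patterns are always ASCII-only, so they form a separate test case
    assert re.search(br"\w", b"x")
    assert re.search(br"\w", b"\xe9") is None

This is why the corrected tests in the diff below replace the redundant re.UNICODE variants with re.ASCII and bytes-pattern variants.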
files: Lib/test/test_re.py | 49 ++++++++++++++++++-------------- 1 files changed, 27 insertions(+), 22 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -480,29 +480,37 @@ self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", + "abcd abc bcd bx", re.ASCII).group(1), "bx") + self.assertEqual(re.search(r"\B(b.)\B", + "abc bcd bc abxd", re.ASCII).group(1), "bx") + self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") - self.assertEqual(re.search(r"\b(b.)\b", - "abcd abc bcd bx", re.UNICODE).group(1), "bx") - self.assertEqual(re.search(r"\B(b.)\B", - "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) - self.assertEqual(re.search(r"\b(b.)\b", - "abcd abc bcd bx").group(1), "bx") - self.assertEqual(re.search(r"\B(b.)\B", - "abc bcd bc abxd").group(1), "bx") - self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) + self.assertEqual(re.search(br"\b(b.)\b", + b"abcd abc bcd bx").group(1), b"bx") + self.assertEqual(re.search(br"\B(b.)\B", + b"abc bcd bc abxd").group(1), b"bx") + self.assertEqual(re.search(br"\b(b.)\b", + b"abcd abc bcd bx", re.LOCALE).group(1), b"bx") + self.assertEqual(re.search(br"\B(b.)\B", + b"abc bcd bc abxd", re.LOCALE).group(1), b"bx") + self.assertEqual(re.search(br"^abc$", b"\nabc\n", re.M).group(0), b"abc") + self.assertEqual(re.search(br"^\Aabc\Z$", b"abc", re.M).group(0), b"abc") + self.assertEqual(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M), None) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") + self.assertEqual(re.search(br"\d\D\w\W\s\S", + b"1aa! a").group(0), b"1aa! a") + self.assertEqual(re.search(r"\d\D\w\W\s\S", + "1aa! a", re.ASCII).group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") - self.assertEqual(re.search(r"\d\D\w\W\s\S", - "1aa! a", re.UNICODE).group(0), "1aa! a") + self.assertEqual(re.search(br"\d\D\w\W\s\S", + b"1aa! a", re.LOCALE).group(0), b"1aa! 
a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 @@ -533,11 +541,8 @@ def test_bigcharset(self): self.assertEqual(re.match("([\u2222\u2223])", "\u2222").group(1), "\u2222") - self.assertEqual(re.match("([\u2222\u2223])", - "\u2222", re.UNICODE).group(1), "\u2222") r = '[%s]' % ''.join(map(chr, range(256, 2**16, 255))) - self.assertEqual(re.match(r, - "\uff01", re.UNICODE).group(), "\uff01") + self.assertEqual(re.match(r, "\uff01").group(), "\uff01") def test_big_codesize(self): # Issue #1160 @@ -567,7 +572,7 @@ def test_ignore_case(self): self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") - self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") + self.assertEqual(re.match(b"abc", b"ABC", re.I).group(0), b"ABC") self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b") @@ -587,7 +592,7 @@ self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") - self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") + self.assertEqual(re.match(b"abc", b"ABC", re.I).group(0), b"ABC") def test_not_literal(self): self.assertEqual(re.search("\s([^a])", " b").group(1), "b") @@ -901,7 +906,7 @@ re.compile(b'bug_926075')) def test_bug_931848(self): - pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"') + pattern = "[\u002E\u3002\uFF0E\uFF61]" self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) @@ -1020,9 +1025,9 @@ self.assertEqual(pat.match('\xe0'), None) # Bytes patterns for flags in (0, re.ASCII): - pat = re.compile(b'\xc0', re.IGNORECASE) + pat = re.compile(b'\xc0', flags | re.IGNORECASE) self.assertEqual(pat.match(b'\xe0'), None) - pat = re.compile(b'\w') + pat = re.compile(b'\w', flags) self.assertEqual(pat.match(b'\xe0'), None) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 15:44:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 13:44:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Use_more_appro?= =?utf-8?q?priate_asserts_in_re_tests=2E?= Message-ID: <20140914134402.111152.13297@mail.hg.python.org> http://hg.python.org/cpython/rev/13acbb8939a8 changeset: 92420:13acbb8939a8 branch: 3.4 parent: 92417:16636d6ffc25 user: Serhiy Storchaka date: Sun Sep 14 16:20:20 2014 +0300 summary: Use more appropriate asserts in re tests. 
files: Lib/test/test_re.py | 230 ++++++++++++++++---------------- 1 files changed, 115 insertions(+), 115 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -59,12 +59,12 @@ self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) - self.assertEqual(re.search('x', 'aaa'), None) + self.assertIsNone(re.search('x', 'aaa')) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) - self.assertEqual(re.match('a+', 'xxx'), None) + self.assertIsNone(re.match('a+', 'xxx')) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) @@ -384,8 +384,8 @@ ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', 'a)')) + self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', '(a')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), @@ -401,8 +401,8 @@ ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) - self.assertEqual(p.match('abd'), None) - self.assertEqual(p.match('ac'), None) + self.assertIsNone(p.match('abd')) + self.assertIsNone(p.match('ac')) def test_re_groupref(self): @@ -410,8 +410,8 @@ ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) - self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', 'a|')) + self.assertIsNone(re.match(r'^(\|)?([^()]+)\1$', '|a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), @@ -429,10 +429,10 @@ "second first second first") def test_repeat_minmax(self): - self.assertEqual(re.match("^(\w){1}$", "abc"), None) - self.assertEqual(re.match("^(\w){1}?$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) - self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) + self.assertIsNone(re.match("^(\w){1}$", "abc")) + self.assertIsNone(re.match("^(\w){1}?$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}$", "abc")) + self.assertIsNone(re.match("^(\w){1,2}?$", "abc")) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") @@ -443,22 +443,22 @@ self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") - self.assertEqual(re.match("^x{1}$", "xxx"), None) - self.assertEqual(re.match("^x{1}?$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}$", "xxx"), None) - self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) + self.assertIsNone(re.match("^x{1}$", "xxx")) + self.assertIsNone(re.match("^x{1}?$", "xxx")) + self.assertIsNone(re.match("^x{1,2}$", "xxx")) + self.assertIsNone(re.match("^x{1,2}?$", "xxx")) - self.assertNotEqual(re.match("^x{3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) - 
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) - self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) + self.assertTrue(re.match("^x{3}$", "xxx")) + self.assertTrue(re.match("^x{1,3}$", "xxx")) + self.assertTrue(re.match("^x{1,4}$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) + self.assertTrue(re.match("^x{3}?$", "xxx")) + self.assertTrue(re.match("^x{1,3}?$", "xxx")) + self.assertTrue(re.match("^x{1,4}?$", "xxx")) + self.assertTrue(re.match("^x{3,4}?$", "xxx")) - self.assertEqual(re.match("^x{}$", "xxx"), None) - self.assertNotEqual(re.match("^x{}$", "x{}"), None) + self.assertIsNone(re.match("^x{}$", "xxx")) + self.assertTrue(re.match("^x{}$", "x{}")) def test_getattr(self): self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)") @@ -472,7 +472,7 @@ self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) - self.assertNotEqual(re.match("(a)", "a").re, None) + self.assertTrue(re.match("(a)", "a").re) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", @@ -489,7 +489,7 @@ "abc bcd bc abxd", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") - self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) + self.assertIsNone(re.search(r"^\Aabc\Z$", "\nabc\n", re.M)) self.assertEqual(re.search(br"\b(b.)\b", b"abcd abc bcd bx").group(1), b"bx") self.assertEqual(re.search(br"\B(b.)\B", @@ -500,7 +500,7 @@ b"abc bcd bc abxd", re.LOCALE).group(1), b"bx") self.assertEqual(re.search(br"^abc$", b"\nabc\n", re.M).group(0), b"abc") self.assertEqual(re.search(br"^\Aabc\Z$", b"abc", re.M).group(0), b"abc") - self.assertEqual(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M), None) + self.assertIsNone(re.search(br"^\Aabc\Z$", b"\nabc\n", re.M)) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(br"\d\D\w\W\s\S", @@ -524,10 +524,10 @@ self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. - self.assertEqual(re.search(r"\B", ""), None) + self.assertIsNone(re.search(r"\B", "")) # This one is questionable and different from the perlre behaviour, # but describes current behavior. - self.assertEqual(re.search(r"\b", ""), None) + self.assertIsNone(re.search(r"\b", "")) # A single word-character string has two boundaries, but no # non-boundary gaps. 
self.assertEqual(len(re.findall(r"\b", "a")), 2) @@ -547,8 +547,8 @@ def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) - self.assertIsNotNone(r.match('1000')) - self.assertIsNotNone(r.match('9999')) + self.assertTrue(r.match('1000')) + self.assertTrue(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), @@ -674,29 +674,29 @@ def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: - self.assertNotEqual(re.compile('^pattern$', flag), None) + self.assertTrue(re.compile('^pattern$', flag)) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: - self.assertIsNotNone(re.match(r"\%03o" % i, chr(i))) - self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8")) - self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\%03o" % i, chr(i))) + self.assertTrue(re.match(r"\%03o0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\%03o8" % i, chr(i)+"8")) + self.assertTrue(re.match(r"\x%02x" % i, chr(i))) + self.assertTrue(re.match(r"\x%02x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\x%02xz" % i, chr(i)+"z")) if i < 0x10000: - self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i))) - self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"\0", "\000")) - self.assertIsNotNone(re.match(r"\08", "\0008")) - self.assertIsNotNone(re.match(r"\01", "\001")) - self.assertIsNotNone(re.match(r"\018", "\0018")) - self.assertIsNotNone(re.match(r"\567", chr(0o167))) + self.assertTrue(re.match(r"\u%04x" % i, chr(i))) + self.assertTrue(re.match(r"\u%04x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\u%04xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\U%08x" % i, chr(i))) + self.assertTrue(re.match(r"\U%08x0" % i, chr(i)+"0")) + self.assertTrue(re.match(r"\U%08xz" % i, chr(i)+"z")) + self.assertTrue(re.match(r"\0", "\000")) + self.assertTrue(re.match(r"\08", "\0008")) + self.assertTrue(re.match(r"\01", "\001")) + self.assertTrue(re.match(r"\018", "\0018")) + self.assertTrue(re.match(r"\567", chr(0o167))) self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") @@ -709,22 +709,22 @@ def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: - self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i))) + self.assertTrue(re.match(r"[\%o]" % i, chr(i))) + self.assertTrue(re.match(r"[\%o8]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o]" % i, chr(i))) + self.assertTrue(re.match(r"[\%03o0]" % i, chr(i))) + 
self.assertTrue(re.match(r"[\%03o8]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02x0]" % i, chr(i))) + self.assertTrue(re.match(r"[\x%02xz]" % i, chr(i))) if i < 0x10000: - self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i))) - self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0")) - self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z")) - self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) + self.assertTrue(re.match(r"[\u%04x]" % i, chr(i))) + self.assertTrue(re.match(r"[\u%04x0]" % i, chr(i))) + self.assertTrue(re.match(r"[\u%04xz]" % i, chr(i))) + self.assertTrue(re.match(r"[\U%08x]" % i, chr(i))) + self.assertTrue(re.match(r"[\U%08x0]" % i, chr(i)+"0")) + self.assertTrue(re.match(r"[\U%08xz]" % i, chr(i)+"z")) + self.assertTrue(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") @@ -733,35 +733,35 @@ def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) - self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) - self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) - self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) - self.assertIsNotNone(re.match(br"\u", b'u')) - self.assertIsNotNone(re.match(br"\U", b'U')) - self.assertIsNotNone(re.match(br"\0", b"\000")) - self.assertIsNotNone(re.match(br"\08", b"\0008")) - self.assertIsNotNone(re.match(br"\01", b"\001")) - self.assertIsNotNone(re.match(br"\018", b"\0018")) - self.assertIsNotNone(re.match(br"\567", bytes([0o167]))) + self.assertTrue(re.match((r"\%03o" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) + self.assertTrue(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) + self.assertTrue(re.match((r"\x%02x" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) + self.assertTrue(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) + self.assertTrue(re.match(br"\u", b'u')) + self.assertTrue(re.match(br"\U", b'U')) + self.assertTrue(re.match(br"\0", b"\000")) + self.assertTrue(re.match(br"\08", b"\0008")) + self.assertTrue(re.match(br"\01", b"\001")) + self.assertTrue(re.match(br"\018", b"\0018")) + self.assertTrue(re.match(br"\567", bytes([0o167]))) self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") def test_sre_byte_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: - self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) - 
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) - self.assertIsNotNone(re.match(br"[\u]", b'u')) - self.assertIsNotNone(re.match(br"[\U]", b'U')) + self.assertTrue(re.match((r"[\%o]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%o8]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) + self.assertTrue(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) + self.assertTrue(re.match(br"[\u]", b'u')) + self.assertTrue(re.match(br"[\U]", b'U')) self.assertRaises(re.error, re.match, br"[\911]", "") self.assertRaises(re.error, re.match, br"[\x1z]", "") @@ -772,7 +772,7 @@ def test_bug_527371(self): # bug described in patches 527371/672491 - self.assertEqual(re.match(r'(a)?a','a').lastindex, None) + self.assertIsNone(re.match(r'(a)?a','a').lastindex) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?Pa)(?Pb)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?Pa(b))", "ab").lastgroup, 'a') @@ -829,7 +829,7 @@ (r"\s+", None), ]) - self.assertNotEqual(scanner.scanner.scanner("").pattern, None) + self.assertTrue(scanner.scanner.scanner("").pattern) self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, @@ -874,7 +874,7 @@ # bug 764548, re.compile() barfs on str/unicode subclasses class my_unicode(str): pass pat = re.compile(my_unicode("abc")) - self.assertEqual(pat.match("xyz"), None) + self.assertIsNone(pat.match("xyz")) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") @@ -902,8 +902,8 @@ ["::", "::"]) def test_bug_926075(self): - self.assertTrue(re.compile('bug_926075') is not - re.compile(b'bug_926075')) + self.assertIsNot(re.compile('bug_926075'), + re.compile(b'bug_926075')) def test_bug_931848(self): pattern = "[\u002E\u3002\uFF0E\uFF61]" @@ -917,7 +917,7 @@ scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) - self.assertEqual(scanner.search(), None) + self.assertIsNone(scanner.search()) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") @@ -951,7 +951,7 @@ import array for typecode in 'bBuhHiIlLfd': a = array.array(typecode) - self.assertEqual(re.compile(b"bla").match(a), None) + self.assertIsNone(re.compile(b"bla").match(a)) self.assertEqual(re.compile(b"").match(a).groups(), ()) def test_inline_flags(self): @@ -961,27 +961,27 @@ p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) - self.assertNotEqual(q, None) + self.assertTrue(q) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) - self.assertNotEqual(q, None) + self.assertTrue(q) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" @@ 
-1012,23 +1012,23 @@ # String patterns for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) - self.assertNotEqual(pat.match('\xe0'), None) + self.assertTrue(pat.match('\xe0')) pat = re.compile('\w', flags) - self.assertNotEqual(pat.match('\xe0'), None) + self.assertTrue(pat.match('\xe0')) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('(?a)\xc0', re.IGNORECASE) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('\w', re.ASCII) - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) pat = re.compile('(?a)\w') - self.assertEqual(pat.match('\xe0'), None) + self.assertIsNone(pat.match('\xe0')) # Bytes patterns for flags in (0, re.ASCII): pat = re.compile(b'\xc0', flags | re.IGNORECASE) - self.assertEqual(pat.match(b'\xe0'), None) + self.assertIsNone(pat.match(b'\xe0')) pat = re.compile(b'\w', flags) - self.assertEqual(pat.match(b'\xe0'), None) + self.assertIsNone(pat.match(b'\xe0')) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) self.assertRaises(ValueError, re.compile, b'(?u)\w') @@ -1068,11 +1068,11 @@ self.assertRaises(TypeError, _sre.compile, {}, 0, []) def test_search_dot_unicode(self): - self.assertIsNotNone(re.search("123.*-", '123abc-')) - self.assertIsNotNone(re.search("123.*-", '123\xe9-')) - self.assertIsNotNone(re.search("123.*-", '123\u20ac-')) - self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-')) - self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) + self.assertTrue(re.search("123.*-", '123abc-')) + self.assertTrue(re.search("123.*-", '123\xe9-')) + self.assertTrue(re.search("123.*-", '123\u20ac-')) + self.assertTrue(re.search("123.*-", '123\U0010ffff-')) + self.assertTrue(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) def test_compile(self): # Test return value when given string and pattern as parameter -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 16:43:26 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 14:43:26 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Now_re_tests_w?= =?utf-8?q?ork_again_on_Unicode-disabled_build=2E?= Message-ID: <20140914144325.116493.65381@mail.hg.python.org> http://hg.python.org/cpython/rev/8f678db15869 changeset: 92422:8f678db15869 branch: 2.7 parent: 92419:fe8ba0ade9a3 user: Serhiy Storchaka date: Sun Sep 14 17:40:44 2014 +0300 summary: Now re tests work again on Unicode-disabled build. Simplified existing detections of Unicode-disabled build. 
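The approach taken in the diff below is to funnel every Unicode-only test through a skip decorator and to build non-ASCII strings at run time, with unichr() and the u() helper from test.test_support, rather than with u'' literals, which a --disable-unicode build rejects. A hedged sketch of the same guard with simplified, stand-alone names (the patch itself uses test.test_support.have_unicode and requires_unicode):

    import re
    import unittest

    # Python 2 only: probe for optional Unicode support.
    try:
        unicode
    except NameError:
        have_unicode = False
    else:
        have_unicode = True

    requires_unicode = unittest.skipUnless(have_unicode,
                                           "requires a Unicode-enabled build")

    class GuardExample(unittest.TestCase):
        @requires_unicode
        def test_non_ascii_match(self):
            ch = unichr(0x2222)   # built at run time, no u'\u2222' literal
            self.assertTrue(re.match("([%s])" % ch, ch, re.UNICODE))

    if __name__ == "__main__":
        unittest.main()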
files: Lib/test/re_tests.py | 2 +- Lib/test/test_re.py | 99 ++++++++++++++++--------------- 2 files changed, 53 insertions(+), 48 deletions(-) diff --git a/Lib/test/re_tests.py b/Lib/test/re_tests.py --- a/Lib/test/re_tests.py +++ b/Lib/test/re_tests.py @@ -663,7 +663,7 @@ try: u = eval("u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'") -except SyntaxError: +except (SyntaxError, ValueError): pass else: tests.extend([ diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -1,6 +1,6 @@ from test.test_support import verbose, run_unittest, import_module from test.test_support import precisionbigmemtest, _2G, cpython_only -from test.test_support import captured_stdout +from test.test_support import captured_stdout, have_unicode, requires_unicode, u import re from re import Scanner import sre_constants @@ -86,6 +86,7 @@ self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') + @requires_unicode def test_bug_1140(self): # re.sub(x, y, u'') should return u'', not '', and # re.sub(x, y, '') should return '', not u''. @@ -376,10 +377,11 @@ "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") - self.assertEqual(re.search(r"\b(b.)\b", - "abcd abc bcd bx", re.UNICODE).group(1), "bx") - self.assertEqual(re.search(r"\B(b.)\B", - "abc bcd bc abxd", re.UNICODE).group(1), "bx") + if have_unicode: + self.assertEqual(re.search(r"\b(b.)\b", + "abcd abc bcd bx", re.UNICODE).group(1), "bx") + self.assertEqual(re.search(r"\B(b.)\B", + "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertIsNone(re.search(r"^\Aabc\Z$", "\nabc\n", re.M)) @@ -394,8 +396,9 @@ "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") - self.assertEqual(re.search(r"\d\D\w\W\s\S", - "1aa! a", re.UNICODE).group(0), "1aa! a") + if have_unicode: + self.assertEqual(re.search(r"\d\D\w\W\s\S", + "1aa! a", re.UNICODE).group(0), "1aa! a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 @@ -423,13 +426,14 @@ # Can match around the whitespace. 
self.assertEqual(len(re.findall(r"\B", " ")), 2) + @requires_unicode def test_bigcharset(self): - self.assertEqual(re.match(u"([\u2222\u2223])", - u"\u2222").group(1), u"\u2222") - self.assertEqual(re.match(u"([\u2222\u2223])", - u"\u2222", re.UNICODE).group(1), u"\u2222") + self.assertEqual(re.match(u(r"([\u2222\u2223])"), + unichr(0x2222)).group(1), unichr(0x2222)) + self.assertEqual(re.match(u(r"([\u2222\u2223])"), + unichr(0x2222), re.UNICODE).group(1), unichr(0x2222)) r = u'[%s]' % u''.join(map(unichr, range(256, 2**16, 255))) - self.assertEqual(re.match(r, u"\uff01", re.UNICODE).group(), u"\uff01") + self.assertEqual(re.match(r, unichr(0xff01), re.UNICODE).group(), unichr(0xff01)) def test_big_codesize(self): # Issue #1160 @@ -476,7 +480,8 @@ import _sre self.assertEqual(_sre.getlower(ord('A'), 0), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a')) - self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) + if have_unicode: + self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC") @@ -503,8 +508,9 @@ self.assertEqual(m.group(), match) self.assertEqual(m.span(), span) + @requires_unicode def test_re_escape(self): - alnum_chars = string.ascii_letters + string.digits + alnum_chars = unicode(string.ascii_letters + string.digits) p = u''.join(unichr(i) for i in range(256)) for c in p: if c in alnum_chars: @@ -517,7 +523,7 @@ self.assertMatch(re.escape(p), p) def test_re_escape_byte(self): - alnum_chars = (string.ascii_letters + string.digits).encode('ascii') + alnum_chars = string.ascii_letters + string.digits p = ''.join(chr(i) for i in range(256)) for b in p: if b in alnum_chars: @@ -529,20 +535,21 @@ self.assertMatch(re.escape(b), b) self.assertMatch(re.escape(p), p) + @requires_unicode def test_re_escape_non_ascii(self): - s = u'xxx\u2620\u2620\u2620xxx' + s = u(r'xxx\u2620\u2620\u2620xxx') s_escaped = re.escape(s) - self.assertEqual(s_escaped, u'xxx\\\u2620\\\u2620\\\u2620xxx') + self.assertEqual(s_escaped, u(r'xxx\\\u2620\\\u2620\\\u2620xxx')) self.assertMatch(s_escaped, s) - self.assertMatch(u'.%s+.' % re.escape(u'\u2620'), s, - u'x\u2620\u2620\u2620x', (2, 7), re.search) + self.assertMatch(u'.%s+.' % re.escape(unichr(0x2620)), s, + u(r'x\u2620\u2620\u2620x'), (2, 7), re.search) def test_re_escape_non_ascii_bytes(self): - b = u'y\u2620y\u2620y'.encode('utf-8') + b = b'y\xe2\x98\xa0y\xe2\x98\xa0y' b_escaped = re.escape(b) self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y') self.assertMatch(b_escaped, b) - res = re.findall(re.escape(u'\u2620'.encode('utf-8')), b) + res = re.findall(re.escape(b'\xe2\x98\xa0'), b) self.assertEqual(len(res), 2) def test_pickling(self): @@ -621,8 +628,9 @@ # non-recursive scheme was implemented. 
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001) + @requires_unicode def test_bug_612074(self): - pat=u"["+re.escape(u"\u2039")+u"]" + pat=u"["+re.escape(unichr(0x2039))+u"]" self.assertEqual(re.compile(pat) and 1, 1) def test_stack_overflow(self): @@ -696,12 +704,9 @@ self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(), ('a', None, None)) + @requires_unicode def test_bug_764548(self): # bug 764548, re.compile() barfs on str/unicode subclasses - try: - unicode - except NameError: - self.skipTest('no problem if we have no unicode') class my_unicode(unicode): pass pat = re.compile(my_unicode("abc")) self.assertIsNone(pat.match("xyz")) @@ -711,20 +716,14 @@ self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) + @requires_unicode def test_bug_926075(self): - try: - unicode - except NameError: - self.skipTest('no problem if we have no unicode') self.assertIsNot(re.compile('bug_926075'), - re.compile(eval("u'bug_926075'"))) + re.compile(u'bug_926075')) + @requires_unicode def test_bug_931848(self): - try: - unicode - except NameError: - self.skipTest('no problem if we have no unicode') - pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"') + pattern = u(r"[\u002E\u3002\uFF0E\uFF61]") self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) @@ -743,23 +742,24 @@ self.assertEqual(iter.next().span(), (4, 4)) self.assertRaises(StopIteration, iter.next) + @requires_unicode def test_bug_6561(self): # '\d' should match characters in Unicode category 'Nd' # (Number, Decimal Digit), but not those in 'Nl' (Number, # Letter) or 'No' (Number, Other). decimal_digits = [ - u'\u0037', # '\N{DIGIT SEVEN}', category 'Nd' - u'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd' - u'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' + unichr(0x0037), # '\N{DIGIT SEVEN}', category 'Nd' + unichr(0x0e58), # '\N{THAI DIGIT SIX}', category 'Nd' + unichr(0xff10), # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' ] for x in decimal_digits: self.assertEqual(re.match('^\d$', x, re.UNICODE).group(0), x) not_decimal_digits = [ - u'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl' - u'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl' - u'\u2082', # '\N{SUBSCRIPT TWO}', category 'No' - u'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' + unichr(0x2165), # '\N{ROMAN NUMERAL SIX}', category 'Nl' + unichr(0x3039), # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl' + unichr(0x2082), # '\N{SUBSCRIPT TWO}', category 'No' + unichr(0x32b4), # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' ] for x in not_decimal_digits: self.assertIsNone(re.match('^\d$', x, re.UNICODE)) @@ -767,11 +767,15 @@ def test_empty_array(self): # SF buf 1647541 import array - for typecode in 'cbBuhHiIlLfd': + typecodes = 'cbBhHiIlLfd' + if have_unicode: + typecodes += 'u' + for typecode in typecodes: a = array.array(typecode) self.assertIsNone(re.compile("bla").match(a)) self.assertEqual(re.compile("").match(a).groups(), ()) + @requires_unicode def test_inline_flags(self): # Bug #1700 upper_char = unichr(0x1ea0) # Latin Capital Letter A with Dot Bellow @@ -906,9 +910,10 @@ pattern = '.' 
+ reps + mod + 'yz' self.assertEqual(re.compile(pattern, re.S).findall('xyz'), ['xyz'], msg=pattern) - pattern = pattern.encode() - self.assertEqual(re.compile(pattern, re.S).findall(b'xyz'), - [b'xyz'], msg=pattern) + if have_unicode: + pattern = unicode(pattern) + self.assertEqual(re.compile(pattern, re.S).findall(u'xyz'), + [u'xyz'], msg=pattern) def test_bug_2537(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 18:57:40 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 14 Sep 2014 16:57:40 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_make_links_to_hg=2Epython?= =?utf-8?q?=2Eorg_https?= Message-ID: <20140914165737.117583.92356@mail.hg.python.org> http://hg.python.org/devguide/rev/dd01b6c1978f changeset: 713:dd01b6c1978f user: Benjamin Peterson date: Sun Sep 14 12:57:24 2014 -0400 summary: make links to hg.python.org https files: buildbots.rst | 4 +- committing.rst | 8 ++-- communication.rst | 2 +- coredev.rst | 6 +- coverity.rst | 2 +- docquality.rst | 4 +- faq.rst | 6 +- index.rst | 8 ++-- runtests.rst | 4 +- setup.rst | 2 +- triaging.rst | 58 +++++++++++++++++----------------- 11 files changed, 52 insertions(+), 52 deletions(-) diff --git a/buildbots.rst b/buildbots.rst --- a/buildbots.rst +++ b/buildbots.rst @@ -189,8 +189,8 @@ When creating ("forcing") a build on a custom builder, you have to provide at least two parameters: -* The repository path, relative to http://hg.python.org. For example, - ``sandbox/myfixes`` if ``http://hg.python.org/sandbox/myfixes`` is the +* The repository path, relative to https://hg.python.org. For example, + ``sandbox/myfixes`` if ``https://hg.python.org/sandbox/myfixes`` is the full path to the repository. * The Mercurial id of the changeset you want to build. To make things less diff --git a/committing.rst b/committing.rst --- a/committing.rst +++ b/committing.rst @@ -487,10 +487,10 @@ If you want to work on a feature long-term (perhaps you're implementing a PEP), you will probably want to publish your work in a dedicated repository. The following instructions will help you do so on `hg.python.org -`_'s infrastructure without requiring a lot of upload +`_'s infrastructure without requiring a lot of upload bandwidth. -Go to the main repository's Web page (http://hg.python.org/cpython/); there +Go to the main repository's Web page (https://hg.python.org/cpython/); there you find a button labelled "server-side clone", which you can click on to display a Web form. Enter the relative path of the repository you want to create on the server, for example ``features/mywork``; and press the button. @@ -539,13 +539,13 @@ When you push them, they will land in the public repository at ``ssh://hg at hg.python.org/features/mywork`` (or -``http://hg.python.org/features/mywork`` for the read-only URL). Other +``https://hg.python.org/features/mywork`` for the read-only URL). Other people can clone the public repository and work on the code too. When you want to synchronize with CPython's upstream changes, you can pull from the main repository, either from its remote URL:: - $ hg pull http://hg.python.org/cpython + $ hg pull https://hg.python.org/cpython or from a local clone that you may have on your disk (which is of course faster):: diff --git a/communication.rst b/communication.rst --- a/communication.rst +++ b/communication.rst @@ -31,7 +31,7 @@ it will get redirected here. 
Python-checkins_ sends out an email for every commit to Python's various -repositories from http://hg.python.org. All core developers +repositories from https://hg.python.org. All core developers subscribe to this list and are known to reply to these emails to make comments about various issues they catch in the commit. Replies get redirected to python-dev. diff --git a/coredev.rst b/coredev.rst --- a/coredev.rst +++ b/coredev.rst @@ -25,7 +25,7 @@ able to commit them without supervision. A complete list of core developer usernames can be found at -http://hg.python.org/committers.txt. :ref:`developers` lists when and why +https://hg.python.org/committers.txt. :ref:`developers` lists when and why someone received commit privileges. @@ -69,9 +69,9 @@ This should match your username on the issue tracker. You can verify your commit access by looking at -http://hg.python.org/committers.txt which lists all core developers by +https://hg.python.org/committers.txt which lists all core developers by username. If you want to practice, there is a `test repository -`_ where you can freely commit and push any +`_ where you can freely commit and push any changes you like:: hg clone ssh://hg at hg.python.org/test/ hgtest diff --git a/coverity.rst b/coverity.rst --- a/coverity.rst +++ b/coverity.rst @@ -150,4 +150,4 @@ .. _Coverity Connect: http://scan5.coverity.com:8080/ -.. _coverity_model.c: http://hg.python.org/cpython/file/tip/Misc/coverity_model.c +.. _coverity_model.c: https://hg.python.org/cpython/file/tip/Misc/coverity_model.c diff --git a/docquality.rst b/docquality.rst --- a/docquality.rst +++ b/docquality.rst @@ -76,12 +76,12 @@ are normally published within a day, on a schedule that may be different from the main documentation. -.. _separate repository: http://hg.python.org/devguide +.. _separate repository: https://hg.python.org/devguide .. _Python bug tracker: http://bugs.python.org To clone the Developer's Guide:: - $ hg clone http://hg.python.org/devguide + $ hg clone https://hg.python.org/devguide Core developers should use:: diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -113,7 +113,7 @@ On the `issue tracker`_, most core developers will have the Python logo appear next to their name. -.. _full list of developers: http://hg.python.org/committers.txt +.. _full list of developers: https://hg.python.org/committers.txt What standards of behaviour are expected in these communication channels? @@ -177,8 +177,8 @@ while the maintenance branch names (``2.7``, ``3.3``, etc) are mapped directly. -.. _git mirror: http://github.com/akheron/cpython -.. _CPython repository: http://hg.python.org/cpython +.. _git mirror: https://github.com/akheron/cpython +.. _CPython repository: https://hg.python.org/cpython Please only use this approach if you're already an experienced Git user and don't require assistance with the specifics of version control commands. All diff --git a/index.rst b/index.rst --- a/index.rst +++ b/index.rst @@ -16,7 +16,7 @@ 1. :ref:`Get the source code `:: - hg clone http://hg.python.org/cpython + hg clone https://hg.python.org/cpython 2. :ref:`Build Python `. On all platforms, install build dependencies (such as compilers), then on :ref:`UNIX `:: @@ -147,8 +147,8 @@ * `Firefox search engine plug-in`_ * `Buildbot status`_ * Source code - * `Browse online `_ - * `Snapshot of py3k `_ + * `Browse online `_ + * `Snapshot of py3k `_ * `Daily OS X installer `_ * PEPs_ (Python Enhancement Proposals) * :doc:`faq` @@ -214,7 +214,7 @@ .. 
_Buildbot status: http://python.org/dev/buildbot/ .. _Firefox search engine plug-in: http://www.python.org/dev/searchplugin/ -.. _Misc directory: http://hg.python.org/cpython/file/default/Misc +.. _Misc directory: https://hg.python.org/cpython/file/default/Misc .. _PEPs: http://www.python.org/dev/peps/ .. _python.org maintenance: http://python.org/dev/pydotorg/ .. _Python: http://www.python.org/ diff --git a/runtests.rst b/runtests.rst --- a/runtests.rst +++ b/runtests.rst @@ -130,7 +130,7 @@ ---------- Benchmarking is useful to test that a change does not degrade performance. -`The Grand Unified Python Benchmark Suite `_ +`The Grand Unified Python Benchmark Suite `_ has a collection of benchmarks for all Python implementations. Documentation about running the benchmarks is in the `README.txt -`_ of the benchmarks repo. +`_ of the benchmarks repo. diff --git a/setup.rst b/setup.rst --- a/setup.rst +++ b/setup.rst @@ -222,7 +222,7 @@ Windows ''''''' -The `readme `_ +The `readme `_ included in the solution has more details, especially on what additional software is required to build which parts of Python. diff --git a/triaging.rst b/triaging.rst --- a/triaging.rst +++ b/triaging.rst @@ -282,7 +282,7 @@ The issue tracker automatically translates the legacy svn revision ```` to its corresponding Mercurial changeset identifier. * ``Dir/file.ext`` and ``Dir/file.ext:NNN`` generate links to files in the - `Python source code repositories `_, + `Python source code repositories `_, possibly linking to the line number specified after the ``:``. * ``PEP `` and ``PEP`` link to the :abbr:`PEP (Python Enhancement Proposal)` ````. @@ -291,33 +291,33 @@ the Devguide, this page, and this section respectively. -.. _CPython: http://hg.python.org/cpython/file/default/ -.. _Doc: http://hg.python.org/cpython/file/default/Doc/ -.. _Grammar: http://hg.python.org/cpython/file/default/Grammar/ -.. _Lib: http://hg.python.org/cpython/file/default/Lib/ -.. _Lib/lib2to3: http://hg.python.org/cpython/file/default/Lib/lib2to3/ -.. _Lib/ctypes: http://hg.python.org/cpython/file/default/Lib/ctypes/ -.. _Lib/distutils: http://hg.python.org/cpython/file/default/Lib/distutils/ -.. _Lib/doctest.py: http://hg.python.org/cpython/file/default/Lib/doctest.py -.. _Lib/idlelib: http://hg.python.org/cpython/file/default/Lib/idlelib/ -.. _Lib/io.py: http://hg.python.org/cpython/file/default/Lib/io.py -.. _Lib/re.py: http://hg.python.org/cpython/file/default/Lib/re.py -.. _Lib/test: http://hg.python.org/cpython/file/default/Lib/test/ -.. _Lib/test/regrtest.py: http://hg.python.org/cpython/file/default/Lib/test/regrtest.py -.. _Lib/test/support: http://hg.python.org/cpython/file/default/Lib/test/support/ -.. _Lib/tkinter: http://hg.python.org/cpython/file/default/Lib/tkinter/ -.. _Lib/unittest: http://hg.python.org/cpython/file/default/Lib/unittest/ -.. _Lib/xml: http://hg.python.org/cpython/file/default/Lib/xml/ -.. _Modules: http://hg.python.org/cpython/file/default/Modules/ -.. _Modules/_io: http://hg.python.org/cpython/file/default/Modules/_io/ -.. _Modules/_sre.c: http://hg.python.org/cpython/file/default/Modules/_sre.c -.. _Objects: http://hg.python.org/cpython/file/default/Objects/ -.. _Objects/unicodeobject.c: http://hg.python.org/cpython/file/default/Objects/unicodeobject.c -.. _Parser: http://hg.python.org/cpython/file/default/Parser/ -.. _Python: http://hg.python.org/cpython/file/default/Python/ -.. _Tools: http://hg.python.org/cpython/file/default/Tools/ -.. 
_Tools/demo: http://hg.python.org/cpython/file/default/Tools/demo/ -.. _benchmarks: http://hg.python.org/benchmarks/ -.. _Developer's guide: http://hg.python.org/devguide/ +.. _CPython: https://hg.python.org/cpython/file/default/ +.. _Doc: https://hg.python.org/cpython/file/default/Doc/ +.. _Grammar: https://hg.python.org/cpython/file/default/Grammar/ +.. _Lib: https://hg.python.org/cpython/file/default/Lib/ +.. _Lib/lib2to3: https://hg.python.org/cpython/file/default/Lib/lib2to3/ +.. _Lib/ctypes: https://hg.python.org/cpython/file/default/Lib/ctypes/ +.. _Lib/distutils: https://hg.python.org/cpython/file/default/Lib/distutils/ +.. _Lib/doctest.py: https://hg.python.org/cpython/file/default/Lib/doctest.py +.. _Lib/idlelib: https://hg.python.org/cpython/file/default/Lib/idlelib/ +.. _Lib/io.py: https://hg.python.org/cpython/file/default/Lib/io.py +.. _Lib/re.py: https://hg.python.org/cpython/file/default/Lib/re.py +.. _Lib/test: https://hg.python.org/cpython/file/default/Lib/test/ +.. _Lib/test/regrtest.py: https://hg.python.org/cpython/file/default/Lib/test/regrtest.py +.. _Lib/test/support: https://hg.python.org/cpython/file/default/Lib/test/support/ +.. _Lib/tkinter: https://hg.python.org/cpython/file/default/Lib/tkinter/ +.. _Lib/unittest: https://hg.python.org/cpython/file/default/Lib/unittest/ +.. _Lib/xml: https://hg.python.org/cpython/file/default/Lib/xml/ +.. _Modules: https://hg.python.org/cpython/file/default/Modules/ +.. _Modules/_io: https://hg.python.org/cpython/file/default/Modules/_io/ +.. _Modules/_sre.c: https://hg.python.org/cpython/file/default/Modules/_sre.c +.. _Objects: https://hg.python.org/cpython/file/default/Objects/ +.. _Objects/unicodeobject.c: https://hg.python.org/cpython/file/default/Objects/unicodeobject.c +.. _Parser: https://hg.python.org/cpython/file/default/Parser/ +.. _Python: https://hg.python.org/cpython/file/default/Python/ +.. _Tools: https://hg.python.org/cpython/file/default/Tools/ +.. _Tools/demo: https://hg.python.org/cpython/file/default/Tools/demo/ +.. _benchmarks: https://hg.python.org/benchmarks/ +.. _Developer's guide: https://hg.python.org/devguide/ .. _GSoC: http://code.google.com/soc/ .. _issue tracker: http://bugs.python.org -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Sun Sep 14 20:20:49 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 18:20:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzg0?= =?utf-8?q?=3A_An_exception_in_Tkinter_callback_no_longer_crashes_the_prog?= =?utf-8?q?ram?= Message-ID: <20140914182046.116499.91003@mail.hg.python.org> http://hg.python.org/cpython/rev/c62fad86fac3 changeset: 92424:c62fad86fac3 branch: 3.4 parent: 92420:13acbb8939a8 user: Serhiy Storchaka date: Sun Sep 14 21:17:32 2014 +0300 summary: Issue #22384: An exception in Tkinter callback no longer crashes the program when it is run with pythonw.exe. Documented that Tk.report_callback_exception() is purposed to be overriden in applications. files: Lib/tkinter/__init__.py | 7 +++++-- Misc/NEWS | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -1917,9 +1917,12 @@ if os.path.isfile(base_py): exec(open(base_py).read(), dir) def report_callback_exception(self, exc, val, tb): - """Internal function. It reports exception on sys.stderr.""" + """Report callback exception on sys.stderr. 
+ + Applications may want to override this internal function, and + should when sys.stderr is None.""" import traceback - sys.stderr.write("Exception in Tkinter callback\n") + print("Exception in Tkinter callback", file=sys.stderr) sys.last_type = exc sys.last_value = val sys.last_traceback = tb diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #22384: An exception in Tkinter callback no longer crashes the program + when it is run with pythonw.exe. + - Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. - Issue #21147: sqlite3 now raises an exception if the request contains a null -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 20:20:49 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 18:20:49 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322384=3A_An_exception_in_Tkinter_callback_no_lo?= =?utf-8?q?nger_crashes_the_program?= Message-ID: <20140914182046.2954.44902@mail.hg.python.org> http://hg.python.org/cpython/rev/7191b14ca312 changeset: 92425:7191b14ca312 parent: 92421:6cdb7981eb0f parent: 92424:c62fad86fac3 user: Serhiy Storchaka date: Sun Sep 14 21:18:31 2014 +0300 summary: Issue #22384: An exception in Tkinter callback no longer crashes the program when it is run with pythonw.exe. Documented that Tk.report_callback_exception() is purposed to be overriden in applications. files: Lib/tkinter/__init__.py | 7 +++++-- Misc/NEWS | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Lib/tkinter/__init__.py b/Lib/tkinter/__init__.py --- a/Lib/tkinter/__init__.py +++ b/Lib/tkinter/__init__.py @@ -1919,9 +1919,12 @@ if os.path.isfile(base_py): exec(open(base_py).read(), dir) def report_callback_exception(self, exc, val, tb): - """Internal function. It reports exception on sys.stderr.""" + """Report callback exception on sys.stderr. + + Applications may want to override this internal function, and + should when sys.stderr is None.""" import traceback - sys.stderr.write("Exception in Tkinter callback\n") + print("Exception in Tkinter callback", file=sys.stderr) sys.last_type = exc sys.last_value = val sys.last_traceback = tb diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #22384: An exception in Tkinter callback no longer crashes the program + when it is run with pythonw.exe. + - Issue #22168: Prevent turtle AttributeError with non-default Canvas on OS X. - Issue #21147: sqlite3 now raises an exception if the request contains a null -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 20:20:49 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 14 Sep 2014 18:20:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyMzg0?= =?utf-8?q?=3A_An_exception_in_Tkinter_callback_no_longer_crashes_the_prog?= =?utf-8?q?ram?= Message-ID: <20140914182046.117587.28768@mail.hg.python.org> http://hg.python.org/cpython/rev/994a16b51544 changeset: 92423:994a16b51544 branch: 2.7 user: Serhiy Storchaka date: Sun Sep 14 21:17:16 2014 +0300 summary: Issue #22384: An exception in Tkinter callback no longer crashes the program when it is run with pythonw.exe. Documented that Tk.report_callback_exception() is purposed to be overriden in applications. 
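Since report_callback_exception() is now documented as an override point, an application that runs under pythonw.exe (where sys.stderr is None) can redirect callback errors somewhere visible. A minimal, illustrative override using the Python 3 module names (for 2.7 the modules are Tkinter and tkMessageBox); this is not part of the patch:

    import tkinter
    from tkinter import messagebox

    class App(tkinter.Tk):
        def report_callback_exception(self, exc, val, tb):
            # Report the error in a dialog instead of writing to a
            # possibly missing sys.stderr.
            messagebox.showerror("Error in callback", message=str(val))

    if __name__ == "__main__":
        app = App()
        app.after(100, lambda: 1 / 0)   # deliberately fail inside a callback
        app.mainloop()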
files: Lib/lib-tk/Tkinter.py | 7 +++++-- 1 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/lib-tk/Tkinter.py b/Lib/lib-tk/Tkinter.py --- a/Lib/lib-tk/Tkinter.py +++ b/Lib/lib-tk/Tkinter.py @@ -1879,9 +1879,12 @@ if os.path.isfile(base_py): execfile(base_py, dir) def report_callback_exception(self, exc, val, tb): - """Internal function. It reports exception on sys.stderr.""" + """Report callback exception on sys.stderr. + + Applications may want to override this internal function, and + should when sys.stderr is None.""" import traceback, sys - sys.stderr.write("Exception in Tkinter callback\n") + print >>sys.stderr, "Exception in Tkinter callback" sys.last_type = exc sys.last_value = val sys.last_traceback = tb -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Sep 14 22:29:30 2014 From: python-checkins at python.org (vinay.sajip) Date: Sun, 14 Sep 2014 20:29:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2320537=3A_logging?= =?utf-8?q?_methods_now_accept_an_exception_instance_as_well_as_a?= Message-ID: <20140914202927.126873.55374@mail.hg.python.org> http://hg.python.org/cpython/rev/9d54903a84b5 changeset: 92426:9d54903a84b5 user: Vinay Sajip date: Sun Sep 14 21:29:11 2014 +0100 summary: Closes #20537: logging methods now accept an exception instance as well as a Boolean value or exception tuple. Thanks to Yury Selivanov for the patch. files: Doc/library/logging.rst | 13 +++++++++---- Lib/logging/__init__.py | 19 +++++++++---------- Lib/test/test_logging.py | 13 +++++++++++++ Misc/NEWS | 3 +++ 4 files changed, 34 insertions(+), 14 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -155,11 +155,13 @@ *msg* using the string formatting operator. (Note that this means that you can use keywords in the format string, together with a single dictionary argument.) - There are three keyword arguments in *kwargs* which are inspected: *exc_info* - which, if it does not evaluate as false, causes exception information to be + There are three keyword arguments in *kwargs* which are inspected: + *exc_info*, *stack_info*, and *extra*. + + If *exc_info* does not evaluate as false, it causes exception information to be added to the logging message. If an exception tuple (in the format returned by - :func:`sys.exc_info`) is provided, it is used; otherwise, :func:`sys.exc_info` - is called to get the exception information. + :func:`sys.exc_info`) or an exception instance is provided, it is used; + otherwise, :func:`sys.exc_info` is called to get the exception information. The second optional keyword argument is *stack_info*, which defaults to ``False``. If true, stack information is added to the logging @@ -216,6 +218,9 @@ .. versionadded:: 3.2 The *stack_info* parameter was added. + .. versionchanged:: 3.5 + The *exc_info* parameter can now accept exception instances. + .. method:: Logger.info(msg, *args, **kwargs) diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py --- a/Lib/logging/__init__.py +++ b/Lib/logging/__init__.py @@ -1302,12 +1302,11 @@ if self.isEnabledFor(ERROR): self._log(ERROR, msg, args, **kwargs) - def exception(self, msg, *args, **kwargs): + def exception(self, msg, *args, exc_info=True, **kwargs): """ Convenience method for logging an ERROR with exception information. 
""" - kwargs['exc_info'] = True - self.error(msg, *args, **kwargs) + self.error(msg, *args, exc_info=exc_info, **kwargs) def critical(self, msg, *args, **kwargs): """ @@ -1402,7 +1401,9 @@ else: # pragma: no cover fn, lno, func = "(unknown file)", 0, "(unknown function)" if exc_info: - if not isinstance(exc_info, tuple): + if isinstance(exc_info, BaseException): + exc_info = (type(exc_info), exc_info, exc_info.__traceback__) + elif not isinstance(exc_info, tuple): exc_info = sys.exc_info() record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra, sinfo) @@ -1612,12 +1613,11 @@ """ self.log(ERROR, msg, *args, **kwargs) - def exception(self, msg, *args, **kwargs): + def exception(self, msg, *args, exc_info=True, **kwargs): """ Delegate an exception call to the underlying logger. """ - kwargs["exc_info"] = True - self.log(ERROR, msg, *args, **kwargs) + self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) def critical(self, msg, *args, **kwargs): """ @@ -1796,14 +1796,13 @@ basicConfig() root.error(msg, *args, **kwargs) -def exception(msg, *args, **kwargs): +def exception(msg, *args, exc_info=True, **kwargs): """ Log a message with severity 'ERROR' on the root logger, with exception information. If the logger has no handlers, basicConfig() is called to add a console handler with a pre-defined format. """ - kwargs['exc_info'] = True - error(msg, *args, **kwargs) + error(msg, *args, exc_info=exc_info, **kwargs) def warning(msg, *args, **kwargs): """ diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -3712,6 +3712,19 @@ self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) + def test_exception_excinfo(self): + try: + 1 / 0 + except ZeroDivisionError as e: + exc = e + + self.adapter.exception('exc_info test', exc_info=exc) + + self.assertEqual(len(self.recording.records), 1) + record = self.recording.records[0] + self.assertEqual(record.exc_info, + (exc.__class__, exc, exc.__traceback__)) + def test_critical(self): msg = 'critical test! %r' self.adapter.critical(msg, self.recording) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #20537: logging methods now accept an exception instance as well as a + Boolean value or exception tuple. Thanks to Yury Selivanov for the patch. + - Issue #22384: An exception in Tkinter callback no longer crashes the program when it is run with pythonw.exe. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 04:21:34 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 15 Sep 2014 02:21:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2317095=3A_merge_from_3=2E4?= Message-ID: <20140915022130.116497.88355@mail.hg.python.org> http://hg.python.org/cpython/rev/d3939f602e1f changeset: 92428:d3939f602e1f parent: 92426:9d54903a84b5 parent: 92427:781454f792c4 user: Ned Deily date: Sun Sep 14 19:21:05 2014 -0700 summary: Issue #17095: merge from 3.4 files: Modules/getpath.c | 14 -------------- 1 files changed, 0 insertions(+), 14 deletions(-) diff --git a/Modules/getpath.c b/Modules/getpath.c --- a/Modules/getpath.c +++ b/Modules/getpath.c @@ -734,11 +734,6 @@ bufsz += wcslen(zip_path) + 1; bufsz += wcslen(exec_prefix) + 1; - /* When running from the build directory, add room for the Modules - * subdirectory too. 
- */ - if (efound == -1) - bufsz += wcslen(argv0_path) + wcslen(L"Modules") + 2; buf = (wchar_t *)PyMem_Malloc(bufsz * sizeof(wchar_t)); if (buf == NULL) { @@ -786,15 +781,6 @@ /* Finally, on goes the directory for dynamic-load modules */ wcscat(buf, exec_prefix); - /* And, if we run from a build directory, the Modules directory (for - * modules built with Modules/Setup.) - */ - if (efound == -1) { - wcscat(buf, delimiter); - wcscat(buf, argv0_path); - wcscat(buf, separator); - wcscat(buf, L"Modules"); - } /* And publish the results */ module_search_path = buf; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 04:21:34 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 15 Sep 2014 02:21:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE3MDk1?= =?utf-8?q?=3A_Temporarily_revert_getpath=2Ec_change_that_added_the_Module?= =?utf-8?q?s?= Message-ID: <20140915022129.117581.17557@mail.hg.python.org> http://hg.python.org/cpython/rev/781454f792c4 changeset: 92427:781454f792c4 branch: 3.4 parent: 92424:c62fad86fac3 user: Ned Deily date: Sun Sep 14 19:19:49 2014 -0700 summary: Issue #17095: Temporarily revert getpath.c change that added the Modules directory to sys.path when running from a build directory. That has proven to be problematic for several standard library modules with C extension modules whose builds can fail on some platforms. files: Modules/getpath.c | 14 -------------- 1 files changed, 0 insertions(+), 14 deletions(-) diff --git a/Modules/getpath.c b/Modules/getpath.c --- a/Modules/getpath.c +++ b/Modules/getpath.c @@ -734,11 +734,6 @@ bufsz += wcslen(zip_path) + 1; bufsz += wcslen(exec_prefix) + 1; - /* When running from the build directory, add room for the Modules - * subdirectory too. - */ - if (efound == -1) - bufsz += wcslen(argv0_path) + wcslen(L"Modules") + 2; buf = (wchar_t *)PyMem_Malloc(bufsz * sizeof(wchar_t)); if (buf == NULL) { @@ -786,15 +781,6 @@ /* Finally, on goes the directory for dynamic-load modules */ wcscat(buf, exec_prefix); - /* And, if we run from a build directory, the Modules directory (for - * modules built with Modules/Setup.) - */ - if (efound == -1) { - wcscat(buf, delimiter); - wcscat(buf, argv0_path); - wcscat(buf, separator); - wcscat(buf, L"Modules"); - } /* And publish the results */ module_search_path = buf; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 10:37:37 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 15 Sep 2014 08:37:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Test_re_pickli?= =?utf-8?q?ng_for_all_protocols=2E?= Message-ID: <20140915083737.116485.49140@mail.hg.python.org> http://hg.python.org/cpython/rev/e6c7a5a94a1d changeset: 92431:e6c7a5a94a1d branch: 2.7 parent: 92423:994a16b51544 user: Serhiy Storchaka date: Mon Sep 15 11:35:06 2014 +0300 summary: Test re pickling for all protocols. 
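Returning briefly to the #20537 logging change a few messages up: a short sketch of the calling conventions it leaves in place (the failing dictionary lookup is only an illustration); the first two forms work on existing versions, the last one requires 3.5:

import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger(__name__)

try:
    {}['missing']
except KeyError as err:
    log.error("lookup failed", exc_info=True)   # let logging call sys.exc_info()
    log.error("lookup failed",                  # pass an explicit exception tuple
              exc_info=(type(err), err, err.__traceback__))
    log.error("lookup failed", exc_info=err)    # 3.5+: pass the instance itself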
files: Lib/test/test_re.py | 9 ++++++--- 1 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -560,12 +560,15 @@ # old pickles expect the _compile() reconstructor in sre module import_module("sre", deprecated=True) from sre import _compile + # current pickle expects the _compile() reconstructor in re module + from re import _compile def pickle_test(self, pickle): oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') - s = pickle.dumps(oldpat) - newpat = pickle.loads(s) - self.assertEqual(oldpat, newpat) + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + pickled = pickle.dumps(oldpat, proto) + newpat = pickle.loads(pickled) + self.assertEqual(newpat, oldpat) def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 10:37:37 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 15 Sep 2014 08:37:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Restored_re_pi?= =?utf-8?q?ckling_test=2E?= Message-ID: <20140915083737.116493.8619@mail.hg.python.org> http://hg.python.org/cpython/rev/0668b3daa84e changeset: 92429:0668b3daa84e branch: 3.4 parent: 92427:781454f792c4 user: Serhiy Storchaka date: Mon Sep 15 11:33:19 2014 +0300 summary: Restored re pickling test. files: Lib/test/test_re.py | 14 +++++++++----- 1 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -659,11 +659,15 @@ res = re.findall(re.escape('\u2620'.encode('utf-8')), b) self.assertEqual(len(res), 2) - def pickle_test(self, pickle): - oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') - s = pickle.dumps(oldpat) - newpat = pickle.loads(s) - self.assertEqual(oldpat, newpat) + def test_pickling(self): + import pickle + oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)', re.UNICODE) + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + pickled = pickle.dumps(oldpat, proto) + newpat = pickle.loads(pickled) + self.assertEqual(newpat, oldpat) + # current pickle expects the _compile() reconstructor in re module + from re import _compile def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 10:37:37 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 15 Sep 2014 08:37:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Restored_re_pickling_test=2E?= Message-ID: <20140915083737.116487.97112@mail.hg.python.org> http://hg.python.org/cpython/rev/49e4e3b74334 changeset: 92430:49e4e3b74334 parent: 92428:d3939f602e1f parent: 92429:0668b3daa84e user: Serhiy Storchaka date: Mon Sep 15 11:33:55 2014 +0300 summary: Restored re pickling test. 
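The behaviour this restored test exercises can be reproduced standalone: compiled patterns pickle through the re._compile() reconstructor, and with matching flags the compile cache hands back an equal pattern under every protocol. A sketch mirroring the test, not part of the patch:

import pickle
import re

oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)', re.UNICODE)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
    pickled = pickle.dumps(oldpat, proto)
    newpat = pickle.loads(pickled)
    # Unpickling calls re._compile(pattern, flags); the internal cache
    # returns the already-compiled pattern, so the round trip is lossless.
    assert newpat == oldpat, proto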
files: Lib/test/test_re.py | 14 +++++++++----- 1 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -659,11 +659,15 @@ res = re.findall(re.escape('\u2620'.encode('utf-8')), b) self.assertEqual(len(res), 2) - def pickle_test(self, pickle): - oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') - s = pickle.dumps(oldpat) - newpat = pickle.loads(s) - self.assertEqual(oldpat, newpat) + def test_pickling(self): + import pickle + oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)', re.UNICODE) + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + pickled = pickle.dumps(oldpat, proto) + newpat = pickle.loads(pickled) + self.assertEqual(newpat, oldpat) + # current pickle expects the _compile() reconstructor in re module + from re import _compile def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Mon Sep 15 10:41:56 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 15 Sep 2014 10:41:56 +0200 Subject: [Python-checkins] Daily reference leaks (9d54903a84b5): sum=151932 Message-ID: results for 9d54903a84b5 on branch "default" -------------------------------------------- test_codecs leaked [5825, 5825, 5825] references, sum=17475 test_codecs leaked [1172, 1174, 1174] memory blocks, sum=3520 test_collections leaked [0, -2, 0] references, sum=-2 test_distutils leaked [37735, 37735, 37735] references, sum=113205 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 0] references, sum=0 test_site leaked [2, -2, 0] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogXgNWIL', '-x'] From python-checkins at python.org Mon Sep 15 11:10:54 2014 From: python-checkins at python.org (lukasz.langa) Date: Mon, 15 Sep 2014 09:10:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2318159=3A_ConfigP?= =?utf-8?q?arser_getters_not_available_on_SectionProxy?= Message-ID: <20140915091054.116487.45162@mail.hg.python.org> http://hg.python.org/cpython/rev/2c46a4ded259 changeset: 92433:2c46a4ded259 user: ?ukasz Langa date: Mon Sep 15 02:08:41 2014 -0700 summary: Closes #18159: ConfigParser getters not available on SectionProxy files: Doc/library/configparser.rst | 46 +++- Lib/configparser.py | 170 ++++++++++++---- Lib/test/test_configparser.py | 223 ++++++++++++++++++++++ 3 files changed, 386 insertions(+), 53 deletions(-) diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst --- a/Doc/library/configparser.rst +++ b/Doc/library/configparser.rst @@ -162,10 +162,8 @@ True Apart from :meth:`getboolean`, config parsers also provide equivalent -:meth:`getint` and :meth:`getfloat` methods, but these are far less -useful since conversion using :func:`int` and :func:`float` is -sufficient for these types. - +:meth:`getint` and :meth:`getfloat` methods. You can register your own +converters and customize the provided ones. [1]_ Fallback Values --------------- @@ -555,10 +553,10 @@ Comment prefixes are strings that indicate the start of a valid comment within a config file. *comment_prefixes* are used only on otherwise empty lines - (optionally indented) whereas *inline_comment_prefixes* can be used - after every valid value (e.g. section names, options and empty lines - as well). 
By default inline comments are disabled and ``'#'`` and - ``';'`` are used as prefixes for whole line comments. + (optionally indented) whereas *inline_comment_prefixes* can be used after + every valid value (e.g. section names, options and empty lines as well). By + default inline comments are disabled and ``'#'`` and ``';'`` are used as + prefixes for whole line comments. .. versionchanged:: 3.2 In previous versions of :mod:`configparser` behaviour matched @@ -667,10 +665,26 @@ `dedicated documentation section <#interpolation-of-values>`_. :class:`RawConfigParser` has a default value of ``None``. +* *converters*, default value: not set + + Config parsers provide option value getters that perform type conversion. By + default :meth:`getint`, :meth:`getfloat`, and :meth:`getboolean` are + implemented. Should other getters be desirable, users may define them in + a subclass or pass a dictionary where each key is a name of the converter and + each value is a callable implementing said conversion. For instance, passing + ``{'decimal': decimal.Decimal}`` would add :meth:`getdecimal` on both the + parser object and all section proxies. In other words, it will be possible + to write both ``parser_instance.getdecimal('section', 'key', fallback=0)`` + and ``parser_instance['section'].getdecimal('key', 0)``. + + If the converter needs to access the state of the parser, it can be + implemented as a method on a config parser subclass. If the name of this + method starts with ``get``, it will be available on all section proxies, in + the dict-compatible form (see the ``getdecimal()`` example above). More advanced customization may be achieved by overriding default values of -these parser attributes. The defaults are defined on the classes, so they -may be overridden by subclasses or by attribute assignment. +these parser attributes. The defaults are defined on the classes, so they may +be overridden by subclasses or by attribute assignment. .. attribute:: BOOLEAN_STATES @@ -863,7 +877,7 @@ ConfigParser Objects -------------------- -.. class:: ConfigParser(defaults=None, dict_type=collections.OrderedDict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=configparser.DEFAULTSECT, interpolation=BasicInterpolation()) +.. class:: ConfigParser(defaults=None, dict_type=collections.OrderedDict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=configparser.DEFAULTSECT, interpolation=BasicInterpolation(), converters={}) The main configuration parser. When *defaults* is given, it is initialized into the dictionary of intrinsic defaults. When *dict_type* is given, it @@ -903,6 +917,12 @@ converts option names to lower case), the values ``foo %(bar)s`` and ``foo %(BAR)s`` are equivalent. + When *converters* is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its own corresponding :meth:`get*()` method on the parser + object and section proxies. + .. versionchanged:: 3.1 The default *dict_type* is :class:`collections.OrderedDict`. @@ -911,6 +931,9 @@ *empty_lines_in_values*, *default_section* and *interpolation* were added. + .. versionchanged:: 3.5 + The *converters* argument was added. + .. method:: defaults() @@ -1286,3 +1309,4 @@ .. 
[1] Config parsers allow for heavy customization. If you are interested in changing the behaviour outlined by the footnote reference, consult the `Customizing Parser Behaviour`_ section. + diff --git a/Lib/configparser.py b/Lib/configparser.py --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -17,7 +17,8 @@ __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, - empty_lines_in_values=True): + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): Create the parser. When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. @@ -47,6 +48,25 @@ When `allow_no_value' is True (default: False), options without values are accepted; the value presented for these is None. + When `default_section' is given, the name of the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. RawConfigParser object s don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildbot`` + inspired ExtendedInterpolation implementation. + + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + sections() Return all the configuration section names, sans DEFAULT. 
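To make the converters paragraph just added to this docstring concrete, a small usage sketch; the 'list' converter name and the [server] section are invented for illustration, and the getlist() methods are generated from the converter name:

from configparser import ConfigParser

# Registering a converter at construction time grows a matching getlist()
# on the parser itself and on every section proxy.
parser = ConfigParser(converters={'list': str.split})
parser.read_string("""
[server]
hosts = alpha beta gamma
timeout = 2.5
""")

print(parser.getlist('server', 'hosts'))                 # ['alpha', 'beta', 'gamma']
print(parser['server'].getlist('hosts'))                 # same, via the section proxy
print(parser.getlist('server', 'missing', fallback=[]))  # []
print(parser['server'].getfloat('timeout'))              # 2.5

The mapping stays reachable as parser.converters afterwards, so converters can also be registered or removed after construction, which is what the new ConvertersTestCase further down exercises.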
@@ -129,9 +149,11 @@ __all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] DEFAULTSECT = "DEFAULT" @@ -580,11 +602,12 @@ comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=DEFAULTSECT, - interpolation=_UNSET): + interpolation=_UNSET, converters=_UNSET): self._dict = dict_type self._sections = self._dict() self._defaults = self._dict() + self._converters = ConverterMapping(self) self._proxies = self._dict() self._proxies[default_section] = SectionProxy(self, default_section) if defaults: @@ -612,6 +635,8 @@ self._interpolation = self._DEFAULT_INTERPOLATION if self._interpolation is None: self._interpolation = Interpolation() + if converters is not _UNSET: + self._converters.update(converters) def defaults(self): return self._defaults @@ -775,36 +800,31 @@ def _get(self, section, conv, option, **kwargs): return conv(self.get(section, option, **kwargs)) - def getint(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): + def _get_conv(self, section, option, conv, *, raw=False, vars=None, + fallback=_UNSET, **kwargs): try: - return self._get(section, int, option, raw=raw, vars=vars) + return self._get(section, conv, option, raw=raw, vars=vars, + **kwargs) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise - else: - return fallback + return fallback + + # getint, getfloat and getboolean provided directly for backwards compat + def getint(self, section, option, *, raw=False, vars=None, + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, int, raw=raw, vars=vars, + fallback=fallback, **kwargs) def getfloat(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): - try: - return self._get(section, float, option, raw=raw, vars=vars) - except (NoSectionError, NoOptionError): - if fallback is _UNSET: - raise - else: - return fallback + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, float, raw=raw, vars=vars, + fallback=fallback, **kwargs) def getboolean(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): - try: - return self._get(section, self._convert_to_boolean, option, - raw=raw, vars=vars) - except (NoSectionError, NoOptionError): - if fallback is _UNSET: - raise - else: - return fallback + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, self._convert_to_boolean, + raw=raw, vars=vars, fallback=fallback, **kwargs) def items(self, section=_UNSET, raw=False, vars=None): """Return a list of (name, value) tuples for each option in a section. 
@@ -1154,6 +1174,10 @@ if not isinstance(value, str): raise TypeError("option values must be strings") + @property + def converters(self): + return self._converters + class ConfigParser(RawConfigParser): """ConfigParser implementing interpolation.""" @@ -1194,6 +1218,10 @@ """Creates a view on a section of the specified `name` in `parser`.""" self._parser = parser self._name = name + for conv in parser.converters: + key = 'get' + conv + getter = functools.partial(self.get, _impl=getattr(parser, key)) + setattr(self, key, getter) def __repr__(self): return ''.format(self._name) @@ -1227,22 +1255,6 @@ else: return self._parser.defaults() - def get(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.get(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getint(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getint(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getfloat(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getfloat(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getboolean(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getboolean(self._name, option, raw=raw, vars=vars, - fallback=fallback) - @property def parser(self): # The parser object of the proxy is read-only. @@ -1252,3 +1264,77 @@ def name(self): # The name of the section on a proxy is read-only. return self._name + + def get(self, option, fallback=None, *, raw=False, vars=None, + _impl=None, **kwargs): + """Get an option value. + + Unless `fallback` is provided, `None` will be returned if the option + is not found. + + """ + # If `_impl` is provided, it should be a getter method on the parser + # object that provides the desired type conversion. + if not _impl: + _impl = self._parser.get + return _impl(self._name, option, raw=raw, vars=vars, + fallback=fallback, **kwargs) + + +class ConverterMapping(MutableMapping): + """Enables reuse of get*() methods between the parser and section proxies. + + If a parser class implements a getter directly, the value for the given + key will be ``None``. The presence of the converter name here enables + section proxies to find and use the implementation on the parser class. + """ + + GETTERCRE = re.compile(r"^get(?P.+)$") + + def __init__(self, parser): + self._parser = parser + self._data = {} + for getter in dir(self._parser): + m = self.GETTERCRE.match(getter) + if not m or not callable(getattr(self._parser, getter)): + continue + self._data[m.group('name')] = None # See class docstring. 
+ + def __getitem__(self, key): + return self._data[key] + + def __setitem__(self, key, value): + try: + k = 'get' + key + except TypeError: + raise ValueError('Incompatible key: {} (type: {})' + ''.format(key, type(key))) + if k == 'get': + raise ValueError('Incompatible key: cannot use "" as a name') + self._data[key] = value + func = functools.partial(self._parser._get_conv, conv=value) + func.converter = value + setattr(self._parser, k, func) + for proxy in self._parser.values(): + getter = functools.partial(proxy.get, _impl=func) + setattr(proxy, k, getter) + + def __delitem__(self, key): + try: + k = 'get' + (key or None) + except TypeError: + raise KeyError(key) + del self._data[key] + for inst in itertools.chain((self._parser,), self._parser.values()): + try: + delattr(inst, k) + except AttributeError: + # don't raise since the entry was present in _data, silently + # clean up + continue + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py --- a/Lib/test/test_configparser.py +++ b/Lib/test/test_configparser.py @@ -1584,6 +1584,34 @@ """) self.assertEqual(repr(parser['section']), '') + def test_inconsistent_converters_state(self): + parser = configparser.ConfigParser() + import decimal + parser.converters['decimal'] = decimal.Decimal + parser.read_string(""" + [s1] + one = 1 + [s2] + two = 2 + """) + self.assertIn('decimal', parser.converters) + self.assertEqual(parser.getdecimal('s1', 'one'), 1) + self.assertEqual(parser.getdecimal('s2', 'two'), 2) + self.assertEqual(parser['s1'].getdecimal('one'), 1) + self.assertEqual(parser['s2'].getdecimal('two'), 2) + del parser.getdecimal + with self.assertRaises(AttributeError): + parser.getdecimal('s1', 'one') + self.assertIn('decimal', parser.converters) + del parser.converters['decimal'] + self.assertNotIn('decimal', parser.converters) + with self.assertRaises(AttributeError): + parser.getdecimal('s1', 'one') + with self.assertRaises(AttributeError): + parser['s1'].getdecimal('one') + with self.assertRaises(AttributeError): + parser['s2'].getdecimal('two') + class ExceptionPicklingTestCase(unittest.TestCase): """Tests for issue #13760: ConfigParser exceptions are not picklable.""" @@ -1763,6 +1791,7 @@ self.assertEqual(s['k2'], 'v2') self.assertEqual(s['k3'], 'v3;#//still v3# and still v3') + class ExceptionContextTestCase(unittest.TestCase): """ Test that implementation details doesn't leak through raising exceptions. 
""" @@ -1816,5 +1845,199 @@ config.remove_option('Section1', 'an_int') self.assertIs(cm.exception.__suppress_context__, True) + +class ConvertersTestCase(BasicTestCase, unittest.TestCase): + """Introduced in 3.5, issue #18159.""" + + config_class = configparser.ConfigParser + + def newconfig(self, defaults=None): + instance = super().newconfig(defaults=defaults) + instance.converters['list'] = lambda v: [e.strip() for e in v.split() + if e.strip()] + return instance + + def test_converters(self): + cfg = self.newconfig() + self.assertIn('boolean', cfg.converters) + self.assertIn('list', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertIsNotNone(cfg.converters['list']) + self.assertEqual(len(cfg.converters), 4) + with self.assertRaises(ValueError): + cfg.converters[''] = lambda v: v + with self.assertRaises(ValueError): + cfg.converters[None] = lambda v: v + cfg.read_string(""" + [s] + str = string + int = 1 + float = 0.5 + list = a b c d e f g + bool = yes + """) + s = cfg['s'] + self.assertEqual(s['str'], 'string') + self.assertEqual(s['int'], '1') + self.assertEqual(s['float'], '0.5') + self.assertEqual(s['list'], 'a b c d e f g') + self.assertEqual(s['bool'], 'yes') + self.assertEqual(cfg.get('s', 'str'), 'string') + self.assertEqual(cfg.get('s', 'int'), '1') + self.assertEqual(cfg.get('s', 'float'), '0.5') + self.assertEqual(cfg.get('s', 'list'), 'a b c d e f g') + self.assertEqual(cfg.get('s', 'bool'), 'yes') + self.assertEqual(cfg.get('s', 'str'), 'string') + self.assertEqual(cfg.getint('s', 'int'), 1) + self.assertEqual(cfg.getfloat('s', 'float'), 0.5) + self.assertEqual(cfg.getlist('s', 'list'), ['a', 'b', 'c', 'd', + 'e', 'f', 'g']) + self.assertEqual(cfg.getboolean('s', 'bool'), True) + self.assertEqual(s.get('str'), 'string') + self.assertEqual(s.getint('int'), 1) + self.assertEqual(s.getfloat('float'), 0.5) + self.assertEqual(s.getlist('list'), ['a', 'b', 'c', 'd', + 'e', 'f', 'g']) + self.assertEqual(s.getboolean('bool'), True) + with self.assertRaises(AttributeError): + cfg.getdecimal('s', 'float') + with self.assertRaises(AttributeError): + s.getdecimal('float') + import decimal + cfg.converters['decimal'] = decimal.Decimal + self.assertIn('decimal', cfg.converters) + self.assertIsNotNone(cfg.converters['decimal']) + self.assertEqual(len(cfg.converters), 5) + dec0_5 = decimal.Decimal('0.5') + self.assertEqual(cfg.getdecimal('s', 'float'), dec0_5) + self.assertEqual(s.getdecimal('float'), dec0_5) + del cfg.converters['decimal'] + self.assertNotIn('decimal', cfg.converters) + self.assertEqual(len(cfg.converters), 4) + with self.assertRaises(AttributeError): + cfg.getdecimal('s', 'float') + with self.assertRaises(AttributeError): + s.getdecimal('float') + with self.assertRaises(KeyError): + del cfg.converters['decimal'] + with self.assertRaises(KeyError): + del cfg.converters[''] + with self.assertRaises(KeyError): + del cfg.converters[None] + + +class BlatantOverrideConvertersTestCase(unittest.TestCase): + """What if somebody overrode a getboolean()? 
We want to make sure that in + this case the automatic converters do not kick in.""" + + config = """ + [one] + one = false + two = false + three = long story short + + [two] + one = false + two = false + three = four + """ + + def test_converters_at_init(self): + cfg = configparser.ConfigParser(converters={'len': len}) + cfg.read_string(self.config) + self._test_len(cfg) + self.assertIsNotNone(cfg.converters['len']) + + def test_inheritance(self): + class StrangeConfigParser(configparser.ConfigParser): + gettysburg = 'a historic borough in south central Pennsylvania' + + def getboolean(self, section, option, *, raw=False, vars=None, + fallback=configparser._UNSET): + if section == option: + return True + return super().getboolean(section, option, raw=raw, vars=vars, + fallback=fallback) + def getlen(self, section, option, *, raw=False, vars=None, + fallback=configparser._UNSET): + return self._get_conv(section, option, len, raw=raw, vars=vars, + fallback=fallback) + + cfg = StrangeConfigParser() + cfg.read_string(self.config) + self._test_len(cfg) + self.assertIsNone(cfg.converters['len']) + self.assertTrue(cfg.getboolean('one', 'one')) + self.assertTrue(cfg.getboolean('two', 'two')) + self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + cfg.converters['boolean'] = cfg._convert_to_boolean + self.assertFalse(cfg.getboolean('one', 'one')) + self.assertFalse(cfg.getboolean('two', 'two')) + self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + + def _test_len(self, cfg): + self.assertEqual(len(cfg.converters), 4) + self.assertIn('boolean', cfg.converters) + self.assertIn('len', cfg.converters) + self.assertNotIn('tysburg', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertEqual(cfg.getlen('one', 'one'), 5) + self.assertEqual(cfg.getlen('one', 'two'), 5) + self.assertEqual(cfg.getlen('one', 'three'), 16) + self.assertEqual(cfg.getlen('two', 'one'), 5) + self.assertEqual(cfg.getlen('two', 'two'), 5) + self.assertEqual(cfg.getlen('two', 'three'), 4) + self.assertEqual(cfg.getlen('two', 'four', fallback=0), 0) + with self.assertRaises(configparser.NoOptionError): + cfg.getlen('two', 'four') + self.assertEqual(cfg['one'].getlen('one'), 5) + self.assertEqual(cfg['one'].getlen('two'), 5) + self.assertEqual(cfg['one'].getlen('three'), 16) + self.assertEqual(cfg['two'].getlen('one'), 5) + self.assertEqual(cfg['two'].getlen('two'), 5) + self.assertEqual(cfg['two'].getlen('three'), 4) + self.assertEqual(cfg['two'].getlen('four', 0), 0) + self.assertEqual(cfg['two'].getlen('four'), None) + + def test_instance_assignment(self): + cfg = configparser.ConfigParser() + cfg.getboolean = lambda section, option: True + cfg.getlen = lambda section, option: len(cfg[section][option]) + cfg.read_string(self.config) + self.assertEqual(len(cfg.converters), 3) + self.assertIn('boolean', cfg.converters) + self.assertNotIn('len', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertTrue(cfg.getboolean('one', 'one')) + self.assertTrue(cfg.getboolean('two', 'two')) + self.assertTrue(cfg.getboolean('one', 'two')) + self.assertTrue(cfg.getboolean('two', 'one')) + cfg.converters['boolean'] = cfg._convert_to_boolean + self.assertFalse(cfg.getboolean('one', 'one')) + self.assertFalse(cfg.getboolean('two', 'two')) + 
self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + self.assertEqual(cfg.getlen('one', 'one'), 5) + self.assertEqual(cfg.getlen('one', 'two'), 5) + self.assertEqual(cfg.getlen('one', 'three'), 16) + self.assertEqual(cfg.getlen('two', 'one'), 5) + self.assertEqual(cfg.getlen('two', 'two'), 5) + self.assertEqual(cfg.getlen('two', 'three'), 4) + # If a getter impl is assigned straight to the instance, it won't + # be available on the section proxies. + with self.assertRaises(AttributeError): + self.assertEqual(cfg['one'].getlen('one'), 5) + with self.assertRaises(AttributeError): + self.assertEqual(cfg['two'].getlen('one'), 5) + + if __name__ == '__main__': unittest.main() -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 11:10:54 2014 From: python-checkins at python.org (lukasz.langa) Date: Mon, 15 Sep 2014 09:10:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_full-stop_whitespace_i?= =?utf-8?q?n_configparser_docs?= Message-ID: <20140915091054.116487.31713@mail.hg.python.org> http://hg.python.org/cpython/rev/23c1adede649 changeset: 92432:23c1adede649 parent: 92428:d3939f602e1f user: ?ukasz Langa date: Sun Sep 14 23:37:03 2014 -0700 summary: Fix full-stop whitespace in configparser docs files: Doc/library/configparser.rst | 86 ++++++++++++----------- 1 files changed, 44 insertions(+), 42 deletions(-) diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst --- a/Doc/library/configparser.rst +++ b/Doc/library/configparser.rst @@ -144,12 +144,13 @@ >>> float(topsecret['CompressionLevel']) 9.0 -Extracting Boolean values is not that simple, though. Passing the value -to ``bool()`` would do no good since ``bool('False')`` is still -``True``. This is why config parsers also provide :meth:`getboolean`. -This method is case-insensitive and recognizes Boolean values from -``'yes'``/``'no'``, ``'on'``/``'off'`` and ``'1'``/``'0'`` [1]_. -For example: +Since this task is so common, config parsers provide a range of handy getter +methods to handle integers, floats and booleans. The last one is the most +interesting because simply passing the value to ``bool()`` would do no good +since ``bool('False')`` is still ``True``. This is why config parsers also +provide :meth:`getboolean`. This method is case-insensitive and recognizes +Boolean values from ``'yes'``/``'no'``, ``'on'``/``'off'``, +``'true'``/``'false'`` and ``'1'``/``'0'`` [1]_. For example: .. doctest:: @@ -319,11 +320,11 @@ .. class:: ExtendedInterpolation() An alternative handler for interpolation which implements a more advanced - syntax, used for instance in ``zc.buildout``. Extended interpolation is + syntax, used for instance in ``zc.buildout``. Extended interpolation is using ``${section:option}`` to denote a value from a foreign section. - Interpolation can span multiple levels. For convenience, if the ``section:`` - part is omitted, interpolation defaults to the current section (and possibly - the default values from the special section). + Interpolation can span multiple levels. For convenience, if the + ``section:`` part is omitted, interpolation defaults to the current section + (and possibly the default values from the special section). For example, the configuration specified above with basic interpolation, would look like this with extended interpolation: @@ -401,13 +402,13 @@ * ``parser.popitem()`` never returns it. * ``parser.get(section, option, **kwargs)`` - the second argument is **not** - a fallback value. 
Note however that the section-level ``get()`` methods are + a fallback value. Note however that the section-level ``get()`` methods are compatible both with the mapping protocol and the classic configparser API. * ``parser.items()`` is compatible with the mapping protocol (returns a list of *section_name*, *section_proxy* pairs including the DEFAULTSECT). However, this method can also be invoked with arguments: ``parser.items(section, raw, - vars)``. The latter call returns a list of *option*, *value* pairs for + vars)``. The latter call returns a list of *option*, *value* pairs for a specified ``section``, with all interpolations expanded (unless ``raw=True`` is provided). @@ -541,9 +542,9 @@ * *delimiters*, default value: ``('=', ':')`` - Delimiters are substrings that delimit keys from values within a section. The - first occurrence of a delimiting substring on a line is considered a delimiter. - This means values (but not keys) can contain the delimiters. + Delimiters are substrings that delimit keys from values within a section. + The first occurrence of a delimiting substring on a line is considered + a delimiter. This means values (but not keys) can contain the delimiters. See also the *space_around_delimiters* argument to :meth:`ConfigParser.write`. @@ -554,10 +555,10 @@ Comment prefixes are strings that indicate the start of a valid comment within a config file. *comment_prefixes* are used only on otherwise empty lines - (optionally indented) whereas *inline_comment_prefixes* can be used after - every valid value (e.g. section names, options and empty lines as well). By - default inline comments are disabled and ``'#'`` and ``';'`` are used as - prefixes for whole line comments. + (optionally indented) whereas *inline_comment_prefixes* can be used + after every valid value (e.g. section names, options and empty lines + as well). By default inline comments are disabled and ``'#'`` and + ``';'`` are used as prefixes for whole line comments. .. versionchanged:: 3.2 In previous versions of :mod:`configparser` behaviour matched @@ -565,10 +566,10 @@ Please note that config parsers don't support escaping of comment prefixes so using *inline_comment_prefixes* may prevent users from specifying option - values with characters used as comment prefixes. When in doubt, avoid setting - *inline_comment_prefixes*. In any circumstances, the only way of storing - comment prefix characters at the beginning of a line in multiline values is to - interpolate the prefix, for example:: + values with characters used as comment prefixes. When in doubt, avoid + setting *inline_comment_prefixes*. In any circumstances, the only way of + storing comment prefix characters at the beginning of a line in multiline + values is to interpolate the prefix, for example:: >>> from configparser import ConfigParser, ExtendedInterpolation >>> parser = ConfigParser(interpolation=ExtendedInterpolation()) @@ -613,7 +614,7 @@ When set to ``True``, the parser will not allow for any section or option duplicates while reading from a single source (using :meth:`read_file`, - :meth:`read_string` or :meth:`read_dict`). It is recommended to use strict + :meth:`read_string` or :meth:`read_dict`). It is recommended to use strict parsers in new applications. .. versionchanged:: 3.2 @@ -648,12 +649,12 @@ The convention of allowing a special section of default values for other sections or interpolation purposes is a powerful concept of this library, - letting users create complex declarative configurations. 
This section is + letting users create complex declarative configurations. This section is normally called ``"DEFAULT"`` but this can be customized to point to any - other valid section name. Some typical values include: ``"general"`` or - ``"common"``. The name provided is used for recognizing default sections when - reading from any source and is used when writing configuration back to - a file. Its current value can be retrieved using the + other valid section name. Some typical values include: ``"general"`` or + ``"common"``. The name provided is used for recognizing default sections + when reading from any source and is used when writing configuration back to + a file. Its current value can be retrieved using the ``parser_instance.default_section`` attribute and may be modified at runtime (i.e. to convert files from one format to another). @@ -662,7 +663,7 @@ Interpolation behaviour may be customized by providing a custom handler through the *interpolation* argument. ``None`` can be used to turn off interpolation completely, ``ExtendedInterpolation()`` provides a more - advanced variant inspired by ``zc.buildout``. More on the subject in the + advanced variant inspired by ``zc.buildout``. More on the subject in the `dedicated documentation section <#interpolation-of-values>`_. :class:`RawConfigParser` has a default value of ``None``. @@ -727,10 +728,11 @@ .. attribute:: SECTCRE - A compiled regular expression used to parse section headers. The default - matches ``[section]`` to the name ``"section"``. Whitespace is considered part - of the section name, thus ``[ larch ]`` will be read as a section of name - ``" larch "``. Override this attribute if that's unsuitable. For example: + A compiled regular expression used to parse section headers. The default + matches ``[section]`` to the name ``"section"``. Whitespace is considered + part of the section name, thus ``[ larch ]`` will be read as a section of + name ``" larch "``. Override this attribute if that's unsuitable. For + example: .. doctest:: @@ -871,8 +873,8 @@ When *delimiters* is given, it is used as the set of substrings that divide keys from values. When *comment_prefixes* is given, it will be used as the set of substrings that prefix comments in otherwise empty lines. - Comments can be indented. When *inline_comment_prefixes* is given, it will be - used as the set of substrings that prefix comments in non-empty lines. + Comments can be indented. When *inline_comment_prefixes* is given, it will + be used as the set of substrings that prefix comments in non-empty lines. When *strict* is ``True`` (the default), the parser won't allow for any section or option duplicates while reading from a single source (file, @@ -886,13 +888,13 @@ When *default_section* is given, it specifies the name for the special section holding default values for other sections and interpolation purposes - (normally named ``"DEFAULT"``). This value can be retrieved and changed on + (normally named ``"DEFAULT"``). This value can be retrieved and changed on runtime using the ``default_section`` instance attribute. Interpolation behaviour may be customized by providing a custom handler through the *interpolation* argument. ``None`` can be used to turn off interpolation completely, ``ExtendedInterpolation()`` provides a more - advanced variant inspired by ``zc.buildout``. More on the subject in the + advanced variant inspired by ``zc.buildout``. More on the subject in the `dedicated documentation section <#interpolation-of-values>`_. 
All option names used in interpolation will be passed through the @@ -946,7 +948,7 @@ .. method:: has_option(section, option) If the given *section* exists, and contains the given *option*, return - :const:`True`; otherwise return :const:`False`. If the specified + :const:`True`; otherwise return :const:`False`. If the specified *section* is :const:`None` or an empty string, DEFAULT is assumed. @@ -1071,7 +1073,7 @@ :meth:`get` method. .. versionchanged:: 3.2 - Items present in *vars* no longer appear in the result. The previous + Items present in *vars* no longer appear in the result. The previous behaviour mixed actual parser options with variables provided for interpolation. @@ -1172,7 +1174,7 @@ .. note:: Consider using :class:`ConfigParser` instead which checks types of - the values to be stored internally. If you don't want interpolation, you + the values to be stored internally. If you don't want interpolation, you can use ``ConfigParser(interpolation=None)``. @@ -1183,7 +1185,7 @@ *default section* name is passed, :exc:`ValueError` is raised. Type of *section* is not checked which lets users create non-string named - sections. This behaviour is unsupported and may cause internal errors. + sections. This behaviour is unsupported and may cause internal errors. .. method:: set(section, option, value) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 11:10:55 2014 From: python-checkins at python.org (lukasz.langa) Date: Mon, 15 Sep 2014 09:10:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2318159=3A_ConfigParser_getters_not_available_on?= =?utf-8?q?_SectionProxy?= Message-ID: <20140915091054.117589.1776@mail.hg.python.org> http://hg.python.org/cpython/rev/5eb95d41ee43 changeset: 92434:5eb95d41ee43 parent: 92430:49e4e3b74334 parent: 92433:2c46a4ded259 user: ?ukasz Langa date: Mon Sep 15 02:10:01 2014 -0700 summary: Closes #18159: ConfigParser getters not available on SectionProxy files: Doc/library/configparser.rst | 118 +++++++---- Lib/configparser.py | 170 ++++++++++++---- Lib/test/test_configparser.py | 223 ++++++++++++++++++++++ 3 files changed, 423 insertions(+), 88 deletions(-) diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst --- a/Doc/library/configparser.rst +++ b/Doc/library/configparser.rst @@ -144,12 +144,13 @@ >>> float(topsecret['CompressionLevel']) 9.0 -Extracting Boolean values is not that simple, though. Passing the value -to ``bool()`` would do no good since ``bool('False')`` is still -``True``. This is why config parsers also provide :meth:`getboolean`. -This method is case-insensitive and recognizes Boolean values from -``'yes'``/``'no'``, ``'on'``/``'off'`` and ``'1'``/``'0'`` [1]_. -For example: +Since this task is so common, config parsers provide a range of handy getter +methods to handle integers, floats and booleans. The last one is the most +interesting because simply passing the value to ``bool()`` would do no good +since ``bool('False')`` is still ``True``. This is why config parsers also +provide :meth:`getboolean`. This method is case-insensitive and recognizes +Boolean values from ``'yes'``/``'no'``, ``'on'``/``'off'``, +``'true'``/``'false'`` and ``'1'``/``'0'`` [1]_. For example: .. 
doctest:: @@ -161,10 +162,8 @@ True Apart from :meth:`getboolean`, config parsers also provide equivalent -:meth:`getint` and :meth:`getfloat` methods, but these are far less -useful since conversion using :func:`int` and :func:`float` is -sufficient for these types. - +:meth:`getint` and :meth:`getfloat` methods. You can register your own +converters and customize the provided ones. [1]_ Fallback Values --------------- @@ -319,11 +318,11 @@ .. class:: ExtendedInterpolation() An alternative handler for interpolation which implements a more advanced - syntax, used for instance in ``zc.buildout``. Extended interpolation is + syntax, used for instance in ``zc.buildout``. Extended interpolation is using ``${section:option}`` to denote a value from a foreign section. - Interpolation can span multiple levels. For convenience, if the ``section:`` - part is omitted, interpolation defaults to the current section (and possibly - the default values from the special section). + Interpolation can span multiple levels. For convenience, if the + ``section:`` part is omitted, interpolation defaults to the current section + (and possibly the default values from the special section). For example, the configuration specified above with basic interpolation, would look like this with extended interpolation: @@ -401,13 +400,13 @@ * ``parser.popitem()`` never returns it. * ``parser.get(section, option, **kwargs)`` - the second argument is **not** - a fallback value. Note however that the section-level ``get()`` methods are + a fallback value. Note however that the section-level ``get()`` methods are compatible both with the mapping protocol and the classic configparser API. * ``parser.items()`` is compatible with the mapping protocol (returns a list of *section_name*, *section_proxy* pairs including the DEFAULTSECT). However, this method can also be invoked with arguments: ``parser.items(section, raw, - vars)``. The latter call returns a list of *option*, *value* pairs for + vars)``. The latter call returns a list of *option*, *value* pairs for a specified ``section``, with all interpolations expanded (unless ``raw=True`` is provided). @@ -541,9 +540,9 @@ * *delimiters*, default value: ``('=', ':')`` - Delimiters are substrings that delimit keys from values within a section. The - first occurrence of a delimiting substring on a line is considered a delimiter. - This means values (but not keys) can contain the delimiters. + Delimiters are substrings that delimit keys from values within a section. + The first occurrence of a delimiting substring on a line is considered + a delimiter. This means values (but not keys) can contain the delimiters. See also the *space_around_delimiters* argument to :meth:`ConfigParser.write`. @@ -555,7 +554,7 @@ Comment prefixes are strings that indicate the start of a valid comment within a config file. *comment_prefixes* are used only on otherwise empty lines (optionally indented) whereas *inline_comment_prefixes* can be used after - every valid value (e.g. section names, options and empty lines as well). By + every valid value (e.g. section names, options and empty lines as well). By default inline comments are disabled and ``'#'`` and ``';'`` are used as prefixes for whole line comments. @@ -565,10 +564,10 @@ Please note that config parsers don't support escaping of comment prefixes so using *inline_comment_prefixes* may prevent users from specifying option - values with characters used as comment prefixes. When in doubt, avoid setting - *inline_comment_prefixes*. 
In any circumstances, the only way of storing - comment prefix characters at the beginning of a line in multiline values is to - interpolate the prefix, for example:: + values with characters used as comment prefixes. When in doubt, avoid + setting *inline_comment_prefixes*. In any circumstances, the only way of + storing comment prefix characters at the beginning of a line in multiline + values is to interpolate the prefix, for example:: >>> from configparser import ConfigParser, ExtendedInterpolation >>> parser = ConfigParser(interpolation=ExtendedInterpolation()) @@ -613,7 +612,7 @@ When set to ``True``, the parser will not allow for any section or option duplicates while reading from a single source (using :meth:`read_file`, - :meth:`read_string` or :meth:`read_dict`). It is recommended to use strict + :meth:`read_string` or :meth:`read_dict`). It is recommended to use strict parsers in new applications. .. versionchanged:: 3.2 @@ -648,12 +647,12 @@ The convention of allowing a special section of default values for other sections or interpolation purposes is a powerful concept of this library, - letting users create complex declarative configurations. This section is + letting users create complex declarative configurations. This section is normally called ``"DEFAULT"`` but this can be customized to point to any - other valid section name. Some typical values include: ``"general"`` or - ``"common"``. The name provided is used for recognizing default sections when - reading from any source and is used when writing configuration back to - a file. Its current value can be retrieved using the + other valid section name. Some typical values include: ``"general"`` or + ``"common"``. The name provided is used for recognizing default sections + when reading from any source and is used when writing configuration back to + a file. Its current value can be retrieved using the ``parser_instance.default_section`` attribute and may be modified at runtime (i.e. to convert files from one format to another). @@ -662,14 +661,30 @@ Interpolation behaviour may be customized by providing a custom handler through the *interpolation* argument. ``None`` can be used to turn off interpolation completely, ``ExtendedInterpolation()`` provides a more - advanced variant inspired by ``zc.buildout``. More on the subject in the + advanced variant inspired by ``zc.buildout``. More on the subject in the `dedicated documentation section <#interpolation-of-values>`_. :class:`RawConfigParser` has a default value of ``None``. +* *converters*, default value: not set + + Config parsers provide option value getters that perform type conversion. By + default :meth:`getint`, :meth:`getfloat`, and :meth:`getboolean` are + implemented. Should other getters be desirable, users may define them in + a subclass or pass a dictionary where each key is a name of the converter and + each value is a callable implementing said conversion. For instance, passing + ``{'decimal': decimal.Decimal}`` would add :meth:`getdecimal` on both the + parser object and all section proxies. In other words, it will be possible + to write both ``parser_instance.getdecimal('section', 'key', fallback=0)`` + and ``parser_instance['section'].getdecimal('key', 0)``. + + If the converter needs to access the state of the parser, it can be + implemented as a method on a config parser subclass. If the name of this + method starts with ``get``, it will be available on all section proxies, in + the dict-compatible form (see the ``getdecimal()`` example above). 
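For the subclass route described in the paragraph above, a minimal sketch along the lines of the new tests; LenConfigParser and the token option are invented, and _get_conv() is the private helper the built-in getters delegate to:

import configparser


class LenConfigParser(configparser.ConfigParser):
    # Because the method name starts with "get", ConverterMapping picks it
    # up and every section proxy grows a matching getlen() automatically.
    def getlen(self, section, option, *, raw=False, vars=None,
               fallback=configparser._UNSET):
        return self._get_conv(section, option, len,
                              raw=raw, vars=vars, fallback=fallback)


cfg = LenConfigParser()
cfg.read_string("[s]\ntoken = abc123\n")
print(cfg.getlen('s', 'token'))      # 6
print(cfg['s'].getlen('token'))      # 6, via the section proxy
print(cfg['s'].getlen('absent', 0))  # 0, the proxy-style fallback

Overriding an existing getter such as getboolean() follows the same pattern, which is what the BlatantOverrideConvertersTestCase added in the test diff above checks.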
More advanced customization may be achieved by overriding default values of -these parser attributes. The defaults are defined on the classes, so they -may be overridden by subclasses or by attribute assignment. +these parser attributes. The defaults are defined on the classes, so they may +be overridden by subclasses or by attribute assignment. .. attribute:: BOOLEAN_STATES @@ -727,10 +742,11 @@ .. attribute:: SECTCRE - A compiled regular expression used to parse section headers. The default - matches ``[section]`` to the name ``"section"``. Whitespace is considered part - of the section name, thus ``[ larch ]`` will be read as a section of name - ``" larch "``. Override this attribute if that's unsuitable. For example: + A compiled regular expression used to parse section headers. The default + matches ``[section]`` to the name ``"section"``. Whitespace is considered + part of the section name, thus ``[ larch ]`` will be read as a section of + name ``" larch "``. Override this attribute if that's unsuitable. For + example: .. doctest:: @@ -861,7 +877,7 @@ ConfigParser Objects -------------------- -.. class:: ConfigParser(defaults=None, dict_type=collections.OrderedDict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=configparser.DEFAULTSECT, interpolation=BasicInterpolation()) +.. class:: ConfigParser(defaults=None, dict_type=collections.OrderedDict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=configparser.DEFAULTSECT, interpolation=BasicInterpolation(), converters={}) The main configuration parser. When *defaults* is given, it is initialized into the dictionary of intrinsic defaults. When *dict_type* is given, it @@ -871,8 +887,8 @@ When *delimiters* is given, it is used as the set of substrings that divide keys from values. When *comment_prefixes* is given, it will be used as the set of substrings that prefix comments in otherwise empty lines. - Comments can be indented. When *inline_comment_prefixes* is given, it will be - used as the set of substrings that prefix comments in non-empty lines. + Comments can be indented. When *inline_comment_prefixes* is given, it will + be used as the set of substrings that prefix comments in non-empty lines. When *strict* is ``True`` (the default), the parser won't allow for any section or option duplicates while reading from a single source (file, @@ -886,13 +902,13 @@ When *default_section* is given, it specifies the name for the special section holding default values for other sections and interpolation purposes - (normally named ``"DEFAULT"``). This value can be retrieved and changed on + (normally named ``"DEFAULT"``). This value can be retrieved and changed on runtime using the ``default_section`` instance attribute. Interpolation behaviour may be customized by providing a custom handler through the *interpolation* argument. ``None`` can be used to turn off interpolation completely, ``ExtendedInterpolation()`` provides a more - advanced variant inspired by ``zc.buildout``. More on the subject in the + advanced variant inspired by ``zc.buildout``. More on the subject in the `dedicated documentation section <#interpolation-of-values>`_. 
All option names used in interpolation will be passed through the @@ -901,6 +917,12 @@ converts option names to lower case), the values ``foo %(bar)s`` and ``foo %(BAR)s`` are equivalent. + When *converters* is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its own corresponding :meth:`get*()` method on the parser + object and section proxies. + .. versionchanged:: 3.1 The default *dict_type* is :class:`collections.OrderedDict`. @@ -909,6 +931,9 @@ *empty_lines_in_values*, *default_section* and *interpolation* were added. + .. versionchanged:: 3.5 + The *converters* argument was added. + .. method:: defaults() @@ -946,7 +971,7 @@ .. method:: has_option(section, option) If the given *section* exists, and contains the given *option*, return - :const:`True`; otherwise return :const:`False`. If the specified + :const:`True`; otherwise return :const:`False`. If the specified *section* is :const:`None` or an empty string, DEFAULT is assumed. @@ -1071,7 +1096,7 @@ :meth:`get` method. .. versionchanged:: 3.2 - Items present in *vars* no longer appear in the result. The previous + Items present in *vars* no longer appear in the result. The previous behaviour mixed actual parser options with variables provided for interpolation. @@ -1172,7 +1197,7 @@ .. note:: Consider using :class:`ConfigParser` instead which checks types of - the values to be stored internally. If you don't want interpolation, you + the values to be stored internally. If you don't want interpolation, you can use ``ConfigParser(interpolation=None)``. @@ -1183,7 +1208,7 @@ *default section* name is passed, :exc:`ValueError` is raised. Type of *section* is not checked which lets users create non-string named - sections. This behaviour is unsupported and may cause internal errors. + sections. This behaviour is unsupported and may cause internal errors. .. method:: set(section, option, value) @@ -1284,3 +1309,4 @@ .. [1] Config parsers allow for heavy customization. If you are interested in changing the behaviour outlined by the footnote reference, consult the `Customizing Parser Behaviour`_ section. + diff --git a/Lib/configparser.py b/Lib/configparser.py --- a/Lib/configparser.py +++ b/Lib/configparser.py @@ -17,7 +17,8 @@ __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, - empty_lines_in_values=True): + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): Create the parser. When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. @@ -47,6 +48,25 @@ When `allow_no_value' is True (default: False), options without values are accepted; the value presented for these is None. + When `default_section' is given, the name of the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. 
RawConfigParser object s don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildbot`` + inspired ExtendedInterpolation implementation. + + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + sections() Return all the configuration section names, sans DEFAULT. @@ -129,9 +149,11 @@ __all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] DEFAULTSECT = "DEFAULT" @@ -580,11 +602,12 @@ comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=DEFAULTSECT, - interpolation=_UNSET): + interpolation=_UNSET, converters=_UNSET): self._dict = dict_type self._sections = self._dict() self._defaults = self._dict() + self._converters = ConverterMapping(self) self._proxies = self._dict() self._proxies[default_section] = SectionProxy(self, default_section) if defaults: @@ -612,6 +635,8 @@ self._interpolation = self._DEFAULT_INTERPOLATION if self._interpolation is None: self._interpolation = Interpolation() + if converters is not _UNSET: + self._converters.update(converters) def defaults(self): return self._defaults @@ -775,36 +800,31 @@ def _get(self, section, conv, option, **kwargs): return conv(self.get(section, option, **kwargs)) - def getint(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): + def _get_conv(self, section, option, conv, *, raw=False, vars=None, + fallback=_UNSET, **kwargs): try: - return self._get(section, int, option, raw=raw, vars=vars) + return self._get(section, conv, option, raw=raw, vars=vars, + **kwargs) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise - else: - return fallback + return fallback + + # getint, getfloat and getboolean provided directly for backwards compat + def getint(self, section, option, *, raw=False, vars=None, + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, int, raw=raw, vars=vars, + fallback=fallback, **kwargs) def getfloat(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): - try: - return self._get(section, float, option, raw=raw, vars=vars) - except (NoSectionError, NoOptionError): - if fallback is _UNSET: - raise - else: - return fallback + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, float, raw=raw, vars=vars, + fallback=fallback, **kwargs) def getboolean(self, section, option, *, raw=False, vars=None, - fallback=_UNSET): - try: - return self._get(section, self._convert_to_boolean, option, - raw=raw, vars=vars) - except (NoSectionError, NoOptionError): - if fallback is _UNSET: - raise - else: - return fallback + fallback=_UNSET, **kwargs): + return self._get_conv(section, option, self._convert_to_boolean, + raw=raw, vars=vars, fallback=fallback, **kwargs) def items(self, 
section=_UNSET, raw=False, vars=None): """Return a list of (name, value) tuples for each option in a section. @@ -1154,6 +1174,10 @@ if not isinstance(value, str): raise TypeError("option values must be strings") + @property + def converters(self): + return self._converters + class ConfigParser(RawConfigParser): """ConfigParser implementing interpolation.""" @@ -1194,6 +1218,10 @@ """Creates a view on a section of the specified `name` in `parser`.""" self._parser = parser self._name = name + for conv in parser.converters: + key = 'get' + conv + getter = functools.partial(self.get, _impl=getattr(parser, key)) + setattr(self, key, getter) def __repr__(self): return ''.format(self._name) @@ -1227,22 +1255,6 @@ else: return self._parser.defaults() - def get(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.get(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getint(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getint(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getfloat(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getfloat(self._name, option, raw=raw, vars=vars, - fallback=fallback) - - def getboolean(self, option, fallback=None, *, raw=False, vars=None): - return self._parser.getboolean(self._name, option, raw=raw, vars=vars, - fallback=fallback) - @property def parser(self): # The parser object of the proxy is read-only. @@ -1252,3 +1264,77 @@ def name(self): # The name of the section on a proxy is read-only. return self._name + + def get(self, option, fallback=None, *, raw=False, vars=None, + _impl=None, **kwargs): + """Get an option value. + + Unless `fallback` is provided, `None` will be returned if the option + is not found. + + """ + # If `_impl` is provided, it should be a getter method on the parser + # object that provides the desired type conversion. + if not _impl: + _impl = self._parser.get + return _impl(self._name, option, raw=raw, vars=vars, + fallback=fallback, **kwargs) + + +class ConverterMapping(MutableMapping): + """Enables reuse of get*() methods between the parser and section proxies. + + If a parser class implements a getter directly, the value for the given + key will be ``None``. The presence of the converter name here enables + section proxies to find and use the implementation on the parser class. + """ + + GETTERCRE = re.compile(r"^get(?P.+)$") + + def __init__(self, parser): + self._parser = parser + self._data = {} + for getter in dir(self._parser): + m = self.GETTERCRE.match(getter) + if not m or not callable(getattr(self._parser, getter)): + continue + self._data[m.group('name')] = None # See class docstring. 
+ + def __getitem__(self, key): + return self._data[key] + + def __setitem__(self, key, value): + try: + k = 'get' + key + except TypeError: + raise ValueError('Incompatible key: {} (type: {})' + ''.format(key, type(key))) + if k == 'get': + raise ValueError('Incompatible key: cannot use "" as a name') + self._data[key] = value + func = functools.partial(self._parser._get_conv, conv=value) + func.converter = value + setattr(self._parser, k, func) + for proxy in self._parser.values(): + getter = functools.partial(proxy.get, _impl=func) + setattr(proxy, k, getter) + + def __delitem__(self, key): + try: + k = 'get' + (key or None) + except TypeError: + raise KeyError(key) + del self._data[key] + for inst in itertools.chain((self._parser,), self._parser.values()): + try: + delattr(inst, k) + except AttributeError: + # don't raise since the entry was present in _data, silently + # clean up + continue + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) diff --git a/Lib/test/test_configparser.py b/Lib/test/test_configparser.py --- a/Lib/test/test_configparser.py +++ b/Lib/test/test_configparser.py @@ -1584,6 +1584,34 @@ """) self.assertEqual(repr(parser['section']), '') + def test_inconsistent_converters_state(self): + parser = configparser.ConfigParser() + import decimal + parser.converters['decimal'] = decimal.Decimal + parser.read_string(""" + [s1] + one = 1 + [s2] + two = 2 + """) + self.assertIn('decimal', parser.converters) + self.assertEqual(parser.getdecimal('s1', 'one'), 1) + self.assertEqual(parser.getdecimal('s2', 'two'), 2) + self.assertEqual(parser['s1'].getdecimal('one'), 1) + self.assertEqual(parser['s2'].getdecimal('two'), 2) + del parser.getdecimal + with self.assertRaises(AttributeError): + parser.getdecimal('s1', 'one') + self.assertIn('decimal', parser.converters) + del parser.converters['decimal'] + self.assertNotIn('decimal', parser.converters) + with self.assertRaises(AttributeError): + parser.getdecimal('s1', 'one') + with self.assertRaises(AttributeError): + parser['s1'].getdecimal('one') + with self.assertRaises(AttributeError): + parser['s2'].getdecimal('two') + class ExceptionPicklingTestCase(unittest.TestCase): """Tests for issue #13760: ConfigParser exceptions are not picklable.""" @@ -1763,6 +1791,7 @@ self.assertEqual(s['k2'], 'v2') self.assertEqual(s['k3'], 'v3;#//still v3# and still v3') + class ExceptionContextTestCase(unittest.TestCase): """ Test that implementation details doesn't leak through raising exceptions. 
""" @@ -1816,5 +1845,199 @@ config.remove_option('Section1', 'an_int') self.assertIs(cm.exception.__suppress_context__, True) + +class ConvertersTestCase(BasicTestCase, unittest.TestCase): + """Introduced in 3.5, issue #18159.""" + + config_class = configparser.ConfigParser + + def newconfig(self, defaults=None): + instance = super().newconfig(defaults=defaults) + instance.converters['list'] = lambda v: [e.strip() for e in v.split() + if e.strip()] + return instance + + def test_converters(self): + cfg = self.newconfig() + self.assertIn('boolean', cfg.converters) + self.assertIn('list', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertIsNotNone(cfg.converters['list']) + self.assertEqual(len(cfg.converters), 4) + with self.assertRaises(ValueError): + cfg.converters[''] = lambda v: v + with self.assertRaises(ValueError): + cfg.converters[None] = lambda v: v + cfg.read_string(""" + [s] + str = string + int = 1 + float = 0.5 + list = a b c d e f g + bool = yes + """) + s = cfg['s'] + self.assertEqual(s['str'], 'string') + self.assertEqual(s['int'], '1') + self.assertEqual(s['float'], '0.5') + self.assertEqual(s['list'], 'a b c d e f g') + self.assertEqual(s['bool'], 'yes') + self.assertEqual(cfg.get('s', 'str'), 'string') + self.assertEqual(cfg.get('s', 'int'), '1') + self.assertEqual(cfg.get('s', 'float'), '0.5') + self.assertEqual(cfg.get('s', 'list'), 'a b c d e f g') + self.assertEqual(cfg.get('s', 'bool'), 'yes') + self.assertEqual(cfg.get('s', 'str'), 'string') + self.assertEqual(cfg.getint('s', 'int'), 1) + self.assertEqual(cfg.getfloat('s', 'float'), 0.5) + self.assertEqual(cfg.getlist('s', 'list'), ['a', 'b', 'c', 'd', + 'e', 'f', 'g']) + self.assertEqual(cfg.getboolean('s', 'bool'), True) + self.assertEqual(s.get('str'), 'string') + self.assertEqual(s.getint('int'), 1) + self.assertEqual(s.getfloat('float'), 0.5) + self.assertEqual(s.getlist('list'), ['a', 'b', 'c', 'd', + 'e', 'f', 'g']) + self.assertEqual(s.getboolean('bool'), True) + with self.assertRaises(AttributeError): + cfg.getdecimal('s', 'float') + with self.assertRaises(AttributeError): + s.getdecimal('float') + import decimal + cfg.converters['decimal'] = decimal.Decimal + self.assertIn('decimal', cfg.converters) + self.assertIsNotNone(cfg.converters['decimal']) + self.assertEqual(len(cfg.converters), 5) + dec0_5 = decimal.Decimal('0.5') + self.assertEqual(cfg.getdecimal('s', 'float'), dec0_5) + self.assertEqual(s.getdecimal('float'), dec0_5) + del cfg.converters['decimal'] + self.assertNotIn('decimal', cfg.converters) + self.assertEqual(len(cfg.converters), 4) + with self.assertRaises(AttributeError): + cfg.getdecimal('s', 'float') + with self.assertRaises(AttributeError): + s.getdecimal('float') + with self.assertRaises(KeyError): + del cfg.converters['decimal'] + with self.assertRaises(KeyError): + del cfg.converters[''] + with self.assertRaises(KeyError): + del cfg.converters[None] + + +class BlatantOverrideConvertersTestCase(unittest.TestCase): + """What if somebody overrode a getboolean()? 
We want to make sure that in + this case the automatic converters do not kick in.""" + + config = """ + [one] + one = false + two = false + three = long story short + + [two] + one = false + two = false + three = four + """ + + def test_converters_at_init(self): + cfg = configparser.ConfigParser(converters={'len': len}) + cfg.read_string(self.config) + self._test_len(cfg) + self.assertIsNotNone(cfg.converters['len']) + + def test_inheritance(self): + class StrangeConfigParser(configparser.ConfigParser): + gettysburg = 'a historic borough in south central Pennsylvania' + + def getboolean(self, section, option, *, raw=False, vars=None, + fallback=configparser._UNSET): + if section == option: + return True + return super().getboolean(section, option, raw=raw, vars=vars, + fallback=fallback) + def getlen(self, section, option, *, raw=False, vars=None, + fallback=configparser._UNSET): + return self._get_conv(section, option, len, raw=raw, vars=vars, + fallback=fallback) + + cfg = StrangeConfigParser() + cfg.read_string(self.config) + self._test_len(cfg) + self.assertIsNone(cfg.converters['len']) + self.assertTrue(cfg.getboolean('one', 'one')) + self.assertTrue(cfg.getboolean('two', 'two')) + self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + cfg.converters['boolean'] = cfg._convert_to_boolean + self.assertFalse(cfg.getboolean('one', 'one')) + self.assertFalse(cfg.getboolean('two', 'two')) + self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + + def _test_len(self, cfg): + self.assertEqual(len(cfg.converters), 4) + self.assertIn('boolean', cfg.converters) + self.assertIn('len', cfg.converters) + self.assertNotIn('tysburg', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertEqual(cfg.getlen('one', 'one'), 5) + self.assertEqual(cfg.getlen('one', 'two'), 5) + self.assertEqual(cfg.getlen('one', 'three'), 16) + self.assertEqual(cfg.getlen('two', 'one'), 5) + self.assertEqual(cfg.getlen('two', 'two'), 5) + self.assertEqual(cfg.getlen('two', 'three'), 4) + self.assertEqual(cfg.getlen('two', 'four', fallback=0), 0) + with self.assertRaises(configparser.NoOptionError): + cfg.getlen('two', 'four') + self.assertEqual(cfg['one'].getlen('one'), 5) + self.assertEqual(cfg['one'].getlen('two'), 5) + self.assertEqual(cfg['one'].getlen('three'), 16) + self.assertEqual(cfg['two'].getlen('one'), 5) + self.assertEqual(cfg['two'].getlen('two'), 5) + self.assertEqual(cfg['two'].getlen('three'), 4) + self.assertEqual(cfg['two'].getlen('four', 0), 0) + self.assertEqual(cfg['two'].getlen('four'), None) + + def test_instance_assignment(self): + cfg = configparser.ConfigParser() + cfg.getboolean = lambda section, option: True + cfg.getlen = lambda section, option: len(cfg[section][option]) + cfg.read_string(self.config) + self.assertEqual(len(cfg.converters), 3) + self.assertIn('boolean', cfg.converters) + self.assertNotIn('len', cfg.converters) + self.assertIsNone(cfg.converters['int']) + self.assertIsNone(cfg.converters['float']) + self.assertIsNone(cfg.converters['boolean']) + self.assertTrue(cfg.getboolean('one', 'one')) + self.assertTrue(cfg.getboolean('two', 'two')) + self.assertTrue(cfg.getboolean('one', 'two')) + self.assertTrue(cfg.getboolean('two', 'one')) + cfg.converters['boolean'] = cfg._convert_to_boolean + self.assertFalse(cfg.getboolean('one', 'one')) + self.assertFalse(cfg.getboolean('two', 'two')) + 
self.assertFalse(cfg.getboolean('one', 'two')) + self.assertFalse(cfg.getboolean('two', 'one')) + self.assertEqual(cfg.getlen('one', 'one'), 5) + self.assertEqual(cfg.getlen('one', 'two'), 5) + self.assertEqual(cfg.getlen('one', 'three'), 16) + self.assertEqual(cfg.getlen('two', 'one'), 5) + self.assertEqual(cfg.getlen('two', 'two'), 5) + self.assertEqual(cfg.getlen('two', 'three'), 4) + # If a getter impl is assigned straight to the instance, it won't + # be available on the section proxies. + with self.assertRaises(AttributeError): + self.assertEqual(cfg['one'].getlen('one'), 5) + with self.assertRaises(AttributeError): + self.assertEqual(cfg['two'].getlen('one'), 5) + + if __name__ == '__main__': unittest.main() -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 13:31:49 2014 From: python-checkins at python.org (victor.stinner) Date: Mon, 15 Sep 2014 11:31:49 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_418=3A_Fix_clock=5Fresolu?= =?utf-8?q?tion=2Epy_script?= Message-ID: <20140915113146.116491.19035@mail.hg.python.org> http://hg.python.org/peps/rev/c834ee0da60d changeset: 5552:c834ee0da60d user: Victor Stinner date: Mon Sep 15 13:31:42 2014 +0200 summary: PEP 418: Fix clock_resolution.py script files: pep-0418/clock_resolution.py | 26 +++++++++++------------ 1 files changed, 12 insertions(+), 14 deletions(-) diff --git a/pep-0418/clock_resolution.py b/pep-0418/clock_resolution.py --- a/pep-0418/clock_resolution.py +++ b/pep-0418/clock_resolution.py @@ -5,8 +5,8 @@ except ImportError: from time import time as timeout_time -def compute_precision(func): - precision = None +def compute_resolution(func): + resolution = None points = 0 timeout = timeout_time() + 1.0 previous = func() @@ -21,13 +21,13 @@ dt = t2 - previous if dt <= 0.0: continue - if precision is not None: - precision = min(precision, dt) + if resolution is not None: + resolution = min(resolution, dt) else: - precision = dt + resolution = dt points += 1 previous = func() - return precision + return resolution def format_duration(dt): if dt >= 1e-3: @@ -39,8 +39,8 @@ def test_clock(name, func): print("%s:" % name) - precision = compute_precision(func) - print("- precision in Python: %s" % format_duration(precision)) + resolution = compute_resolution(func) + print("- resolution in Python: %s" % format_duration(resolution)) clocks = ['clock', 'perf_counter', 'process_time'] @@ -51,10 +51,8 @@ func = getattr(time, name) test_clock("%s()" % name, func) info = time.get_clock_info(name) - if 'precision' in info: - print("- announced precision: %s" % format_duration(info['precision'])) - print("- implementation: %s" % info['implementation']) - print("- resolution: %s" % format_duration(info['resolution'])) + print("- implementation: %s" % info.implementation) + print("- resolution: %s" % format_duration(info.resolution)) clock_ids = [name for name in dir(time) if name.startswith("CLOCK_")] clock_ids.sort() @@ -69,6 +67,6 @@ print("%s failed: %s" % (name, err)) continue test_clock(name, gettime) - precision = time.clock_getres(clock_id) - print("- announced precision: %s" % format_duration(precision)) + resolution = time.clock_getres(clock_id) + print("- announced resolution: %s" % format_duration(resolution)) -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Mon Sep 15 13:56:04 2014 From: python-checkins at python.org (nick.coghlan) Date: Mon, 15 Sep 2014 11:56:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= 
=?utf-8?q?=29=3A_Merge_fix_for_issue_=2322166_from_3=2E4?= Message-ID: <20140915115534.117587.63734@mail.hg.python.org> http://hg.python.org/cpython/rev/322ee2f2e922 changeset: 92436:322ee2f2e922 parent: 92434:5eb95d41ee43 parent: 92435:fcf45ec7863e user: Nick Coghlan date: Mon Sep 15 23:55:16 2014 +1200 summary: Merge fix for issue #22166 from 3.4 files: Include/codecs.h | 4 ++ Lib/test/test_codecs.py | 14 +++++++ Misc/NEWS | 4 ++ Modules/_codecsmodule.c | 54 +++++++++++++++++++++++++++++ Python/codecs.c | 26 +++++++++++++ 5 files changed, 102 insertions(+), 0 deletions(-) diff --git a/Include/codecs.h b/Include/codecs.h --- a/Include/codecs.h +++ b/Include/codecs.h @@ -49,6 +49,10 @@ PyAPI_FUNC(PyObject *) _PyCodec_Lookup( const char *encoding ); + +PyAPI_FUNC(int) _PyCodec_Forget( + const char *encoding + ); #endif /* Codec registry encoding check API. diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -2586,6 +2586,14 @@ return _TEST_CODECS.get(codec_name) codecs.register(_get_test_codec) # Returns None, not usable as a decorator +try: + # Issue #22166: Also need to clear the internal cache in CPython + from _codecs import _forget_codec +except ImportError: + def _forget_codec(codec_name): + pass + + class ExceptionChainingTest(unittest.TestCase): def setUp(self): @@ -2611,6 +2619,12 @@ def tearDown(self): _TEST_CODECS.pop(self.codec_name, None) + # Issue #22166: Also pop from caches to avoid appearance of ref leaks + encodings._cache.pop(self.codec_name, None) + try: + _forget_codec(self.codec_name) + except KeyError: + pass def set_codec(self, encode, decode): codec_info = codecs.CodecInfo(encode, decode, diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -525,6 +525,10 @@ - Issue #21525: Most Tkinter methods which accepted tuples now accept lists too. +- Issue #22166: with the assistance of a new internal _codecs._forget_codec + helping function, test_codecs now clears the encoding caches to avoid the + appearance of a reference leak + - Issue #22236: Tkinter tests now don't reuse default root window. New root window is created for every test class. 
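
The cache-clearing pattern used in the tearDown above can be condensed into a small sketch; the ``forget_codec`` helper name is invented, and only the two caches touched by this change are involved:

    import encodings

    try:
        # Issue #22166: CPython-only helper added by this change; other
        # implementations may not have it, so fall back to a no-op.
        from _codecs import _forget_codec
    except ImportError:
        def _forget_codec(codec_name):
            pass

    def forget_codec(codec_name):
        # Drop a temporarily registered codec from the encodings package
        # cache and from CPython's internal codec lookup cache.
        encodings._cache.pop(codec_name, None)
        try:
            _forget_codec(codec_name)
        except KeyError:
            pass
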
diff --git a/Modules/_codecsmodule.c b/Modules/_codecsmodule.c --- a/Modules/_codecsmodule.c +++ b/Modules/_codecsmodule.c @@ -42,6 +42,12 @@ #include #endif +/*[clinic input] +module _codecs +[clinic start generated code]*/ +/*[clinic end generated code: output=da39a3ee5e6b4b0d input=e1390e3da3cb9deb]*/ + + /* --- Registry ----------------------------------------------------------- */ PyDoc_STRVAR(register__doc__, @@ -138,6 +144,53 @@ /* --- Helpers ------------------------------------------------------------ */ +/*[clinic input] +_codecs._forget_codec + + encoding: str + / + +Purge the named codec from the internal codec lookup cache +[clinic start generated code]*/ + +PyDoc_STRVAR(_codecs__forget_codec__doc__, +"_forget_codec($module, encoding, /)\n" +"--\n" +"\n" +"Purge the named codec from the internal codec lookup cache"); + +#define _CODECS__FORGET_CODEC_METHODDEF \ + {"_forget_codec", (PyCFunction)_codecs__forget_codec, METH_VARARGS, _codecs__forget_codec__doc__}, + +static PyObject * +_codecs__forget_codec_impl(PyModuleDef *module, const char *encoding); + +static PyObject * +_codecs__forget_codec(PyModuleDef *module, PyObject *args) +{ + PyObject *return_value = NULL; + const char *encoding; + + if (!PyArg_ParseTuple(args, + "s:_forget_codec", + &encoding)) + goto exit; + return_value = _codecs__forget_codec_impl(module, encoding); + +exit: + return return_value; +} + +static PyObject * +_codecs__forget_codec_impl(PyModuleDef *module, const char *encoding) +/*[clinic end generated code: output=a75e631591702a5c input=18d5d92d0e386c38]*/ +{ + if (_PyCodec_Forget(encoding) < 0) { + return NULL; + }; + Py_RETURN_NONE; +} + static PyObject *codec_tuple(PyObject *unicode, Py_ssize_t len) @@ -1172,6 +1225,7 @@ register_error__doc__}, {"lookup_error", lookup_error, METH_VARARGS, lookup_error__doc__}, + _CODECS__FORGET_CODEC_METHODDEF {NULL, NULL} /* sentinel */ }; diff --git a/Python/codecs.c b/Python/codecs.c --- a/Python/codecs.c +++ b/Python/codecs.c @@ -185,6 +185,32 @@ return NULL; } +int _PyCodec_Forget(const char *encoding) +{ + PyInterpreterState *interp; + PyObject *v; + int result; + + interp = PyThreadState_GET()->interp; + if (interp->codec_search_path == NULL) { + return -1; + } + + /* Convert the encoding to a normalized Python string: all + characters are converted to lower case, spaces and hyphens are + replaced with underscores. */ + v = normalizestring(encoding); + if (v == NULL) { + return -1; + } + + /* Drop the named codec from the internal cache */ + result = PyDict_DelItem(interp->codec_search_cache, v); + Py_DECREF(v); + + return result; +} + /* Codec registry encoding check API. 
*/ int PyCodec_KnownEncoding(const char *encoding) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 13:56:04 2014 From: python-checkins at python.org (nick.coghlan) Date: Mon, 15 Sep 2014 11:56:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMTY2?= =?utf-8?q?=3A_clear_codec_caches_in_test=5Fcodecs?= Message-ID: <20140915115534.117587.24447@mail.hg.python.org> http://hg.python.org/cpython/rev/fcf45ec7863e changeset: 92435:fcf45ec7863e branch: 3.4 parent: 92429:0668b3daa84e user: Nick Coghlan date: Mon Sep 15 23:50:44 2014 +1200 summary: Issue #22166: clear codec caches in test_codecs files: Include/codecs.h | 4 ++ Lib/test/test_codecs.py | 14 +++++++ Misc/NEWS | 4 ++ Modules/_codecsmodule.c | 54 +++++++++++++++++++++++++++++ Python/codecs.c | 26 +++++++++++++ 5 files changed, 102 insertions(+), 0 deletions(-) diff --git a/Include/codecs.h b/Include/codecs.h --- a/Include/codecs.h +++ b/Include/codecs.h @@ -49,6 +49,10 @@ PyAPI_FUNC(PyObject *) _PyCodec_Lookup( const char *encoding ); + +PyAPI_FUNC(int) _PyCodec_Forget( + const char *encoding + ); #endif /* Codec registry encoding check API. diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -2578,6 +2578,14 @@ return _TEST_CODECS.get(codec_name) codecs.register(_get_test_codec) # Returns None, not usable as a decorator +try: + # Issue #22166: Also need to clear the internal cache in CPython + from _codecs import _forget_codec +except ImportError: + def _forget_codec(codec_name): + pass + + class ExceptionChainingTest(unittest.TestCase): def setUp(self): @@ -2603,6 +2611,12 @@ def tearDown(self): _TEST_CODECS.pop(self.codec_name, None) + # Issue #22166: Also pop from caches to avoid appearance of ref leaks + encodings._cache.pop(self.codec_name, None) + try: + _forget_codec(self.codec_name) + except KeyError: + pass def set_codec(self, encode, decode): codec_info = codecs.CodecInfo(encode, decode, diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -344,6 +344,10 @@ Tests ----- +- Issue #22166: with the assistance of a new internal _codecs._forget_codec + helping function, test_codecs now clears the encoding caches to avoid the + appearance of a reference leak + - Issue #22236: Tkinter tests now don't reuse default root window. New root window is created for every test class. 
diff --git a/Modules/_codecsmodule.c b/Modules/_codecsmodule.c --- a/Modules/_codecsmodule.c +++ b/Modules/_codecsmodule.c @@ -42,6 +42,12 @@ #include #endif +/*[clinic input] +module _codecs +[clinic start generated code]*/ +/*[clinic end generated code: output=da39a3ee5e6b4b0d input=e1390e3da3cb9deb]*/ + + /* --- Registry ----------------------------------------------------------- */ PyDoc_STRVAR(register__doc__, @@ -134,6 +140,53 @@ /* --- Helpers ------------------------------------------------------------ */ +/*[clinic input] +_codecs._forget_codec + + encoding: str + / + +Purge the named codec from the internal codec lookup cache +[clinic start generated code]*/ + +PyDoc_STRVAR(_codecs__forget_codec__doc__, +"_forget_codec($module, encoding, /)\n" +"--\n" +"\n" +"Purge the named codec from the internal codec lookup cache"); + +#define _CODECS__FORGET_CODEC_METHODDEF \ + {"_forget_codec", (PyCFunction)_codecs__forget_codec, METH_VARARGS, _codecs__forget_codec__doc__}, + +static PyObject * +_codecs__forget_codec_impl(PyModuleDef *module, const char *encoding); + +static PyObject * +_codecs__forget_codec(PyModuleDef *module, PyObject *args) +{ + PyObject *return_value = NULL; + const char *encoding; + + if (!PyArg_ParseTuple(args, + "s:_forget_codec", + &encoding)) + goto exit; + return_value = _codecs__forget_codec_impl(module, encoding); + +exit: + return return_value; +} + +static PyObject * +_codecs__forget_codec_impl(PyModuleDef *module, const char *encoding) +/*[clinic end generated code: output=a75e631591702a5c input=18d5d92d0e386c38]*/ +{ + if (_PyCodec_Forget(encoding) < 0) { + return NULL; + }; + Py_RETURN_NONE; +} + static PyObject *codec_tuple(PyObject *unicode, Py_ssize_t len) @@ -1168,6 +1221,7 @@ register_error__doc__}, {"lookup_error", lookup_error, METH_VARARGS, lookup_error__doc__}, + _CODECS__FORGET_CODEC_METHODDEF {NULL, NULL} /* sentinel */ }; diff --git a/Python/codecs.c b/Python/codecs.c --- a/Python/codecs.c +++ b/Python/codecs.c @@ -185,6 +185,32 @@ return NULL; } +int _PyCodec_Forget(const char *encoding) +{ + PyInterpreterState *interp; + PyObject *v; + int result; + + interp = PyThreadState_GET()->interp; + if (interp->codec_search_path == NULL) { + return -1; + } + + /* Convert the encoding to a normalized Python string: all + characters are converted to lower case, spaces and hyphens are + replaced with underscores. */ + v = normalizestring(encoding); + if (v == NULL) { + return -1; + } + + /* Drop the named codec from the internal cache */ + result = PyDict_DelItem(interp->codec_search_cache, v); + Py_DECREF(v); + + return result; +} + /* Codec registry encoding check API. */ int PyCodec_KnownEncoding(const char *encoding) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Sep 15 23:29:55 2014 From: python-checkins at python.org (alex.gaynor) Date: Mon, 15 Sep 2014 21:29:55 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_476=3A_Link_to_the_two_re?= =?utf-8?q?levant_tickets?= Message-ID: <20140915212943.2930.90877@mail.hg.python.org> http://hg.python.org/peps/rev/ab5cdcf9c1fd changeset: 5553:ab5cdcf9c1fd user: Alex Gaynor date: Mon Sep 15 14:29:40 2014 -0700 summary: PEP 476: Link to the two relevant tickets files: pep-0476.txt | 8 ++++++++ 1 files changed, 8 insertions(+), 0 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -135,6 +135,14 @@ backported in :pep:`466`. 
+Implementation +============== + +* `Issue 22366 `_ adds the ``context`` + argument to ``urlib.request.urlopen``. +* `Issue 22417 `_ implements the substance + of this PEP. + Copyright ========= -- Repository URL: http://hg.python.org/peps From solipsis at pitrou.net Tue Sep 16 10:39:56 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 16 Sep 2014 10:39:56 +0200 Subject: [Python-checkins] Daily reference leaks (322ee2f2e922): sum=130905 Message-ID: results for 322ee2f2e922 on branch "default" -------------------------------------------- test_distutils leaked [37725, 37725, 37725] references, sum=113175 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 0] references, sum=-2 test_site leaked [-2, 0, 0] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog3bkO7u', '-x'] From python-checkins at python.org Tue Sep 16 15:04:03 2014 From: python-checkins at python.org (kushal.das) Date: Tue, 16 Sep 2014 13:04:03 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2321270_=3A_We_now?= =?utf-8?q?_override_tuple_methods_in_mock=2Ecall_objects=2E?= Message-ID: <20140916130349.67123.38684@mail.hg.python.org> http://hg.python.org/cpython/rev/5660c1bdc2b6 changeset: 92437:5660c1bdc2b6 user: Kushal Das date: Tue Sep 16 18:33:37 2014 +0530 summary: Closes #21270 : We now override tuple methods in mock.call objects. files: Lib/unittest/mock.py | 6 ++++++ Lib/unittest/test/testmock/testmock.py | 10 ++++++++++ Misc/NEWS | 3 +++ 3 files changed, 19 insertions(+), 0 deletions(-) diff --git a/Lib/unittest/mock.py b/Lib/unittest/mock.py --- a/Lib/unittest/mock.py +++ b/Lib/unittest/mock.py @@ -2035,6 +2035,12 @@ return _Call(name=name, parent=self, from_kall=False) + def count(self, *args, **kwargs): + return self.__getattr__('count')(*args, **kwargs) + + def index(self, *args, **kwargs): + return self.__getattr__('index')(*args, **kwargs) + def __repr__(self): if not self.from_kall: name = self.name or 'call' diff --git a/Lib/unittest/test/testmock/testmock.py b/Lib/unittest/test/testmock/testmock.py --- a/Lib/unittest/test/testmock/testmock.py +++ b/Lib/unittest/test/testmock/testmock.py @@ -1213,6 +1213,16 @@ text = "call(daddy='hero', name='hello')" self.assertEqual(repr(m.hello.call_args), text) + #Issue21270 overrides tuple methods for mock.call objects + def test_override_tuple_methods(self): + c = call.count() + i = call.index(132,'hello') + m = Mock() + m.count() + m.index(132,"hello") + self.assertEqual(m.method_calls[0], c) + self.assertEqual(m.method_calls[1], i) + def test_mock_add_spec(self): class _One(object): one = 1 diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -157,6 +157,9 @@ - Issue #12410: imaplib.IMAP4 now supports the context management protocol. Original patch by Tarek Ziad?. +- Issue #21270: We now override tuple methods in mock.call objects so that + they can be used as normal call attributes. + - Issue #16662: load_tests() is now unconditionally run when it is present in a package's __init__.py. TestLoader.loadTestsFromModule() still accepts use_load_tests, but it is deprecated and ignored. 
A new keyword-only -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:27:36 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 16 Sep 2014 22:27:36 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_Lax_cookie_parsing_in_http=2Ecookies_could_be_a_security_issue?= =?utf-8?q?_when_combined?= Message-ID: <20140916222735.35920.50447@mail.hg.python.org> http://hg.python.org/cpython/rev/60cab9d28525 changeset: 92439:60cab9d28525 branch: 3.4 parent: 92435:fcf45ec7863e parent: 92438:270f61ec1157 user: Antoine Pitrou date: Wed Sep 17 00:25:57 2014 +0200 summary: Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. files: Lib/http/cookies.py | 3 ++- Lib/test/test_http_cookies.py | 9 +++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 16 insertions(+), 1 deletions(-) diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -431,6 +431,7 @@ _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern + \s* # Optional whitespace at start of cookie (?P # Start of group 'key' """ + _LegalCharsPatt + r"""+? # Any word of at least one letter ) # End of group 'key' @@ -534,7 +535,7 @@ while 0 <= i < n: # Start looking for a cookie - match = patt.search(str, i) + match = patt.match(str, i) if not match: # No more cookies break diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py --- a/Lib/test/test_http_cookies.py +++ b/Lib/test/test_http_cookies.py @@ -179,6 +179,15 @@ """) + def test_invalid_cookies(self): + # Accepting these could be a security issue + C = cookies.SimpleCookie() + for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'): + C.load(s) + self.assertEqual(dict(C), {}) + self.assertEqual(C.output(), '') + + class MorselTests(unittest.TestCase): """Tests for the Morsel object.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -140,6 +140,7 @@ Pablo Bleyer Erik van Blokland Eric Blossom +Sergey Bobrov Finn Bock Paul Boddie Matthew Boedicker diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,10 @@ Library ------- +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. + - Issue #22384: An exception in Tkinter callback no longer crashes the program when it is run with pythonw.exe. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:27:36 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 16 Sep 2014 22:27:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_Lax_cookie_par?= =?utf-8?q?sing_in_http=2Ecookies_could_be_a_security_issue_when_combined?= Message-ID: <20140916222734.26497.94967@mail.hg.python.org> http://hg.python.org/cpython/rev/270f61ec1157 changeset: 92438:270f61ec1157 branch: 3.3 parent: 91975:a36d469f31c1 user: Antoine Pitrou date: Wed Sep 17 00:23:55 2014 +0200 summary: Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. 
files: Lib/http/cookies.py | 3 ++- Lib/test/test_http_cookies.py | 9 +++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 16 insertions(+), 1 deletions(-) diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -431,6 +431,7 @@ _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern + \s* # Optional whitespace at start of cookie (?P # Start of group 'key' """ + _LegalCharsPatt + r"""+? # Any word of at least one letter ) # End of group 'key' @@ -534,7 +535,7 @@ while 0 <= i < n: # Start looking for a cookie - match = patt.search(str, i) + match = patt.match(str, i) if not match: # No more cookies break diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py --- a/Lib/test/test_http_cookies.py +++ b/Lib/test/test_http_cookies.py @@ -179,6 +179,15 @@ """) + def test_invalid_cookies(self): + # Accepting these could be a security issue + C = cookies.SimpleCookie() + for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'): + C.load(s) + self.assertEqual(dict(C), {}) + self.assertEqual(C.output(), '') + + class MorselTests(unittest.TestCase): """Tests for the Morsel object.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -131,6 +131,7 @@ Pablo Bleyer Erik van Blokland Eric Blossom +Sergey Bobrov Finn Bock Paul Boddie Matthew Boedicker diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,10 @@ Library ------- +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. + - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:27:36 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 16 Sep 2014 22:27:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Lax_cookie_parsing_in_http=2Ecookies_could_be_a_security?= =?utf-8?q?_issue_when_combined?= Message-ID: <20140916222735.115064.83923@mail.hg.python.org> http://hg.python.org/cpython/rev/d3663a0f97ed changeset: 92440:d3663a0f97ed parent: 92437:5660c1bdc2b6 parent: 92439:60cab9d28525 user: Antoine Pitrou date: Wed Sep 17 00:27:26 2014 +0200 summary: Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. files: Lib/http/cookies.py | 3 ++- Lib/test/test_http_cookies.py | 9 +++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 16 insertions(+), 1 deletions(-) diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -431,6 +431,7 @@ _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern + \s* # Optional whitespace at start of cookie (?P # Start of group 'key' """ + _LegalCharsPatt + r"""+? 
# Any word of at least one letter ) # End of group 'key' @@ -534,7 +535,7 @@ while 0 <= i < n: # Start looking for a cookie - match = patt.search(str, i) + match = patt.match(str, i) if not match: # No more cookies break diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py --- a/Lib/test/test_http_cookies.py +++ b/Lib/test/test_http_cookies.py @@ -179,6 +179,15 @@ """) + def test_invalid_cookies(self): + # Accepting these could be a security issue + C = cookies.SimpleCookie() + for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'): + C.load(s) + self.assertEqual(dict(C), {}) + self.assertEqual(C.output(), '') + + class MorselTests(unittest.TestCase): """Tests for the Morsel object.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -142,6 +142,7 @@ Pablo Bleyer Erik van Blokland Eric Blossom +Sergey Bobrov Finn Bock Paul Boddie Matthew Boedicker diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,10 @@ Library ------- +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. + - Issue #20537: logging methods now accept an exception instance as well as a Boolean value or exception tuple. Thanks to Yury Selivanov for the patch. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:40:24 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 16 Sep 2014 22:40:24 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Replace_bad_ft?= =?utf-8?q?p_URLs_in_test=5Furllib2net?= Message-ID: <20140916224021.35474.92574@mail.hg.python.org> http://hg.python.org/cpython/rev/20f2c845a62a changeset: 92441:20f2c845a62a branch: 3.4 parent: 92439:60cab9d28525 user: Antoine Pitrou date: Wed Sep 17 00:39:21 2014 +0200 summary: Replace bad ftp URLs in test_urllib2net files: Lib/test/test_urllib2net.py | 68 ++++++++++++------------ 1 files changed, 34 insertions(+), 34 deletions(-) diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py --- a/Lib/test/test_urllib2net.py +++ b/Lib/test/test_urllib2net.py @@ -103,9 +103,9 @@ def test_ftp(self): urls = [ - 'ftp://ftp.kernel.org/pub/linux/kernel/README', - 'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file', - #'ftp://ftp.kernel.org/pub/leenox/kernel/test', + 'ftp://ftp.debian.org/debian/README', + ('ftp://ftp.debian.org/debian/non-existent-file', + None, urllib.error.URLError), 'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC' '/research-reports/00README-Legal-Rules-Regs', ] @@ -215,39 +215,39 @@ urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError) for url in urls: - if isinstance(url, tuple): - url, req, expected_err = url - else: - req = expected_err = None + with self.subTest(url=url): + if isinstance(url, tuple): + url, req, expected_err = url + else: + req = expected_err = None - with support.transient_internet(url): - debug(url) - try: - f = urlopen(url, req, TIMEOUT) - except OSError as err: - debug(err) - if expected_err: - msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" % - (expected_err, url, req, type(err), err)) - self.assertIsInstance(err, expected_err, msg) - except urllib.error.URLError as err: - if isinstance(err[0], socket.timeout): - print("" % url, file=sys.stderr) - continue + with support.transient_internet(url): + try: + f = urlopen(url, req, TIMEOUT) + except OSError as err: + if expected_err: + msg = ("Didn't get expected error(s) %s for %s %s, 
got %s: %s" % + (expected_err, url, req, type(err), err)) + self.assertIsInstance(err, expected_err, msg) + else: + raise + except urllib.error.URLError as err: + if isinstance(err[0], socket.timeout): + print("" % url, file=sys.stderr) + continue + else: + raise else: - raise - else: - try: - with support.time_out, \ - support.socket_peer_reset, \ - support.ioerror_peer_reset: - buf = f.read() - debug("read %d bytes" % len(buf)) - except socket.timeout: - print("" % url, file=sys.stderr) - f.close() - debug("******** next url coming up...") - time.sleep(0.1) + try: + with support.time_out, \ + support.socket_peer_reset, \ + support.ioerror_peer_reset: + buf = f.read() + debug("read %d bytes" % len(buf)) + except socket.timeout: + print("" % url, file=sys.stderr) + f.close() + time.sleep(0.1) def _extra_handlers(self): handlers = [] -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:40:24 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 16 Sep 2014 22:40:24 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Replace_bad_ftp_URLs_in_test=5Furllib2net?= Message-ID: <20140916224021.101119.74810@mail.hg.python.org> http://hg.python.org/cpython/rev/74f7fbf548d0 changeset: 92442:74f7fbf548d0 parent: 92440:d3663a0f97ed parent: 92441:20f2c845a62a user: Antoine Pitrou date: Wed Sep 17 00:40:13 2014 +0200 summary: Replace bad ftp URLs in test_urllib2net files: Lib/test/test_urllib2net.py | 68 ++++++++++++------------ 1 files changed, 34 insertions(+), 34 deletions(-) diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py --- a/Lib/test/test_urllib2net.py +++ b/Lib/test/test_urllib2net.py @@ -103,9 +103,9 @@ def test_ftp(self): urls = [ - 'ftp://ftp.kernel.org/pub/linux/kernel/README', - 'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file', - #'ftp://ftp.kernel.org/pub/leenox/kernel/test', + 'ftp://ftp.debian.org/debian/README', + ('ftp://ftp.debian.org/debian/non-existent-file', + None, urllib.error.URLError), 'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC' '/research-reports/00README-Legal-Rules-Regs', ] @@ -215,39 +215,39 @@ urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError) for url in urls: - if isinstance(url, tuple): - url, req, expected_err = url - else: - req = expected_err = None + with self.subTest(url=url): + if isinstance(url, tuple): + url, req, expected_err = url + else: + req = expected_err = None - with support.transient_internet(url): - debug(url) - try: - f = urlopen(url, req, TIMEOUT) - except OSError as err: - debug(err) - if expected_err: - msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" % - (expected_err, url, req, type(err), err)) - self.assertIsInstance(err, expected_err, msg) - except urllib.error.URLError as err: - if isinstance(err[0], socket.timeout): - print("" % url, file=sys.stderr) - continue + with support.transient_internet(url): + try: + f = urlopen(url, req, TIMEOUT) + except OSError as err: + if expected_err: + msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" % + (expected_err, url, req, type(err), err)) + self.assertIsInstance(err, expected_err, msg) + else: + raise + except urllib.error.URLError as err: + if isinstance(err[0], socket.timeout): + print("" % url, file=sys.stderr) + continue + else: + raise else: - raise - else: - try: - with support.time_out, \ - support.socket_peer_reset, \ - support.ioerror_peer_reset: - buf = f.read() - debug("read %d bytes" % len(buf)) - except 
socket.timeout: - print("" % url, file=sys.stderr) - f.close() - debug("******** next url coming up...") - time.sleep(0.1) + try: + with support.time_out, \ + support.socket_peer_reset, \ + support.ioerror_peer_reset: + buf = f.read() + debug("read %d bytes" % len(buf)) + except socket.timeout: + print("" % url, file=sys.stderr) + f.close() + time.sleep(0.1) def _extra_handlers(self): handlers = [] -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 00:45:43 2014 From: python-checkins at python.org (guido.van.rossum) Date: Tue, 16 Sep 2014 22:45:43 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Lax_cookie_par?= =?utf-8?q?sing_in_http=2Ecookies_could_be_a_security_issue_when?= Message-ID: <20140916224543.89391.12980@mail.hg.python.org> http://hg.python.org/cpython/rev/9e765e65e5cb changeset: 92443:9e765e65e5cb branch: 2.7 parent: 92431:e6c7a5a94a1d user: Guido van Rossum date: Tue Sep 16 15:45:36 2014 -0700 summary: Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. files: Lib/Cookie.py | 3 ++- Lib/test/test_cookie.py | 9 +++++++++ Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 15 insertions(+), 1 deletions(-) diff --git a/Lib/Cookie.py b/Lib/Cookie.py --- a/Lib/Cookie.py +++ b/Lib/Cookie.py @@ -531,6 +531,7 @@ _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile( r"(?x)" # This is a Verbose pattern + r"\s*" # Optional whitespace at start of cookie r"(?P" # Start of group 'key' ""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy r")" # End of group 'key' @@ -646,7 +647,7 @@ while 0 <= i < n: # Start looking for a cookie - match = patt.search(str, i) + match = patt.match(str, i) if not match: break # No more cookies K,V = match.group("key"), match.group("val") diff --git a/Lib/test/test_cookie.py b/Lib/test/test_cookie.py --- a/Lib/test/test_cookie.py +++ b/Lib/test/test_cookie.py @@ -133,6 +133,15 @@ self.assertEqual(C['Customer']['version'], '1') self.assertEqual(C['Customer']['path'], '/acme') + def test_invalid_cookies(self): + # Accepting these could be a security issue + C = Cookie.SimpleCookie() + for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'): + C.load(s) + self.assertEqual(dict(C), {}) + self.assertEqual(C.output(), '') + + def test_main(): run_unittest(CookieTests) if Cookie.__doc__ is not None: diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -136,6 +136,7 @@ Pablo Bleyer Erik van Blokland Eric Blossom +Sergey Bobrov Finn Bock Paul Boddie Matthew Boedicker diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -21,6 +21,9 @@ Library ------- +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. - Issue #21147: sqlite3 now raises an exception if the request contains a null character instead of truncate it. Based on patch by Victor Stinner. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 04:03:09 2014 From: python-checkins at python.org (ethan.furman) Date: Wed, 17 Sep 2014 02:03:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_consistently_u?= =?utf-8?q?se_=5Fname=5F_and_=5Fvalue=5F=3B_patch_from_Kiss_Gyorgy?= Message-ID: <20140917020305.57705.36388@mail.hg.python.org> http://hg.python.org/cpython/rev/4135f3929b35 changeset: 92444:4135f3929b35 branch: 3.4 parent: 92441:20f2c845a62a user: Ethan Furman date: Tue Sep 16 17:31:23 2014 -0700 summary: consistently use _name_ and _value_; patch from Kiss Gyorgy files: Lib/enum.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/enum.py b/Lib/enum.py --- a/Lib/enum.py +++ b/Lib/enum.py @@ -159,7 +159,7 @@ # If another member with the same value was already defined, the # new member becomes an alias to the existing one. for name, canonical_member in enum_class._member_map_.items(): - if canonical_member.value == enum_member._value_: + if canonical_member._value_ == enum_member._value_: enum_member = canonical_member break else: @@ -224,7 +224,7 @@ return cls._create_(value, names, module=module, qualname=qualname, type=type) def __contains__(cls, member): - return isinstance(member, cls) and member.name in cls._member_map_ + return isinstance(member, cls) and member._name_ in cls._member_map_ def __delattr__(cls, attr): # nicer error message when someone tries to delete an attribute @@ -452,9 +452,9 @@ except TypeError: # not there, now do long search -- O(n) behavior for member in cls._member_map_.values(): - if member.value == value: + if member._value_ == value: return member - raise ValueError("%s is not a valid %s" % (value, cls.__name__)) + raise ValueError("%r is not a valid %s" % (value, cls.__name__)) def __repr__(self): return "<%s.%s: %r>" % ( @@ -480,7 +480,7 @@ # mix-in branch else: cls = self._member_type_ - val = self.value + val = self._value_ return cls.__format__(val, format_spec) def __hash__(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 04:03:09 2014 From: python-checkins at python.org (ethan.furman) Date: Wed, 17 Sep 2014 02:03:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_consistently_use_=5Fname=5F_and_=5Fvalue=5F=3B_patch_fro?= =?utf-8?q?m_Kiss_Gyorgy?= Message-ID: <20140917020305.53736.46224@mail.hg.python.org> http://hg.python.org/cpython/rev/cdd412347827 changeset: 92445:cdd412347827 parent: 92442:74f7fbf548d0 parent: 92444:4135f3929b35 user: Ethan Furman date: Tue Sep 16 19:02:30 2014 -0700 summary: consistently use _name_ and _value_; patch from Kiss Gyorgy files: Lib/enum.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/enum.py b/Lib/enum.py --- a/Lib/enum.py +++ b/Lib/enum.py @@ -159,7 +159,7 @@ # If another member with the same value was already defined, the # new member becomes an alias to the existing one. 
for name, canonical_member in enum_class._member_map_.items(): - if canonical_member.value == enum_member._value_: + if canonical_member._value_ == enum_member._value_: enum_member = canonical_member break else: @@ -224,7 +224,7 @@ return cls._create_(value, names, module=module, qualname=qualname, type=type) def __contains__(cls, member): - return isinstance(member, cls) and member.name in cls._member_map_ + return isinstance(member, cls) and member._name_ in cls._member_map_ def __delattr__(cls, attr): # nicer error message when someone tries to delete an attribute @@ -452,9 +452,9 @@ except TypeError: # not there, now do long search -- O(n) behavior for member in cls._member_map_.values(): - if member.value == value: + if member._value_ == value: return member - raise ValueError("%s is not a valid %s" % (value, cls.__name__)) + raise ValueError("%r is not a valid %s" % (value, cls.__name__)) def __repr__(self): return "<%s.%s: %r>" % ( @@ -480,7 +480,7 @@ # mix-in branch else: cls = self._member_type_ - val = self.value + val = self._value_ return cls.__format__(val, format_spec) def __hash__(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 04:14:35 2014 From: python-checkins at python.org (ethan.furman) Date: Wed, 17 Sep 2014 02:14:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue21738=3A_clarify_usage_of_=5F=5Fnew=5F=5F_in_Enum_s?= =?utf-8?q?ubclasses?= Message-ID: <20140917021430.84318.88177@mail.hg.python.org> http://hg.python.org/cpython/rev/91ec34801232 changeset: 92447:91ec34801232 parent: 92445:cdd412347827 parent: 92446:28c21f09719e user: Ethan Furman date: Tue Sep 16 19:14:00 2014 -0700 summary: Issue21738: clarify usage of __new__ in Enum subclasses files: Doc/library/enum.rst | 15 +++++++++------ 1 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -586,8 +586,7 @@ The :meth:`__new__` method, if defined, is used during creation of the Enum members; it is then replaced by Enum's :meth:`__new__` which is used after - class creation for lookup of existing members. Due to the way Enums are - supposed to behave, there is no way to customize Enum's :meth:`__new__`. + class creation for lookup of existing members. OrderedEnum @@ -743,7 +742,11 @@ >>> dir(Planet.EARTH) ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value'] -A :meth:`__new__` method will only be used for the creation of the -:class:`Enum` members -- after that it is replaced. This means if you wish to -change how :class:`Enum` members are looked up you either have to write a -helper function or a :func:`classmethod`. +The :meth:`__new__` method will only be used for the creation of the +:class:`Enum` members -- after that it is replaced. Any custom :meth:`__new__` +method must create the object and set the :attr:`_value_` attribute +appropriately. + +If you wish to change how :class:`Enum` members are looked up you should either +write a helper function or a :func:`classmethod` for the :class:`Enum` +subclass. 
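
A quick sketch of the functional-API ``start`` parameter documented above, assuming the 3.5 behaviour added in this changeset (the ``Animal`` enum and its members are invented)::

    from enum import Enum

    # Without `start`, auto-numbering begins at 1; here it begins at 10.
    Animal = Enum('Animal', 'ANT BEE CAT DOG', start=10)
    Animal.ANT.value   # 10
    Animal.DOG.value   # 13
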
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 04:14:35 2014 From: python-checkins at python.org (ethan.furman) Date: Wed, 17 Sep 2014 02:14:35 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUyMTczODog?= =?utf-8?q?clarify_usage_of_=5F=5Fnew=5F=5F_in_Enum_subclasses?= Message-ID: <20140917021429.39277.34467@mail.hg.python.org> http://hg.python.org/cpython/rev/28c21f09719e changeset: 92446:28c21f09719e branch: 3.4 parent: 92444:4135f3929b35 user: Ethan Furman date: Tue Sep 16 19:13:31 2014 -0700 summary: Issue21738: clarify usage of __new__ in Enum subclasses files: Doc/library/enum.rst | 15 +++++++++------ 1 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -586,8 +586,7 @@ The :meth:`__new__` method, if defined, is used during creation of the Enum members; it is then replaced by Enum's :meth:`__new__` which is used after - class creation for lookup of existing members. Due to the way Enums are - supposed to behave, there is no way to customize Enum's :meth:`__new__`. + class creation for lookup of existing members. OrderedEnum @@ -743,7 +742,11 @@ >>> dir(Planet.EARTH) ['__class__', '__doc__', '__module__', 'name', 'surface_gravity', 'value'] -A :meth:`__new__` method will only be used for the creation of the -:class:`Enum` members -- after that it is replaced. This means if you wish to -change how :class:`Enum` members are looked up you either have to write a -helper function or a :func:`classmethod`. +The :meth:`__new__` method will only be used for the creation of the +:class:`Enum` members -- after that it is replaced. Any custom :meth:`__new__` +method must create the object and set the :attr:`_value_` attribute +appropriately. + +If you wish to change how :class:`Enum` members are looked up you should either +write a helper function or a :func:`classmethod` for the :class:`Enum` +subclass. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 05:36:21 2014 From: python-checkins at python.org (ethan.furman) Date: Wed, 17 Sep 2014 03:36:21 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Close_issue21706=3A_add_?= =?utf-8?q?=27start=27_parameter_to_functional_API?= Message-ID: <20140917033619.105139.80037@mail.hg.python.org> http://hg.python.org/cpython/rev/ec016ba862ba changeset: 92448:ec016ba862ba user: Ethan Furman date: Tue Sep 16 20:35:55 2014 -0700 summary: Close issue21706: add 'start' parameter to functional API files: Doc/library/enum.rst | 9 ++- Lib/enum.py | 16 +++--- Lib/test/test_enum.py | 66 +++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 11 deletions(-) diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -400,7 +400,8 @@ whitespace-separated string of names, a sequence of names, a sequence of 2-tuples with key/value pairs, or a mapping (e.g. dictionary) of names to values. The last two options enable assigning arbitrary values to -enumerations; the others auto-assign increasing integers starting with 1. A +enumerations; the others auto-assign increasing integers starting with 1 (use +the `start` parameter to specify a different starting value). A new class derived from :class:`Enum` is returned. 
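As a brief aside on the `start` keyword mentioned in the previous sentence (illustrative calls only; the enumerations below are made up, although SummerMonth mirrors the new tests further down), it shifts the auto-assigned values for name-only input:

    from enum import Enum, IntEnum

    SummerMonth = Enum('SummerMonth', 'june july august', start=6)
    print(SummerMonth.july.value)    # 7: counting starts at 6 instead of 1

    Weekday = IntEnum('Weekday', ['mon', 'tue', 'wed'], start=0)
    print(Weekday.mon == 0)          # True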
In other words, the above assignment to :class:`Animal` is equivalent to:: @@ -438,12 +439,12 @@ The complete signature is:: - Enum(value='NewEnumName', names=<...>, *, module='...', qualname='...', type=) + Enum(value='NewEnumName', names=<...>, *, module='...', qualname='...', type=, start=1) :value: What the new Enum class will record as its name. :names: The Enum members. This can be a whitespace or comma separated string - (values will start at 1):: + (values will start at 1 unless otherwise specified):: 'red green blue' | 'red,green,blue' | 'red, green, blue' @@ -461,6 +462,8 @@ :type: type to mix in to new Enum class. +:start: number to start counting at if only names are passed in + Derived Enumerations -------------------- diff --git a/Lib/enum.py b/Lib/enum.py --- a/Lib/enum.py +++ b/Lib/enum.py @@ -193,7 +193,7 @@ enum_class.__new__ = Enum.__new__ return enum_class - def __call__(cls, value, names=None, *, module=None, qualname=None, type=None): + def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): """Either returns an existing member, or creates a new enum class. This method is used both when an enum class is given a value to match @@ -205,7 +205,7 @@ `value` will be the name of the new class. `names` should be either a string of white-space/comma delimited names - (values will start at 1), or an iterator/mapping of name, value pairs. + (values will start at `start`), or an iterator/mapping of name, value pairs. `module` should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if @@ -221,7 +221,7 @@ if names is None: # simple value lookup return cls.__new__(cls, value) # otherwise, functional API: we're creating a new Enum type - return cls._create_(value, names, module=module, qualname=qualname, type=type) + return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start) def __contains__(cls, member): return isinstance(member, cls) and member._name_ in cls._member_map_ @@ -292,16 +292,16 @@ raise AttributeError('Cannot reassign members.') super().__setattr__(name, value) - def _create_(cls, class_name, names=None, *, module=None, qualname=None, type=None): + def _create_(cls, class_name, names=None, *, module=None, qualname=None, type=None, start=1): """Convenience method to create a new Enum class. `names` can be: * A string containing member names, separated either with spaces or - commas. Values are auto-numbered from 1. - * An iterable of member names. Values are auto-numbered from 1. + commas. Values are incremented by 1 from `start`. + * An iterable of member names. Values are incremented by 1 from `start`. * An iterable of (member name, value) pairs. - * A mapping of member name -> value. + * A mapping of member name -> value pairs. """ metacls = cls.__class__ @@ -312,7 +312,7 @@ if isinstance(names, str): names = names.replace(',', ' ').split() if isinstance(names, (tuple, list)) and isinstance(names[0], str): - names = [(e, i) for (i, e) in enumerate(names, 1)] + names = [(e, i) for (i, e) in enumerate(names, start)] # Here, names is either an iterable of (name, value) or a mapping. 
for item in names: diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py --- a/Lib/test/test_enum.py +++ b/Lib/test/test_enum.py @@ -634,6 +634,23 @@ self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) + def test_programatic_function_string_with_start(self): + SummerMonth = Enum('SummerMonth', 'june july august', start=10) + lst = list(SummerMonth) + self.assertEqual(len(lst), len(SummerMonth)) + self.assertEqual(len(SummerMonth), 3, SummerMonth) + self.assertEqual( + [SummerMonth.june, SummerMonth.july, SummerMonth.august], + lst, + ) + for i, month in enumerate('june july august'.split(), 10): + e = SummerMonth(i) + self.assertEqual(int(e.value), i) + self.assertNotEqual(e, i) + self.assertEqual(e.name, month) + self.assertIn(e, SummerMonth) + self.assertIs(type(e), SummerMonth) + def test_programatic_function_string_list(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august']) lst = list(SummerMonth) @@ -651,6 +668,23 @@ self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) + def test_programatic_function_string_list_with_start(self): + SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20) + lst = list(SummerMonth) + self.assertEqual(len(lst), len(SummerMonth)) + self.assertEqual(len(SummerMonth), 3, SummerMonth) + self.assertEqual( + [SummerMonth.june, SummerMonth.july, SummerMonth.august], + lst, + ) + for i, month in enumerate('june july august'.split(), 20): + e = SummerMonth(i) + self.assertEqual(int(e.value), i) + self.assertNotEqual(e, i) + self.assertEqual(e.name, month) + self.assertIn(e, SummerMonth) + self.assertIs(type(e), SummerMonth) + def test_programatic_function_iterable(self): SummerMonth = Enum( 'SummerMonth', @@ -707,6 +741,22 @@ self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) + def test_programatic_function_type_with_start(self): + SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30) + lst = list(SummerMonth) + self.assertEqual(len(lst), len(SummerMonth)) + self.assertEqual(len(SummerMonth), 3, SummerMonth) + self.assertEqual( + [SummerMonth.june, SummerMonth.july, SummerMonth.august], + lst, + ) + for i, month in enumerate('june july august'.split(), 30): + e = SummerMonth(i) + self.assertEqual(e, i) + self.assertEqual(e.name, month) + self.assertIn(e, SummerMonth) + self.assertIs(type(e), SummerMonth) + def test_programatic_function_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', 'june july august') lst = list(SummerMonth) @@ -723,6 +773,22 @@ self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) + def test_programatic_function_type_from_subclass_with_start(self): + SummerMonth = IntEnum('SummerMonth', 'june july august', start=40) + lst = list(SummerMonth) + self.assertEqual(len(lst), len(SummerMonth)) + self.assertEqual(len(SummerMonth), 3, SummerMonth) + self.assertEqual( + [SummerMonth.june, SummerMonth.july, SummerMonth.august], + lst, + ) + for i, month in enumerate('june july august'.split(), 40): + e = SummerMonth(i) + self.assertEqual(e, i) + self.assertEqual(e.name, month) + self.assertIn(e, SummerMonth) + self.assertIs(type(e), SummerMonth) + def test_subclassing(self): if isinstance(Name, Exception): raise Name -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 07:19:49 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 05:19:49 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E4?= Message-ID: 
<20140917051948.118327.13125@mail.hg.python.org> http://hg.python.org/cpython/rev/9f7b97fac919 changeset: 92451:9f7b97fac919 parent: 92448:ec016ba862ba parent: 92450:d36c0f2ab821 user: Senthil Kumaran date: Wed Sep 17 13:19:34 2014 +0800 summary: Merge from 3.4 Issue #22421 - Secure pydoc server run. Bind it to localhost instead of all interfaces. files: Lib/pydoc.py | 4 ++-- Lib/test/test_pydoc.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -2176,8 +2176,8 @@ class DocServer(http.server.HTTPServer): def __init__(self, port, callback): - self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost' - self.address = ('', port) + self.host = 'localhost' + self.address = (self.host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) self.quit = False diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -789,6 +789,8 @@ return text serverthread = pydoc._start_server(my_url_handler, port=0) + self.assertIn('localhost', serverthread.docserver.address) + starttime = time.time() timeout = 1 #seconds -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 07:19:49 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 05:19:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_Merge_from_3=2E3?= Message-ID: <20140917051947.57499.1360@mail.hg.python.org> http://hg.python.org/cpython/rev/d36c0f2ab821 changeset: 92450:d36c0f2ab821 branch: 3.4 parent: 92446:28c21f09719e parent: 92449:c438f6aaafa9 user: Senthil Kumaran date: Wed Sep 17 13:19:01 2014 +0800 summary: Merge from 3.3 Issue #22421 - Secure pydoc server run. Bind it to localhost instead of all interfaces. files: Lib/pydoc.py | 4 ++-- Lib/test/test_pydoc.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -2178,8 +2178,8 @@ class DocServer(http.server.HTTPServer): def __init__(self, port, callback): - self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost' - self.address = ('', port) + self.host = 'localhost' + self.address = (self.host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) self.quit = False diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -803,6 +803,8 @@ return text serverthread = pydoc._start_server(my_url_handler, port=0) + self.assertIn('localhost', serverthread.docserver.address) + starttime = time.time() timeout = 1 #seconds -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 07:20:10 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 05:20:10 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4zKTogSXNzdWUgIzIyNDIx?= =?utf-8?q?_-_Secure_pydoc_server_run=2E_Bind_it_to_localhost_instead_of_a?= =?utf-8?q?ll?= Message-ID: <20140917051947.19219.83096@mail.hg.python.org> http://hg.python.org/cpython/rev/c438f6aaafa9 changeset: 92449:c438f6aaafa9 branch: 3.3 parent: 92438:270f61ec1157 user: Senthil Kumaran date: Wed Sep 17 13:17:58 2014 +0800 summary: Issue #22421 - Secure pydoc server run. Bind it to localhost instead of all interfaces. 
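A small sketch of the difference this binding change makes, using the plain http.server machinery that pydoc's DocServer builds on (the helper name below is ours, purely for illustration): an address of ('', port) listens on every interface, while 'localhost' restricts the server to loopback clients:

    import http.server

    def make_loopback_server(port, bind_all=False):
        # bind_all=False mirrors the fixed behaviour: loopback only
        host = '' if bind_all else 'localhost'
        return http.server.HTTPServer((host, port),
                                      http.server.SimpleHTTPRequestHandler)

    srv = make_loopback_server(0)    # port 0 lets the OS pick a free port
    print(srv.server_address)        # e.g. ('127.0.0.1', 54321)
    srv.server_close()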
files: Lib/pydoc.py | 4 ++-- Lib/test/test_pydoc.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -2168,8 +2168,8 @@ class DocServer(http.server.HTTPServer): def __init__(self, port, callback): - self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost' - self.address = ('', port) + self.host = 'localhost' + self.address = (self.host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) self.quit = False diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -557,6 +557,8 @@ return text serverthread = pydoc._start_server(my_url_handler, port=0) + self.assertIn('localhost', serverthread.docserver.address) + starttime = time.time() timeout = 1 #seconds -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Wed Sep 17 09:58:45 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 17 Sep 2014 09:58:45 +0200 Subject: [Python-checkins] Daily reference leaks (74f7fbf548d0): sum=130905 Message-ID: results for 74f7fbf548d0 on branch "default" -------------------------------------------- test_distutils leaked [37725, 37725, 37725] references, sum=113175 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 0, -2] references, sum=-2 test_site leaked [0, 0, -2] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogIeMDUh', '-x'] From python-checkins at python.org Wed Sep 17 10:33:01 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 08:33:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDE5?= =?utf-8?q?=3A_Limit_the_length_of_incoming_HTTP_request_in_wsgiref_server?= =?utf-8?q?_to?= Message-ID: <20140917083300.5909.90378@mail.hg.python.org> http://hg.python.org/cpython/rev/7a4d960fc801 changeset: 92452:7a4d960fc801 branch: 2.7 parent: 92443:9e765e65e5cb user: Senthil Kumaran date: Wed Sep 17 16:27:06 2014 +0800 summary: Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes. 
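The guard this change adds is small enough to restate as a standalone sketch (read_request_line below is a hypothetical helper, not wsgiref code; it mirrors the handle() logic in the patch): read at most one byte beyond the 65536-byte limit and answer 414 rather than buffering an arbitrarily long request line:

    import io

    def read_request_line(rfile, send_error):
        """Return the raw request line, or None after sending 414 if it is too long."""
        raw_requestline = rfile.readline(65537)   # stop after limit + 1 bytes
        if len(raw_requestline) > 65536:
            send_error(414)                       # Request-URI Too Long
            return None
        return raw_requestline

    # Quick check with in-memory streams (print stands in for send_error):
    assert read_request_line(io.BytesIO(b"GET / HTTP/1.0\r\n"), print) is not None
    assert read_request_line(io.BytesIO(b"GET " + b"x" * 65537 + b"\r\n"), print) is None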
files: Lib/test/test_wsgiref.py | 5 +++++ Lib/wsgiref/simple_server.py | 9 ++++++++- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 18 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -113,6 +113,11 @@ out, err = run_amock() self.check_hello(out) + def test_request_length(self): + out, err = run_amock(data="GET " + ("x" * 65537) + " HTTP/1.0\n\n") + self.assertEqual(out.splitlines()[0], + "HTTP/1.0 414 Request-URI Too Long") + def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py --- a/Lib/wsgiref/simple_server.py +++ b/Lib/wsgiref/simple_server.py @@ -113,7 +113,14 @@ def handle(self): """Handle a single HTTP request""" - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.parse_request(): # An error code has been sent, just exit return diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -268,6 +268,7 @@ Phil Connell Juan Jos? Conti Matt Conway +Devin Cook David M. Cooke Jason R. Coombs Garrett Cooper diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -21,6 +21,10 @@ Library ------- +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + - Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 10:33:03 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 08:33:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4zKTogSXNzdWUgIzIyNDE5?= =?utf-8?q?=3A_Limit_the_length_of_incoming_HTTP_request_in_wsgiref_server?= =?utf-8?q?_to?= Message-ID: <20140917083301.114346.20368@mail.hg.python.org> http://hg.python.org/cpython/rev/a4e0aee1a9b5 changeset: 92453:a4e0aee1a9b5 branch: 3.3 parent: 92449:c438f6aaafa9 user: Senthil Kumaran date: Wed Sep 17 16:29:29 2014 +0800 summary: Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes. 
files: Lib/test/test_wsgiref.py | 5 +++++ Lib/wsgiref/simple_server.py | 9 ++++++++- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 18 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -118,6 +118,11 @@ out, err = run_amock() self.check_hello(out) + def test_request_length(self): + out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n") + self.assertEqual(out.splitlines()[0], + b"HTTP/1.0 414 Request-URI Too Long") + def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py --- a/Lib/wsgiref/simple_server.py +++ b/Lib/wsgiref/simple_server.py @@ -115,7 +115,14 @@ def handle(self): """Handle a single HTTP request""" - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.parse_request(): # An error code has been sent, just exit return diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -251,6 +251,7 @@ Phil Connell Juan Jos? Conti Matt Conway +Devin Cook David M. Cooke Jason R. Coombs Garrett Cooper diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,10 @@ Library ------- +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + - Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 10:33:03 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 08:33:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_Merge_from_3=2E3?= Message-ID: <20140917083301.54497.46780@mail.hg.python.org> http://hg.python.org/cpython/rev/ba86978c8ab5 changeset: 92454:ba86978c8ab5 branch: 3.4 parent: 92450:d36c0f2ab821 parent: 92453:a4e0aee1a9b5 user: Senthil Kumaran date: Wed Sep 17 16:31:47 2014 +0800 summary: Merge from 3.3 Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes. 
files: Lib/test/test_wsgiref.py | 5 +++++ Lib/wsgiref/simple_server.py | 9 ++++++++- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 18 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -118,6 +118,11 @@ out, err = run_amock() self.check_hello(out) + def test_request_length(self): + out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n") + self.assertEqual(out.splitlines()[0], + b"HTTP/1.0 414 Request-URI Too Long") + def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py --- a/Lib/wsgiref/simple_server.py +++ b/Lib/wsgiref/simple_server.py @@ -115,7 +115,14 @@ def handle(self): """Handle a single HTTP request""" - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.parse_request(): # An error code has been sent, just exit return diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -272,6 +272,7 @@ Phil Connell Juan Jos? Conti Matt Conway +Devin Cook David M. Cooke Jason R. Coombs Garrett Cooper diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,10 @@ Library ------- +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + - Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 10:33:03 2014 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 17 Sep 2014 08:33:03 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E4?= Message-ID: <20140917083302.98507.16029@mail.hg.python.org> http://hg.python.org/cpython/rev/07b928530cdf changeset: 92455:07b928530cdf parent: 92451:9f7b97fac919 parent: 92454:ba86978c8ab5 user: Senthil Kumaran date: Wed Sep 17 16:32:46 2014 +0800 summary: Merge from 3.4 Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes. 
files: Lib/test/test_wsgiref.py | 5 +++++ Lib/wsgiref/simple_server.py | 9 ++++++++- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 18 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -118,6 +118,11 @@ out, err = run_amock() self.check_hello(out) + def test_request_length(self): + out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n") + self.assertEqual(out.splitlines()[0], + b"HTTP/1.0 414 Request-URI Too Long") + def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py --- a/Lib/wsgiref/simple_server.py +++ b/Lib/wsgiref/simple_server.py @@ -115,7 +115,14 @@ def handle(self): """Handle a single HTTP request""" - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.parse_request(): # An error code has been sent, just exit return diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -274,6 +274,7 @@ Phil Connell Juan Jos? Conti Matt Conway +Devin Cook David M. Cooke Jason R. Coombs Garrett Cooper diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,10 @@ Library ------- +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + - Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 23:26:55 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 17 Sep 2014 21:26:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogYXN5bmNpbywgVHVs?= =?utf-8?q?ip_issue_206=3A_In_debug_mode=2C_keep_the_callback_in_the?= Message-ID: <20140917212653.26922.17159@mail.hg.python.org> http://hg.python.org/cpython/rev/e2869887e6c2 changeset: 92456:e2869887e6c2 branch: 3.4 parent: 92454:ba86978c8ab5 user: Victor Stinner date: Wed Sep 17 23:24:13 2014 +0200 summary: asyncio, Tulip issue 206: In debug mode, keep the callback in the representation of Handle and TimerHandle after cancel(). 
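To see the behaviour this change is after, a short sketch (assumes a Python 3.4 build with this patch and debug mode enabled; the callback name and the repr shown in the comment are abridged): after cancel(), the handle's repr still names the callback it was scheduled with, which is what the event loop quotes in its 'Executing ... took N seconds' warning:

    import asyncio

    def noisy(x):
        print('called with', x)

    loop = asyncio.new_event_loop()
    loop.set_debug(True)

    handle = loop.call_soon(noisy, 42)
    handle.cancel()
    print(repr(handle))   # e.g. <Handle cancelled noisy(42) at ... created at ...>
    loop.close()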
files: Lib/asyncio/events.py | 32 +++++++++------ Lib/test/test_asyncio/test_events.py | 9 ++-- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -73,7 +73,7 @@ """Object returned by callback registration methods.""" __slots__ = ('_callback', '_args', '_cancelled', '_loop', - '_source_traceback', '__weakref__') + '_source_traceback', '_repr', '__weakref__') def __init__(self, callback, args, loop): assert not isinstance(callback, Handle), 'A Handle is not a callback' @@ -81,12 +81,13 @@ self._callback = callback self._args = args self._cancelled = False + self._repr = None if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) else: self._source_traceback = None - def __repr__(self): + def _repr_info(self): info = [self.__class__.__name__] if self._cancelled: info.append('cancelled') @@ -95,10 +96,21 @@ if self._source_traceback: frame = self._source_traceback[-1] info.append('created at %s:%s' % (frame[0], frame[1])) + return info + + def __repr__(self): + if self._repr is not None: + return self._repr + info = self._repr_info() return '<%s>' % ' '.join(info) def cancel(self): self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning "Executing took 2.5 second" + self._repr = repr(self) self._callback = None self._args = None @@ -131,17 +143,11 @@ del self._source_traceback[-1] self._when = when - def __repr__(self): - info = [] - if self._cancelled: - info.append('cancelled') - info.append('when=%s' % self._when) - if self._callback is not None: - info.append(_format_callback(self._callback, self._args)) - if self._source_traceback: - frame = self._source_traceback[-1] - info.append('created at %s:%s' % (frame[0], frame[1])) - return '<%s %s>' % (self.__class__.__name__, ' '.join(info)) + def _repr_info(self): + info = super()._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, 'when=%s' % self._when) + return info def __hash__(self): return hash(self._when) diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -1891,8 +1891,8 @@ # cancelled handle h.cancel() self.assertEqual(repr(h), - '' - % (create_filename, create_lineno)) + '' + % (filename, lineno, create_filename, create_lineno)) def test_handle_source_traceback(self): loop = asyncio.get_event_loop_policy().new_event_loop() @@ -1987,8 +1987,9 @@ # cancelled handle h.cancel() self.assertEqual(repr(h), - '' - % (create_filename, create_lineno)) + '' + % (filename, lineno, create_filename, create_lineno)) def test_timer_comparison(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Sep 17 23:26:55 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 17 Sep 2014 21:26:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_asyncio=2C_Tulip_issue_206=3A_In_debug?= =?utf-8?q?_mode=2C_keep_the_callback_in_the?= Message-ID: <20140917212653.18454.848@mail.hg.python.org> http://hg.python.org/cpython/rev/04147b0172d7 changeset: 92457:04147b0172d7 parent: 92455:07b928530cdf parent: 92456:e2869887e6c2 user: Victor Stinner date: Wed Sep 17 23:24:39 2014 +0200 summary: (Merge 3.4) asyncio, Tulip issue 206: In debug mode, keep the 
callback in the representation of Handle and TimerHandle after cancel(). files: Lib/asyncio/events.py | 32 +++++++++------ Lib/test/test_asyncio/test_events.py | 9 ++-- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -73,7 +73,7 @@ """Object returned by callback registration methods.""" __slots__ = ('_callback', '_args', '_cancelled', '_loop', - '_source_traceback', '__weakref__') + '_source_traceback', '_repr', '__weakref__') def __init__(self, callback, args, loop): assert not isinstance(callback, Handle), 'A Handle is not a callback' @@ -81,12 +81,13 @@ self._callback = callback self._args = args self._cancelled = False + self._repr = None if self._loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) else: self._source_traceback = None - def __repr__(self): + def _repr_info(self): info = [self.__class__.__name__] if self._cancelled: info.append('cancelled') @@ -95,10 +96,21 @@ if self._source_traceback: frame = self._source_traceback[-1] info.append('created at %s:%s' % (frame[0], frame[1])) + return info + + def __repr__(self): + if self._repr is not None: + return self._repr + info = self._repr_info() return '<%s>' % ' '.join(info) def cancel(self): self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning "Executing took 2.5 second" + self._repr = repr(self) self._callback = None self._args = None @@ -131,17 +143,11 @@ del self._source_traceback[-1] self._when = when - def __repr__(self): - info = [] - if self._cancelled: - info.append('cancelled') - info.append('when=%s' % self._when) - if self._callback is not None: - info.append(_format_callback(self._callback, self._args)) - if self._source_traceback: - frame = self._source_traceback[-1] - info.append('created at %s:%s' % (frame[0], frame[1])) - return '<%s %s>' % (self.__class__.__name__, ' '.join(info)) + def _repr_info(self): + info = super()._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, 'when=%s' % self._when) + return info def __hash__(self): return hash(self._when) diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -1891,8 +1891,8 @@ # cancelled handle h.cancel() self.assertEqual(repr(h), - '' - % (create_filename, create_lineno)) + '' + % (filename, lineno, create_filename, create_lineno)) def test_handle_source_traceback(self): loop = asyncio.get_event_loop_policy().new_event_loop() @@ -1987,8 +1987,9 @@ # cancelled handle h.cancel() self.assertEqual(repr(h), - '' - % (create_filename, create_lineno)) + '' + % (filename, lineno, create_filename, create_lineno)) def test_timer_comparison(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 02:44:39 2014 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 18 Sep 2014 00:44:39 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzQxODA6?= =?utf-8?q?_The_warnings_registries_are_now_reset_when_the_filters_are?= Message-ID: <20140918004439.60765.49928@mail.hg.python.org> http://hg.python.org/cpython/rev/8adb2c6e0803 changeset: 92458:8adb2c6e0803 branch: 3.4 parent: 92456:e2869887e6c2 user: Antoine Pitrou date: Thu Sep 18 02:40:46 2014 +0200 summary: Issue #4180: The warnings registries are now reset 
when the filters are modified. files: Lib/test/test_warnings.py | 49 +++++++++++++++++++++++++- Lib/warnings.py | 17 ++++++++- Misc/NEWS | 3 + Python/_warnings.c | 41 +++++++++++++++++++--- 4 files changed, 101 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_warnings.py b/Lib/test/test_warnings.py --- a/Lib/test/test_warnings.py +++ b/Lib/test/test_warnings.py @@ -92,6 +92,16 @@ self.assertRaises(UserWarning, self.module.warn, "FilterTests.test_error") + def test_error_after_default(self): + with original_warnings.catch_warnings(module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_ignore_after_default" + def f(): + self.module.warn(message, UserWarning) + f() + self.module.filterwarnings("error", category=UserWarning) + self.assertRaises(UserWarning, f) + def test_ignore(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -100,6 +110,19 @@ self.module.warn("FilterTests.test_ignore", UserWarning) self.assertEqual(len(w), 0) + def test_ignore_after_default(self): + with original_warnings.catch_warnings(record=True, + module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_ignore_after_default" + def f(): + self.module.warn(message, UserWarning) + f() + self.module.filterwarnings("ignore", category=UserWarning) + f() + f() + self.assertEqual(len(w), 1) + def test_always(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -111,6 +134,26 @@ self.module.warn(message, UserWarning) self.assertTrue(w[-1].message, message) + def test_always_after_default(self): + with original_warnings.catch_warnings(record=True, + module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_always_after_ignore" + def f(): + self.module.warn(message, UserWarning) + f() + self.assertEqual(len(w), 1) + self.assertEqual(w[-1].message.args[0], message) + f() + self.assertEqual(len(w), 1) + self.module.filterwarnings("always", category=UserWarning) + f() + self.assertEqual(len(w), 2) + self.assertEqual(w[-1].message.args[0], message) + f() + self.assertEqual(len(w), 3) + self.assertEqual(w[-1].message.args[0], message) + def test_default(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -506,7 +549,9 @@ registry=registry) self.assertEqual(w[-1].message, message) self.assertEqual(len(w), 1) - self.assertEqual(len(registry), 1) + # One actual registry key plus the "version" key + self.assertEqual(len(registry), 2) + self.assertIn("version", registry) del w[:] # Test removal. del self.module.defaultaction @@ -516,7 +561,7 @@ registry=registry) self.assertEqual(w[-1].message, message) self.assertEqual(len(w), 1) - self.assertEqual(len(registry), 1) + self.assertEqual(len(registry), 2) del w[:] # Test setting. self.module.defaultaction = "ignore" diff --git a/Lib/warnings.py b/Lib/warnings.py --- a/Lib/warnings.py +++ b/Lib/warnings.py @@ -53,6 +53,7 @@ filters.append(item) else: filters.insert(0, item) + _filters_mutated() def simplefilter(action, category=Warning, lineno=0, append=False): """Insert a simple entry into the list of warnings filters (at the front). 
@@ -73,10 +74,12 @@ filters.append(item) else: filters.insert(0, item) + _filters_mutated() def resetwarnings(): """Clear the list of warning filters, so that no filters are active.""" filters[:] = [] + _filters_mutated() class _OptionError(Exception): """Exception used by option processing helpers.""" @@ -204,6 +207,9 @@ module = module[:-3] # XXX What about leading pathname? if registry is None: registry = {} + if registry.get('version', 0) != _filters_version: + registry.clear() + registry['version'] = _filters_version if isinstance(message, Warning): text = str(message) category = message.__class__ @@ -329,6 +335,7 @@ self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] + self._module._filters_mutated() self._showwarning = self._module.showwarning if self._record: log = [] @@ -343,6 +350,7 @@ if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters + self._module._filters_mutated() self._module.showwarning = self._showwarning @@ -357,15 +365,22 @@ _warnings_defaults = False try: from _warnings import (filters, _defaultaction, _onceregistry, - warn, warn_explicit) + warn, warn_explicit, _filters_mutated) defaultaction = _defaultaction onceregistry = _onceregistry _warnings_defaults = True + except ImportError: filters = [] defaultaction = "default" onceregistry = {} + _filters_version = 1 + + def _filters_mutated(): + global _filters_version + _filters_version += 1 + # Module initialization _processoptions(sys.warnoptions) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #4180: The warnings registries are now reset when the filters + are modified. + - Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. diff --git a/Python/_warnings.c b/Python/_warnings.c --- a/Python/_warnings.c +++ b/Python/_warnings.c @@ -12,6 +12,7 @@ static PyObject *_filters; /* List */ static PyObject *_once_registry; /* Dict */ static PyObject *_default_action; /* String */ +static long _filters_version; _Py_IDENTIFIER(argv); _Py_IDENTIFIER(stderr); @@ -178,16 +179,33 @@ static int already_warned(PyObject *registry, PyObject *key, int should_set) { - PyObject *already_warned; + PyObject *version_obj, *already_warned; + _Py_IDENTIFIER(version); if (key == NULL) return -1; - already_warned = PyDict_GetItem(registry, key); - if (already_warned != NULL) { - int rc = PyObject_IsTrue(already_warned); - if (rc != 0) - return rc; + version_obj = _PyDict_GetItemId(registry, &PyId_version); + if (version_obj == NULL + || !PyLong_CheckExact(version_obj) + || PyLong_AsLong(version_obj) != _filters_version) { + PyDict_Clear(registry); + version_obj = PyLong_FromLong(_filters_version); + if (version_obj == NULL) + return -1; + if (_PyDict_SetItemId(registry, &PyId_version, version_obj) < 0) { + Py_DECREF(version_obj); + return -1; + } + Py_DECREF(version_obj); + } + else { + already_warned = PyDict_GetItem(registry, key); + if (already_warned != NULL) { + int rc = PyObject_IsTrue(already_warned); + if (rc != 0) + return rc; + } } /* This warning wasn't found in the registry, set it. */ @@ -750,6 +768,13 @@ registry, NULL); } +static PyObject * +warnings_filters_mutated(PyObject *self, PyObject *args) +{ + _filters_version++; + Py_RETURN_NONE; +} + /* Function to issue a warning message; may raise an exception. 
*/ @@ -917,6 +942,8 @@ warn_doc}, {"warn_explicit", (PyCFunction)warnings_warn_explicit, METH_VARARGS | METH_KEYWORDS, warn_explicit_doc}, + {"_filters_mutated", (PyCFunction)warnings_filters_mutated, METH_NOARGS, + NULL}, /* XXX(brett.cannon): add showwarning? */ /* XXX(brett.cannon): Reasonable to add formatwarning? */ {NULL, NULL} /* sentinel */ @@ -1069,5 +1096,7 @@ Py_INCREF(_default_action); if (PyModule_AddObject(m, "_defaultaction", _default_action) < 0) return NULL; + + _filters_version = 0; return m; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 02:44:40 2014 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 18 Sep 2014 00:44:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=234180=3A_The_warnings_registries_are_now_reset_w?= =?utf-8?q?hen_the_filters_are?= Message-ID: <20140918004439.19219.16138@mail.hg.python.org> http://hg.python.org/cpython/rev/4bc60eb68d3e changeset: 92459:4bc60eb68d3e parent: 92457:04147b0172d7 parent: 92458:8adb2c6e0803 user: Antoine Pitrou date: Thu Sep 18 02:42:05 2014 +0200 summary: Issue #4180: The warnings registries are now reset when the filters are modified. files: Lib/test/test_warnings.py | 49 +++++++++++++++++++++++++- Lib/warnings.py | 17 ++++++++- Misc/NEWS | 3 + Python/_warnings.c | 41 +++++++++++++++++++--- 4 files changed, 101 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_warnings.py b/Lib/test/test_warnings.py --- a/Lib/test/test_warnings.py +++ b/Lib/test/test_warnings.py @@ -92,6 +92,16 @@ self.assertRaises(UserWarning, self.module.warn, "FilterTests.test_error") + def test_error_after_default(self): + with original_warnings.catch_warnings(module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_ignore_after_default" + def f(): + self.module.warn(message, UserWarning) + f() + self.module.filterwarnings("error", category=UserWarning) + self.assertRaises(UserWarning, f) + def test_ignore(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -100,6 +110,19 @@ self.module.warn("FilterTests.test_ignore", UserWarning) self.assertEqual(len(w), 0) + def test_ignore_after_default(self): + with original_warnings.catch_warnings(record=True, + module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_ignore_after_default" + def f(): + self.module.warn(message, UserWarning) + f() + self.module.filterwarnings("ignore", category=UserWarning) + f() + f() + self.assertEqual(len(w), 1) + def test_always(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -111,6 +134,26 @@ self.module.warn(message, UserWarning) self.assertTrue(w[-1].message, message) + def test_always_after_default(self): + with original_warnings.catch_warnings(record=True, + module=self.module) as w: + self.module.resetwarnings() + message = "FilterTests.test_always_after_ignore" + def f(): + self.module.warn(message, UserWarning) + f() + self.assertEqual(len(w), 1) + self.assertEqual(w[-1].message.args[0], message) + f() + self.assertEqual(len(w), 1) + self.module.filterwarnings("always", category=UserWarning) + f() + self.assertEqual(len(w), 2) + self.assertEqual(w[-1].message.args[0], message) + f() + self.assertEqual(len(w), 3) + self.assertEqual(w[-1].message.args[0], message) + def test_default(self): with original_warnings.catch_warnings(record=True, module=self.module) as w: @@ -541,7 +584,9 @@ registry=registry) 
self.assertEqual(w[-1].message, message) self.assertEqual(len(w), 1) - self.assertEqual(len(registry), 1) + # One actual registry key plus the "version" key + self.assertEqual(len(registry), 2) + self.assertIn("version", registry) del w[:] # Test removal. del self.module.defaultaction @@ -551,7 +596,7 @@ registry=registry) self.assertEqual(w[-1].message, message) self.assertEqual(len(w), 1) - self.assertEqual(len(registry), 1) + self.assertEqual(len(registry), 2) del w[:] # Test setting. self.module.defaultaction = "ignore" diff --git a/Lib/warnings.py b/Lib/warnings.py --- a/Lib/warnings.py +++ b/Lib/warnings.py @@ -53,6 +53,7 @@ filters.append(item) else: filters.insert(0, item) + _filters_mutated() def simplefilter(action, category=Warning, lineno=0, append=False): """Insert a simple entry into the list of warnings filters (at the front). @@ -73,10 +74,12 @@ filters.append(item) else: filters.insert(0, item) + _filters_mutated() def resetwarnings(): """Clear the list of warning filters, so that no filters are active.""" filters[:] = [] + _filters_mutated() class _OptionError(Exception): """Exception used by option processing helpers.""" @@ -206,6 +209,9 @@ module = module[:-3] # XXX What about leading pathname? if registry is None: registry = {} + if registry.get('version', 0) != _filters_version: + registry.clear() + registry['version'] = _filters_version if isinstance(message, Warning): text = str(message) category = message.__class__ @@ -331,6 +337,7 @@ self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] + self._module._filters_mutated() self._showwarning = self._module.showwarning if self._record: log = [] @@ -345,6 +352,7 @@ if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters + self._module._filters_mutated() self._module.showwarning = self._showwarning @@ -359,15 +367,22 @@ _warnings_defaults = False try: from _warnings import (filters, _defaultaction, _onceregistry, - warn, warn_explicit) + warn, warn_explicit, _filters_mutated) defaultaction = _defaultaction onceregistry = _onceregistry _warnings_defaults = True + except ImportError: filters = [] defaultaction = "default" onceregistry = {} + _filters_version = 1 + + def _filters_mutated(): + global _filters_version + _filters_version += 1 + # Module initialization _processoptions(sys.warnoptions) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -132,6 +132,9 @@ Library ------- +- Issue #4180: The warnings registries are now reset when the filters + are modified. + - Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. 
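The new test_always_after_default above boils down to the following user-visible behaviour (a sketch of the fixed semantics; before this change the third call would have stayed suppressed by the stale per-module registry):

    import warnings

    def emit():
        warnings.warn("spam", UserWarning)

    with warnings.catch_warnings(record=True) as log:
        warnings.resetwarnings()
        emit()                                      # shown: first occurrence at this location
        emit()                                      # suppressed by the "default" action
        warnings.filterwarnings("always", category=UserWarning)
        emit()                                      # shown again: changing filters reset the registry
        print(len(log))                             # 2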
diff --git a/Python/_warnings.c b/Python/_warnings.c --- a/Python/_warnings.c +++ b/Python/_warnings.c @@ -12,6 +12,7 @@ static PyObject *_filters; /* List */ static PyObject *_once_registry; /* Dict */ static PyObject *_default_action; /* String */ +static long _filters_version; _Py_IDENTIFIER(argv); _Py_IDENTIFIER(stderr); @@ -178,16 +179,33 @@ static int already_warned(PyObject *registry, PyObject *key, int should_set) { - PyObject *already_warned; + PyObject *version_obj, *already_warned; + _Py_IDENTIFIER(version); if (key == NULL) return -1; - already_warned = PyDict_GetItem(registry, key); - if (already_warned != NULL) { - int rc = PyObject_IsTrue(already_warned); - if (rc != 0) - return rc; + version_obj = _PyDict_GetItemId(registry, &PyId_version); + if (version_obj == NULL + || !PyLong_CheckExact(version_obj) + || PyLong_AsLong(version_obj) != _filters_version) { + PyDict_Clear(registry); + version_obj = PyLong_FromLong(_filters_version); + if (version_obj == NULL) + return -1; + if (_PyDict_SetItemId(registry, &PyId_version, version_obj) < 0) { + Py_DECREF(version_obj); + return -1; + } + Py_DECREF(version_obj); + } + else { + already_warned = PyDict_GetItem(registry, key); + if (already_warned != NULL) { + int rc = PyObject_IsTrue(already_warned); + if (rc != 0) + return rc; + } } /* This warning wasn't found in the registry, set it. */ @@ -751,6 +769,13 @@ registry, NULL); } +static PyObject * +warnings_filters_mutated(PyObject *self, PyObject *args) +{ + _filters_version++; + Py_RETURN_NONE; +} + /* Function to issue a warning message; may raise an exception. */ @@ -918,6 +943,8 @@ warn_doc}, {"warn_explicit", (PyCFunction)warnings_warn_explicit, METH_VARARGS | METH_KEYWORDS, warn_explicit_doc}, + {"_filters_mutated", (PyCFunction)warnings_filters_mutated, METH_NOARGS, + NULL}, /* XXX(brett.cannon): add showwarning? */ /* XXX(brett.cannon): Reasonable to add formatwarning? */ {NULL, NULL} /* sentinel */ @@ -1070,5 +1097,7 @@ Py_INCREF(_default_action); if (PyModule_AddObject(m, "_defaultaction", _default_action) < 0) return NULL; + + _filters_version = 0; return m; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 03:06:56 2014 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 18 Sep 2014 01:06:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2311471=3A_avoid_g?= =?utf-8?q?enerating_a_JUMP=5FFORWARD_instruction_at_the_end_of_an?= Message-ID: <20140918010656.3935.35130@mail.hg.python.org> http://hg.python.org/cpython/rev/c0ca9d32aed4 changeset: 92460:c0ca9d32aed4 user: Antoine Pitrou date: Thu Sep 18 03:06:50 2014 +0200 summary: Closes #11471: avoid generating a JUMP_FORWARD instruction at the end of an if-block if there is no else-clause. Original patch by Eugene Toder. 
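A quick way to observe the optimization summarized above (dis.get_instructions is available from Python 3.4 on; the function f is an arbitrary example): an if-statement with no else-clause no longer ends in a JUMP_FORWARD over nothing:

    import dis

    def f(x):
        if x < 4:
            x += 1
        return x

    ops = [ins.opname for ins in dis.get_instructions(f)]
    print('JUMP_FORWARD' in ops)   # False once this optimization is in place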
files: Lib/test/test_dis.py | 172 +++++++++++++++--------------- Misc/NEWS | 3 + Python/compile.c | 6 +- 3 files changed, 90 insertions(+), 91 deletions(-) diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py --- a/Lib/test/test_dis.py +++ b/Lib/test/test_dis.py @@ -568,10 +568,10 @@ #_instructions = dis.get_instructions(outer, first_line=expected_outer_line) #print('expected_opinfo_outer = [\n ', #',\n '.join(map(str, _instructions)), ',\n]', sep='') -#_instructions = dis.get_instructions(outer(), first_line=expected_outer_line) +#_instructions = dis.get_instructions(outer(), first_line=expected_f_line) #print('expected_opinfo_f = [\n ', #',\n '.join(map(str, _instructions)), ',\n]', sep='') -#_instructions = dis.get_instructions(outer()(), first_line=expected_outer_line) +#_instructions = dis.get_instructions(outer()(), first_line=expected_inner_line) #print('expected_opinfo_inner = [\n ', #',\n '.join(map(str, _instructions)), ',\n]', sep='') #_instructions = dis.get_instructions(jumpy, first_line=expected_jumpy_line) @@ -642,12 +642,12 @@ ] expected_opinfo_jumpy = [ - Instruction(opname='SETUP_LOOP', opcode=120, arg=74, argval=77, argrepr='to 77', offset=0, starts_line=3, is_jump_target=False), + Instruction(opname='SETUP_LOOP', opcode=120, arg=68, argval=71, argrepr='to 71', offset=0, starts_line=3, is_jump_target=False), Instruction(opname='LOAD_GLOBAL', opcode=116, arg=0, argval='range', argrepr='range', offset=3, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_CONST', opcode=100, arg=1, argval=10, argrepr='10', offset=6, starts_line=None, is_jump_target=False), Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=9, starts_line=None, is_jump_target=False), Instruction(opname='GET_ITER', opcode=68, arg=None, argval=None, argrepr='', offset=12, starts_line=None, is_jump_target=False), - Instruction(opname='FOR_ITER', opcode=93, arg=50, argval=66, argrepr='to 66', offset=13, starts_line=None, is_jump_target=True), + Instruction(opname='FOR_ITER', opcode=93, arg=44, argval=60, argrepr='to 60', offset=13, starts_line=None, is_jump_target=True), Instruction(opname='STORE_FAST', opcode=125, arg=0, argval='i', argrepr='i', offset=16, starts_line=None, is_jump_target=False), Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=19, starts_line=4, is_jump_target=False), Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=22, starts_line=None, is_jump_target=False), @@ -656,92 +656,88 @@ Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=29, starts_line=5, is_jump_target=False), Instruction(opname='LOAD_CONST', opcode=100, arg=2, argval=4, argrepr='4', offset=32, starts_line=None, is_jump_target=False), Instruction(opname='COMPARE_OP', opcode=107, arg=0, argval='<', argrepr='<', offset=35, starts_line=None, is_jump_target=False), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=47, argval=47, argrepr='', offset=38, starts_line=None, is_jump_target=False), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=44, argval=44, argrepr='', offset=38, starts_line=None, is_jump_target=False), Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=13, argval=13, argrepr='', offset=41, starts_line=6, is_jump_target=False), - Instruction(opname='JUMP_FORWARD', opcode=110, arg=0, argval=47, argrepr='to 47', offset=44, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, 
argval='i', argrepr='i', offset=47, starts_line=7, is_jump_target=True), - Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=6, argrepr='6', offset=50, starts_line=None, is_jump_target=False), - Instruction(opname='COMPARE_OP', opcode=107, arg=4, argval='>', argrepr='>', offset=53, starts_line=None, is_jump_target=False), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=13, argval=13, argrepr='', offset=56, starts_line=None, is_jump_target=False), - Instruction(opname='BREAK_LOOP', opcode=80, arg=None, argval=None, argrepr='', offset=59, starts_line=8, is_jump_target=False), - Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=13, argval=13, argrepr='', offset=60, starts_line=None, is_jump_target=False), - Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=13, argval=13, argrepr='', offset=63, starts_line=None, is_jump_target=False), - Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=66, starts_line=None, is_jump_target=True), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=67, starts_line=10, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=4, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=70, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=73, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=76, starts_line=None, is_jump_target=False), - Instruction(opname='SETUP_LOOP', opcode=120, arg=74, argval=154, argrepr='to 154', offset=77, starts_line=11, is_jump_target=True), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=80, starts_line=None, is_jump_target=True), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=143, argval=143, argrepr='', offset=83, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=86, starts_line=12, is_jump_target=False), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=89, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=92, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=95, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=96, starts_line=13, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=1, argrepr='1', offset=99, starts_line=None, is_jump_target=False), - Instruction(opname='INPLACE_SUBTRACT', opcode=56, arg=None, argval=None, argrepr='', offset=102, starts_line=None, is_jump_target=False), - Instruction(opname='STORE_FAST', opcode=125, arg=0, argval='i', argrepr='i', offset=103, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=106, starts_line=14, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=6, argrepr='6', offset=109, starts_line=None, is_jump_target=False), - Instruction(opname='COMPARE_OP', opcode=107, arg=4, argval='>', argrepr='>', offset=112, starts_line=None, is_jump_target=False), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=124, argval=124, 
argrepr='', offset=115, starts_line=None, is_jump_target=False), - Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=80, argval=80, argrepr='', offset=118, starts_line=15, is_jump_target=False), - Instruction(opname='JUMP_FORWARD', opcode=110, arg=0, argval=124, argrepr='to 124', offset=121, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=124, starts_line=16, is_jump_target=True), - Instruction(opname='LOAD_CONST', opcode=100, arg=2, argval=4, argrepr='4', offset=127, starts_line=None, is_jump_target=False), - Instruction(opname='COMPARE_OP', opcode=107, arg=0, argval='<', argrepr='<', offset=130, starts_line=None, is_jump_target=False), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=80, argval=80, argrepr='', offset=133, starts_line=None, is_jump_target=False), - Instruction(opname='BREAK_LOOP', opcode=80, arg=None, argval=None, argrepr='', offset=136, starts_line=17, is_jump_target=False), - Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=80, argval=80, argrepr='', offset=137, starts_line=None, is_jump_target=False), - Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=80, argval=80, argrepr='', offset=140, starts_line=None, is_jump_target=False), - Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=143, starts_line=None, is_jump_target=True), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=144, starts_line=19, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=6, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=147, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=150, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=153, starts_line=None, is_jump_target=False), - Instruction(opname='SETUP_FINALLY', opcode=122, arg=72, argval=229, argrepr='to 229', offset=154, starts_line=20, is_jump_target=True), - Instruction(opname='SETUP_EXCEPT', opcode=121, arg=12, argval=172, argrepr='to 172', offset=157, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=1, argrepr='1', offset=160, starts_line=21, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=7, argval=0, argrepr='0', offset=163, starts_line=None, is_jump_target=False), - Instruction(opname='BINARY_TRUE_DIVIDE', opcode=27, arg=None, argval=None, argrepr='', offset=166, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=167, starts_line=None, is_jump_target=False), - Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=168, starts_line=None, is_jump_target=False), - Instruction(opname='JUMP_FORWARD', opcode=110, arg=28, argval=200, argrepr='to 200', offset=169, starts_line=None, is_jump_target=False), - Instruction(opname='DUP_TOP', opcode=4, arg=None, argval=None, argrepr='', offset=172, starts_line=22, is_jump_target=True), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=2, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=173, starts_line=None, is_jump_target=False), - Instruction(opname='COMPARE_OP', opcode=107, arg=10, argval='exception match', argrepr='exception match', offset=176, starts_line=None, 
is_jump_target=False), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=199, argval=199, argrepr='', offset=179, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=44, starts_line=7, is_jump_target=True), + Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=6, argrepr='6', offset=47, starts_line=None, is_jump_target=False), + Instruction(opname='COMPARE_OP', opcode=107, arg=4, argval='>', argrepr='>', offset=50, starts_line=None, is_jump_target=False), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=13, argval=13, argrepr='', offset=53, starts_line=None, is_jump_target=False), + Instruction(opname='BREAK_LOOP', opcode=80, arg=None, argval=None, argrepr='', offset=56, starts_line=8, is_jump_target=False), + Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=13, argval=13, argrepr='', offset=57, starts_line=None, is_jump_target=False), + Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=60, starts_line=None, is_jump_target=True), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=61, starts_line=10, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=4, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=64, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=67, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=70, starts_line=None, is_jump_target=False), + Instruction(opname='SETUP_LOOP', opcode=120, arg=68, argval=142, argrepr='to 142', offset=71, starts_line=11, is_jump_target=True), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=74, starts_line=None, is_jump_target=True), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=131, argval=131, argrepr='', offset=77, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=80, starts_line=12, is_jump_target=False), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=83, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=86, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=89, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=90, starts_line=13, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=1, argrepr='1', offset=93, starts_line=None, is_jump_target=False), + Instruction(opname='INPLACE_SUBTRACT', opcode=56, arg=None, argval=None, argrepr='', offset=96, starts_line=None, is_jump_target=False), + Instruction(opname='STORE_FAST', opcode=125, arg=0, argval='i', argrepr='i', offset=97, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=100, starts_line=14, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=3, argval=6, argrepr='6', offset=103, starts_line=None, is_jump_target=False), + Instruction(opname='COMPARE_OP', opcode=107, arg=4, argval='>', argrepr='>', offset=106, starts_line=None, 
is_jump_target=False), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=115, argval=115, argrepr='', offset=109, starts_line=None, is_jump_target=False), + Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=74, argval=74, argrepr='', offset=112, starts_line=15, is_jump_target=False), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=115, starts_line=16, is_jump_target=True), + Instruction(opname='LOAD_CONST', opcode=100, arg=2, argval=4, argrepr='4', offset=118, starts_line=None, is_jump_target=False), + Instruction(opname='COMPARE_OP', opcode=107, arg=0, argval='<', argrepr='<', offset=121, starts_line=None, is_jump_target=False), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=74, argval=74, argrepr='', offset=124, starts_line=None, is_jump_target=False), + Instruction(opname='BREAK_LOOP', opcode=80, arg=None, argval=None, argrepr='', offset=127, starts_line=17, is_jump_target=False), + Instruction(opname='JUMP_ABSOLUTE', opcode=113, arg=74, argval=74, argrepr='', offset=128, starts_line=None, is_jump_target=False), + Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=131, starts_line=None, is_jump_target=True), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=132, starts_line=19, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=6, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=135, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=138, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=141, starts_line=None, is_jump_target=False), + Instruction(opname='SETUP_FINALLY', opcode=122, arg=72, argval=217, argrepr='to 217', offset=142, starts_line=20, is_jump_target=True), + Instruction(opname='SETUP_EXCEPT', opcode=121, arg=12, argval=160, argrepr='to 160', offset=145, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=5, argval=1, argrepr='1', offset=148, starts_line=21, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=7, argval=0, argrepr='0', offset=151, starts_line=None, is_jump_target=False), + Instruction(opname='BINARY_TRUE_DIVIDE', opcode=27, arg=None, argval=None, argrepr='', offset=154, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=155, starts_line=None, is_jump_target=False), + Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=156, starts_line=None, is_jump_target=False), + Instruction(opname='JUMP_FORWARD', opcode=110, arg=28, argval=188, argrepr='to 188', offset=157, starts_line=None, is_jump_target=False), + Instruction(opname='DUP_TOP', opcode=4, arg=None, argval=None, argrepr='', offset=160, starts_line=22, is_jump_target=True), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=2, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=161, starts_line=None, is_jump_target=False), + Instruction(opname='COMPARE_OP', opcode=107, arg=10, argval='exception match', argrepr='exception match', offset=164, starts_line=None, is_jump_target=False), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=187, argval=187, argrepr='', offset=167, starts_line=None, is_jump_target=False), + 
Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=170, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=171, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=172, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=173, starts_line=23, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=8, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=176, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=179, starts_line=None, is_jump_target=False), Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=182, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=183, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=184, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=185, starts_line=23, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=8, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=188, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=191, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=194, starts_line=None, is_jump_target=False), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=195, starts_line=None, is_jump_target=False), - Instruction(opname='JUMP_FORWARD', opcode=110, arg=26, argval=225, argrepr='to 225', offset=196, starts_line=None, is_jump_target=False), - Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=199, starts_line=None, is_jump_target=True), - Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=200, starts_line=25, is_jump_target=True), - Instruction(opname='SETUP_WITH', opcode=143, arg=17, argval=223, argrepr='to 223', offset=203, starts_line=None, is_jump_target=False), - Instruction(opname='STORE_FAST', opcode=125, arg=1, argval='dodgy', argrepr='dodgy', offset=206, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=209, starts_line=26, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=9, argval='Never reach this', argrepr="'Never reach this'", offset=212, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=215, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=218, starts_line=None, is_jump_target=False), - Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=219, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=220, 
starts_line=None, is_jump_target=False), - Instruction(opname='WITH_CLEANUP', opcode=81, arg=None, argval=None, argrepr='', offset=223, starts_line=None, is_jump_target=True), - Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=224, starts_line=None, is_jump_target=False), - Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=225, starts_line=None, is_jump_target=True), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=226, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=229, starts_line=28, is_jump_target=True), - Instruction(opname='LOAD_CONST', opcode=100, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=232, starts_line=None, is_jump_target=False), - Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=235, starts_line=None, is_jump_target=False), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=238, starts_line=None, is_jump_target=False), - Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=239, starts_line=None, is_jump_target=False), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=240, starts_line=None, is_jump_target=False), - Instruction(opname='RETURN_VALUE', opcode=83, arg=None, argval=None, argrepr='', offset=243, starts_line=None, is_jump_target=False), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=183, starts_line=None, is_jump_target=False), + Instruction(opname='JUMP_FORWARD', opcode=110, arg=26, argval=213, argrepr='to 213', offset=184, starts_line=None, is_jump_target=False), + Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=187, starts_line=None, is_jump_target=True), + Instruction(opname='LOAD_FAST', opcode=124, arg=0, argval='i', argrepr='i', offset=188, starts_line=25, is_jump_target=True), + Instruction(opname='SETUP_WITH', opcode=143, arg=17, argval=211, argrepr='to 211', offset=191, starts_line=None, is_jump_target=False), + Instruction(opname='STORE_FAST', opcode=125, arg=1, argval='dodgy', argrepr='dodgy', offset=194, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=197, starts_line=26, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=9, argval='Never reach this', argrepr="'Never reach this'", offset=200, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=203, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=206, starts_line=None, is_jump_target=False), + Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=207, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=208, starts_line=None, is_jump_target=False), + Instruction(opname='WITH_CLEANUP', opcode=81, arg=None, argval=None, argrepr='', offset=211, starts_line=None, is_jump_target=True), + Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=212, starts_line=None, is_jump_target=False), + 
Instruction(opname='POP_BLOCK', opcode=87, arg=None, argval=None, argrepr='', offset=213, starts_line=None, is_jump_target=True), + Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=214, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=1, argval='print', argrepr='print', offset=217, starts_line=28, is_jump_target=True), + Instruction(opname='LOAD_CONST', opcode=100, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=220, starts_line=None, is_jump_target=False), + Instruction(opname='CALL_FUNCTION', opcode=131, arg=1, argval=1, argrepr='1 positional, 0 keyword pair', offset=223, starts_line=None, is_jump_target=False), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=226, starts_line=None, is_jump_target=False), + Instruction(opname='END_FINALLY', opcode=88, arg=None, argval=None, argrepr='', offset=227, starts_line=None, is_jump_target=False), + Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=228, starts_line=None, is_jump_target=False), + Instruction(opname='RETURN_VALUE', opcode=83, arg=None, argval=None, argrepr='', offset=231, starts_line=None, is_jump_target=False), ] # One last piece of inspect fodder to check the default line number handling diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #11471: avoid generating a JUMP_FORWARD instruction at the end of + an if-block if there is no else-clause. Original patch by Eugene Toder. + - Issue #22215: Now ValueError is raised instead of TypeError when str or bytes argument contains not permitted null character or byte. diff --git a/Python/compile.c b/Python/compile.c --- a/Python/compile.c +++ b/Python/compile.c @@ -1940,7 +1940,7 @@ } else if (constant == 1) { VISIT_SEQ(c, stmt, s->v.If.body); } else { - if (s->v.If.orelse) { + if (asdl_seq_LEN(s->v.If.orelse)) { next = compiler_new_block(c); if (next == NULL) return 0; @@ -1950,8 +1950,8 @@ VISIT(c, expr, s->v.If.test); ADDOP_JABS(c, POP_JUMP_IF_FALSE, next); VISIT_SEQ(c, stmt, s->v.If.body); - ADDOP_JREL(c, JUMP_FORWARD, end); - if (s->v.If.orelse) { + if (asdl_seq_LEN(s->v.If.orelse)) { + ADDOP_JREL(c, JUMP_FORWARD, end); compiler_use_next_block(c, next); VISIT_SEQ(c, stmt, s->v.If.orelse); } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 04:10:46 2014 From: python-checkins at python.org (berker.peksag) Date: Thu, 18 Sep 2014 02:10:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2321391=3A_Use_os?= =?utf-8?q?=2Epath=2Eabspath_in_the_shutil_module=2E?= Message-ID: <20140918021034.99472.25139@mail.hg.python.org> http://hg.python.org/cpython/rev/ab369d809200 changeset: 92461:ab369d809200 user: Berker Peksag date: Thu Sep 18 05:11:15 2014 +0300 summary: Issue #21391: Use os.path.abspath in the shutil module. 
files: Lib/shutil.py | 5 ++--- Misc/NEWS | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Lib/shutil.py b/Lib/shutil.py --- a/Lib/shutil.py +++ b/Lib/shutil.py @@ -7,7 +7,6 @@ import os import sys import stat -from os.path import abspath import fnmatch import collections import errno @@ -550,8 +549,8 @@ return real_dst def _destinsrc(src, dst): - src = abspath(src) - dst = abspath(dst) + src = os.path.abspath(src) + dst = os.path.abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #21391: Use os.path.abspath in the shutil module. + - Issue #11471: avoid generating a JUMP_FORWARD instruction at the end of an if-block if there is no else-clause. Original patch by Eugene Toder. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 04:22:32 2014 From: python-checkins at python.org (berker.peksag) Date: Thu, 18 Sep 2014 02:22:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2321706=3A_Add_a_ve?= =?utf-8?q?rsionchanged_directive_to_the_functional_API_docs=2E?= Message-ID: <20140918022228.95833.42858@mail.hg.python.org> http://hg.python.org/cpython/rev/713ee49ec3ba changeset: 92462:713ee49ec3ba user: Berker Peksag date: Thu Sep 18 05:23:14 2014 +0300 summary: Issue #21706: Add a versionchanged directive to the functional API docs. files: Doc/library/enum.rst | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -464,6 +464,9 @@ :start: number to start counting at if only names are passed in +.. versionchanged:: 3.5 + The *start* parameter was added. + Derived Enumerations -------------------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 05:04:53 2014 From: python-checkins at python.org (berker.peksag) Date: Thu, 18 Sep 2014 03:04:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2314824=3A_Update_Repr=2Erepr=5FTYPE_documentatio?= =?utf-8?q?n_to_use_correct_name_mangling?= Message-ID: <20140918030453.49117.8039@mail.hg.python.org> http://hg.python.org/cpython/rev/a0372781eafb changeset: 92464:a0372781eafb parent: 92462:713ee49ec3ba parent: 92463:e9968782c9ba user: Berker Peksag date: Thu Sep 18 06:05:37 2014 +0300 summary: Issue #14824: Update Repr.repr_TYPE documentation to use correct name mangling implementation. Patch by Chris Rebert. files: Doc/library/reprlib.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/reprlib.rst b/Doc/library/reprlib.rst --- a/Doc/library/reprlib.rst +++ b/Doc/library/reprlib.rst @@ -129,9 +129,9 @@ Formatting methods for specific types are implemented as methods with a name based on the type name. In the method name, **TYPE** is replaced by - ``string.join(string.split(type(obj).__name__, '_'))``. Dispatch to these - methods is handled by :meth:`repr1`. Type-specific methods which need to - recursively format a value should call ``self.repr1(subobj, level - 1)``. + ``'_'.join(type(obj).__name__.split())``. Dispatch to these methods is + handled by :meth:`repr1`. Type-specific methods which need to recursively + format a value should call ``self.repr1(subobj, level - 1)``. .. 
_subclassing-reprs: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 05:04:54 2014 From: python-checkins at python.org (berker.peksag) Date: Thu, 18 Sep 2014 03:04:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE0ODI0?= =?utf-8?q?=3A_Update_Repr=2Erepr=5FTYPE_documentation_to_use_correct_name?= =?utf-8?q?_mangling?= Message-ID: <20140918030452.10381.25370@mail.hg.python.org> http://hg.python.org/cpython/rev/e9968782c9ba changeset: 92463:e9968782c9ba branch: 3.4 parent: 92458:8adb2c6e0803 user: Berker Peksag date: Thu Sep 18 06:05:14 2014 +0300 summary: Issue #14824: Update Repr.repr_TYPE documentation to use correct name mangling implementation. Patch by Chris Rebert. files: Doc/library/reprlib.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/reprlib.rst b/Doc/library/reprlib.rst --- a/Doc/library/reprlib.rst +++ b/Doc/library/reprlib.rst @@ -129,9 +129,9 @@ Formatting methods for specific types are implemented as methods with a name based on the type name. In the method name, **TYPE** is replaced by - ``string.join(string.split(type(obj).__name__, '_'))``. Dispatch to these - methods is handled by :meth:`repr1`. Type-specific methods which need to - recursively format a value should call ``self.repr1(subobj, level - 1)``. + ``'_'.join(type(obj).__name__.split())``. Dispatch to these methods is + handled by :meth:`repr1`. Type-specific methods which need to recursively + format a value should call ``self.repr1(subobj, level - 1)``. .. _subclassing-reprs: -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Thu Sep 18 09:58:12 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 18 Sep 2014 09:58:12 +0200 Subject: [Python-checkins] Daily reference leaks (c0ca9d32aed4): sum=130907 Message-ID: results for c0ca9d32aed4 on branch "default" -------------------------------------------- test_collections leaked [0, -2, 0] references, sum=-2 test_distutils leaked [37725, 37725, 37725] references, sum=113175 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 2, -2] references, sum=0 test_site leaked [0, 2, -2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogDHvTta', '-x'] From python-checkins at python.org Thu Sep 18 15:38:01 2014 From: python-checkins at python.org (senthil.kumaran) Date: Thu, 18 Sep 2014 13:38:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_from_3=2E4?= Message-ID: <20140918133758.66692.58163@mail.hg.python.org> http://hg.python.org/cpython/rev/ea401e7c55e4 changeset: 92467:ea401e7c55e4 parent: 92464:a0372781eafb parent: 92466:1e81c7a261a7 user: Senthil Kumaran date: Thu Sep 18 21:37:26 2014 +0800 summary: merge from 3.4 Issue #16827: Make Interpreter introduction section of the tutorial more focussed and move advanced section and customization information to a separate file called appendix. Patch credits: Jamayla Wiley, Ya-Ting Huang and James Brewer. files: Doc/tutorial/appendix.rst | 124 +++++++++++++++++++++++ Doc/tutorial/index.rst | 1 + Doc/tutorial/interpreter.rst | 113 +-------------------- 3 files changed, 128 insertions(+), 110 deletions(-) diff --git a/Doc/tutorial/appendix.rst b/Doc/tutorial/appendix.rst new file mode 100644 --- /dev/null +++ b/Doc/tutorial/appendix.rst @@ -0,0 +1,124 @@ +.. 
_tut-appendix: + +******** +Appendix +******** + + +.. _tut-interac: + +Interactive Mode +================ + +.. _tut-error: + +Error Handling +-------------- + +When an error occurs, the interpreter prints an error message and a stack trace. +In interactive mode, it then returns to the primary prompt; when input came from +a file, it exits with a nonzero exit status after printing the stack trace. +(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement +are not errors in this context.) Some errors are unconditionally fatal and +cause an exit with a nonzero exit; this applies to internal inconsistencies and +some cases of running out of memory. All error messages are written to the +standard error stream; normal output from executed commands is written to +standard output. + +Typing the interrupt character (usually Control-C or DEL) to the primary or +secondary prompt cancels the input and returns to the primary prompt. [#]_ +Typing an interrupt while a command is executing raises the +:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` +statement. + + +.. _tut-scripts: + +Executable Python Scripts +------------------------- + +On BSD'ish Unix systems, Python scripts can be made directly executable, like +shell scripts, by putting the line :: + + #!/usr/bin/env python3.5 + +(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning +of the script and giving the file an executable mode. The ``#!`` must be the +first two characters of the file. On some platforms, this first line must end +with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line +ending. Note that the hash, or pound, character, ``'#'``, is used to start a +comment in Python. + +The script can be given an executable mode, or permission, using the +:program:`chmod` command. + +.. code-block:: bash + + $ chmod +x myscript.py + +On Windows systems, there is no notion of an "executable mode". The Python +installer automatically associates ``.py`` files with ``python.exe`` so that +a double-click on a Python file will run it as a script. The extension can +also be ``.pyw``, in that case, the console window that normally appears is +suppressed. + + +.. _tut-startup: + +The Interactive Startup File +---------------------------- + +When you use Python interactively, it is frequently handy to have some standard +commands executed every time the interpreter is started. You can do this by +setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a +file containing your start-up commands. This is similar to the :file:`.profile` +feature of the Unix shells. + +This file is only read in interactive sessions, not when Python reads commands +from a script, and not when :file:`/dev/tty` is given as the explicit source of +commands (which otherwise behaves like an interactive session). It is executed +in the same namespace where interactive commands are executed, so that objects +that it defines or imports can be used without qualification in the interactive +session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this +file. + +If you want to read an additional start-up file from the current directory, you +can program this in the global start-up file using code like ``if +os.path.isfile('.pythonrc.py'): exec(open('.pythonrc.py').read())``. 
+If you want to use the startup file in a script, you must do this explicitly +in the script:: + + import os + filename = os.environ.get('PYTHONSTARTUP') + if filename and os.path.isfile(filename): + with open(filename) as fobj: + startup_file = fobj.read() + exec(startup_file) + + +.. _tut-customize: + +The Customization Modules +------------------------- + +Python provides two hooks to let you customize it: :mod:`sitecustomize` and +:mod:`usercustomize`. To see how it works, you need first to find the location +of your user site-packages directory. Start Python and run this code:: + + >>> import site + >>> site.getusersitepackages() + '/home/user/.local/lib/python3.5/site-packages' + +Now you can create a file named :file:`usercustomize.py` in that directory and +put anything you want in it. It will affect every invocation of Python, unless +it is started with the :option:`-s` option to disable the automatic import. + +:mod:`sitecustomize` works in the same way, but is typically created by an +administrator of the computer in the global site-packages directory, and is +imported before :mod:`usercustomize`. See the documentation of the :mod:`site` +module for more details. + + +.. rubric:: Footnotes + +.. [#] A problem with the GNU Readline package may prevent this. diff --git a/Doc/tutorial/index.rst b/Doc/tutorial/index.rst --- a/Doc/tutorial/index.rst +++ b/Doc/tutorial/index.rst @@ -56,3 +56,4 @@ whatnow.rst interactive.rst floatingpoint.rst + appendix.rst diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst --- a/Doc/tutorial/interpreter.rst +++ b/Doc/tutorial/interpreter.rst @@ -112,63 +112,15 @@ Be careful not to fall off! +For more on interactive mode, see :ref:`tut-interac`. + + .. _tut-interp: The Interpreter and Its Environment =================================== -.. _tut-error: - -Error Handling --------------- - -When an error occurs, the interpreter prints an error message and a stack trace. -In interactive mode, it then returns to the primary prompt; when input came from -a file, it exits with a nonzero exit status after printing the stack trace. -(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement -are not errors in this context.) Some errors are unconditionally fatal and -cause an exit with a nonzero exit; this applies to internal inconsistencies and -some cases of running out of memory. All error messages are written to the -standard error stream; normal output from executed commands is written to -standard output. - -Typing the interrupt character (usually Control-C or DEL) to the primary or -secondary prompt cancels the input and returns to the primary prompt. [#]_ -Typing an interrupt while a command is executing raises the -:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` -statement. - - -.. _tut-scripts: - -Executable Python Scripts -------------------------- - -On BSD'ish Unix systems, Python scripts can be made directly executable, like -shell scripts, by putting the line :: - - #! /usr/bin/env python3.5 - -(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning -of the script and giving the file an executable mode. The ``#!`` must be the -first two characters of the file. On some platforms, this first line must end -with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line -ending. Note that the hash, or pound, character, ``'#'``, is used to start a -comment in Python. 
- -The script can be given an executable mode, or permission, using the -:program:`chmod` command:: - - $ chmod +x myscript.py - -On Windows systems, there is no notion of an "executable mode". The Python -installer automatically associates ``.py`` files with ``python.exe`` so that -a double-click on a Python file will run it as a script. The extension can -also be ``.pyw``, in that case, the console window that normally appears is -suppressed. - - .. _tut-source-encoding: Source Code Encoding @@ -202,67 +154,8 @@ within the file. -.. _tut-startup: - -The Interactive Startup File ----------------------------- - -When you use Python interactively, it is frequently handy to have some standard -commands executed every time the interpreter is started. You can do this by -setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a -file containing your start-up commands. This is similar to the :file:`.profile` -feature of the Unix shells. - -.. XXX This should probably be dumped in an appendix, since most people - don't use Python interactively in non-trivial ways. - -This file is only read in interactive sessions, not when Python reads commands -from a script, and not when :file:`/dev/tty` is given as the explicit source of -commands (which otherwise behaves like an interactive session). It is executed -in the same namespace where interactive commands are executed, so that objects -that it defines or imports can be used without qualification in the interactive -session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this -file. - -If you want to read an additional start-up file from the current directory, you -can program this in the global start-up file using code like ``if -os.path.isfile('.pythonrc.py'): exec(open('.pythonrc.py').read())``. -If you want to use the startup file in a script, you must do this explicitly -in the script:: - - import os - filename = os.environ.get('PYTHONSTARTUP') - if filename and os.path.isfile(filename): - exec(open(filename).read()) - - -.. _tut-customize: - -The Customization Modules -------------------------- - -Python provides two hooks to let you customize it: :mod:`sitecustomize` and -:mod:`usercustomize`. To see how it works, you need first to find the location -of your user site-packages directory. Start Python and run this code: - - >>> import site - >>> site.getusersitepackages() - '/home/user/.local/lib/python3.2/site-packages' - -Now you can create a file named :file:`usercustomize.py` in that directory and -put anything you want in it. It will affect every invocation of Python, unless -it is started with the :option:`-s` option to disable the automatic import. - -:mod:`sitecustomize` works in the same way, but is typically created by an -administrator of the computer in the global site-packages directory, and is -imported before :mod:`usercustomize`. See the documentation of the :mod:`site` -module for more details. - - .. rubric:: Footnotes .. [#] On Unix, the Python 3.x interpreter is by default not installed with the executable named ``python``, so that it does not conflict with a simultaneously installed Python 2.x executable. - -.. [#] A problem with the GNU Readline package may prevent this. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 15:38:01 2014 From: python-checkins at python.org (senthil.kumaran) Date: Thu, 18 Sep 2014 13:38:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE2ODI3?= =?utf-8?q?=3A_Make_Interpreter_introduction_section_of_the_tutorial_more?= Message-ID: <20140918133757.23490.64468@mail.hg.python.org> http://hg.python.org/cpython/rev/1e81c7a261a7 changeset: 92466:1e81c7a261a7 branch: 3.4 parent: 92463:e9968782c9ba user: Senthil Kumaran date: Thu Sep 18 21:30:28 2014 +0800 summary: Issue #16827: Make Interpreter introduction section of the tutorial more focussed and move advanced section and customization information to a separate file called appendix. Patch credits: Jamayla Wiley, Ya-Ting Huang and James Brewer. files: Doc/tutorial/appendix.rst | 124 +++++++++++++++++++++++ Doc/tutorial/index.rst | 1 + Doc/tutorial/interpreter.rst | 113 +-------------------- 3 files changed, 128 insertions(+), 110 deletions(-) diff --git a/Doc/tutorial/appendix.rst b/Doc/tutorial/appendix.rst new file mode 100644 --- /dev/null +++ b/Doc/tutorial/appendix.rst @@ -0,0 +1,124 @@ +.. _tut-appendix: + +******** +Appendix +******** + + +.. _tut-interac: + +Interactive Mode +================ + +.. _tut-error: + +Error Handling +-------------- + +When an error occurs, the interpreter prints an error message and a stack trace. +In interactive mode, it then returns to the primary prompt; when input came from +a file, it exits with a nonzero exit status after printing the stack trace. +(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement +are not errors in this context.) Some errors are unconditionally fatal and +cause an exit with a nonzero exit; this applies to internal inconsistencies and +some cases of running out of memory. All error messages are written to the +standard error stream; normal output from executed commands is written to +standard output. + +Typing the interrupt character (usually Control-C or DEL) to the primary or +secondary prompt cancels the input and returns to the primary prompt. [#]_ +Typing an interrupt while a command is executing raises the +:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` +statement. + + +.. _tut-scripts: + +Executable Python Scripts +------------------------- + +On BSD'ish Unix systems, Python scripts can be made directly executable, like +shell scripts, by putting the line :: + + #!/usr/bin/env python3.4 + +(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning +of the script and giving the file an executable mode. The ``#!`` must be the +first two characters of the file. On some platforms, this first line must end +with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line +ending. Note that the hash, or pound, character, ``'#'``, is used to start a +comment in Python. + +The script can be given an executable mode, or permission, using the +:program:`chmod` command. + +.. code-block:: bash + + $ chmod +x myscript.py + +On Windows systems, there is no notion of an "executable mode". The Python +installer automatically associates ``.py`` files with ``python.exe`` so that +a double-click on a Python file will run it as a script. The extension can +also be ``.pyw``, in that case, the console window that normally appears is +suppressed. + + +.. 
_tut-startup: + +The Interactive Startup File +---------------------------- + +When you use Python interactively, it is frequently handy to have some standard +commands executed every time the interpreter is started. You can do this by +setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a +file containing your start-up commands. This is similar to the :file:`.profile` +feature of the Unix shells. + +This file is only read in interactive sessions, not when Python reads commands +from a script, and not when :file:`/dev/tty` is given as the explicit source of +commands (which otherwise behaves like an interactive session). It is executed +in the same namespace where interactive commands are executed, so that objects +that it defines or imports can be used without qualification in the interactive +session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this +file. + +If you want to read an additional start-up file from the current directory, you +can program this in the global start-up file using code like ``if +os.path.isfile('.pythonrc.py'): exec(open('.pythonrc.py').read())``. +If you want to use the startup file in a script, you must do this explicitly +in the script:: + + import os + filename = os.environ.get('PYTHONSTARTUP') + if filename and os.path.isfile(filename): + with open(filename) as fobj: + startup_file = fobj.read() + exec(startup_file) + + +.. _tut-customize: + +The Customization Modules +------------------------- + +Python provides two hooks to let you customize it: :mod:`sitecustomize` and +:mod:`usercustomize`. To see how it works, you need first to find the location +of your user site-packages directory. Start Python and run this code:: + + >>> import site + >>> site.getusersitepackages() + '/home/user/.local/lib/python3.4/site-packages' + +Now you can create a file named :file:`usercustomize.py` in that directory and +put anything you want in it. It will affect every invocation of Python, unless +it is started with the :option:`-s` option to disable the automatic import. + +:mod:`sitecustomize` works in the same way, but is typically created by an +administrator of the computer in the global site-packages directory, and is +imported before :mod:`usercustomize`. See the documentation of the :mod:`site` +module for more details. + + +.. rubric:: Footnotes + +.. [#] A problem with the GNU Readline package may prevent this. diff --git a/Doc/tutorial/index.rst b/Doc/tutorial/index.rst --- a/Doc/tutorial/index.rst +++ b/Doc/tutorial/index.rst @@ -56,3 +56,4 @@ whatnow.rst interactive.rst floatingpoint.rst + appendix.rst diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst --- a/Doc/tutorial/interpreter.rst +++ b/Doc/tutorial/interpreter.rst @@ -112,63 +112,15 @@ Be careful not to fall off! +For more on interactive mode, see :ref:`tut-interac`. + + .. _tut-interp: The Interpreter and Its Environment =================================== -.. _tut-error: - -Error Handling --------------- - -When an error occurs, the interpreter prints an error message and a stack trace. -In interactive mode, it then returns to the primary prompt; when input came from -a file, it exits with a nonzero exit status after printing the stack trace. -(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement -are not errors in this context.) Some errors are unconditionally fatal and -cause an exit with a nonzero exit; this applies to internal inconsistencies and -some cases of running out of memory. 
All error messages are written to the -standard error stream; normal output from executed commands is written to -standard output. - -Typing the interrupt character (usually Control-C or DEL) to the primary or -secondary prompt cancels the input and returns to the primary prompt. [#]_ -Typing an interrupt while a command is executing raises the -:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` -statement. - - -.. _tut-scripts: - -Executable Python Scripts -------------------------- - -On BSD'ish Unix systems, Python scripts can be made directly executable, like -shell scripts, by putting the line :: - - #! /usr/bin/env python3.4 - -(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning -of the script and giving the file an executable mode. The ``#!`` must be the -first two characters of the file. On some platforms, this first line must end -with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line -ending. Note that the hash, or pound, character, ``'#'``, is used to start a -comment in Python. - -The script can be given an executable mode, or permission, using the -:program:`chmod` command:: - - $ chmod +x myscript.py - -On Windows systems, there is no notion of an "executable mode". The Python -installer automatically associates ``.py`` files with ``python.exe`` so that -a double-click on a Python file will run it as a script. The extension can -also be ``.pyw``, in that case, the console window that normally appears is -suppressed. - - .. _tut-source-encoding: Source Code Encoding @@ -202,67 +154,8 @@ within the file. -.. _tut-startup: - -The Interactive Startup File ----------------------------- - -When you use Python interactively, it is frequently handy to have some standard -commands executed every time the interpreter is started. You can do this by -setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a -file containing your start-up commands. This is similar to the :file:`.profile` -feature of the Unix shells. - -.. XXX This should probably be dumped in an appendix, since most people - don't use Python interactively in non-trivial ways. - -This file is only read in interactive sessions, not when Python reads commands -from a script, and not when :file:`/dev/tty` is given as the explicit source of -commands (which otherwise behaves like an interactive session). It is executed -in the same namespace where interactive commands are executed, so that objects -that it defines or imports can be used without qualification in the interactive -session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this -file. - -If you want to read an additional start-up file from the current directory, you -can program this in the global start-up file using code like ``if -os.path.isfile('.pythonrc.py'): exec(open('.pythonrc.py').read())``. -If you want to use the startup file in a script, you must do this explicitly -in the script:: - - import os - filename = os.environ.get('PYTHONSTARTUP') - if filename and os.path.isfile(filename): - exec(open(filename).read()) - - -.. _tut-customize: - -The Customization Modules -------------------------- - -Python provides two hooks to let you customize it: :mod:`sitecustomize` and -:mod:`usercustomize`. To see how it works, you need first to find the location -of your user site-packages directory. 
Start Python and run this code: - - >>> import site - >>> site.getusersitepackages() - '/home/user/.local/lib/python3.2/site-packages' - -Now you can create a file named :file:`usercustomize.py` in that directory and -put anything you want in it. It will affect every invocation of Python, unless -it is started with the :option:`-s` option to disable the automatic import. - -:mod:`sitecustomize` works in the same way, but is typically created by an -administrator of the computer in the global site-packages directory, and is -imported before :mod:`usercustomize`. See the documentation of the :mod:`site` -module for more details. - - .. rubric:: Footnotes .. [#] On Unix, the Python 3.x interpreter is by default not installed with the executable named ``python``, so that it does not conflict with a simultaneously installed Python 2.x executable. - -.. [#] A problem with the GNU Readline package may prevent this. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 15:38:01 2014 From: python-checkins at python.org (senthil.kumaran) Date: Thu, 18 Sep 2014 13:38:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE2ODI3?= =?utf-8?q?=3A_Make_Interpreter_introduction_section_of_the_tutorial_more?= Message-ID: <20140918133757.23490.74897@mail.hg.python.org> http://hg.python.org/cpython/rev/2e3a81a0fa14 changeset: 92465:2e3a81a0fa14 branch: 2.7 parent: 92452:7a4d960fc801 user: Senthil Kumaran date: Thu Sep 18 21:29:21 2014 +0800 summary: Issue #16827: Make Interpreter introduction section of the tutorial more focussed and move advanced section and customization information to a separate file called appendix. Patch credits: Jamayla Wiley, Ya-Ting Huang and James Brewer. files: Doc/tutorial/appendix.rst | 124 +++++++++++++++++++++++ Doc/tutorial/index.rst | 1 + Doc/tutorial/interpreter.rst | 114 +-------------------- 3 files changed, 128 insertions(+), 111 deletions(-) diff --git a/Doc/tutorial/appendix.rst b/Doc/tutorial/appendix.rst new file mode 100644 --- /dev/null +++ b/Doc/tutorial/appendix.rst @@ -0,0 +1,124 @@ +.. _tut-appendix: + +******** +Appendix +******** + + +.. _tut-interac: + +Interactive Mode +================ + +.. _tut-error: + +Error Handling +-------------- + +When an error occurs, the interpreter prints an error message and a stack trace. +In interactive mode, it then returns to the primary prompt; when input came from +a file, it exits with a nonzero exit status after printing the stack trace. +(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement +are not errors in this context.) Some errors are unconditionally fatal and +cause an exit with a nonzero exit; this applies to internal inconsistencies and +some cases of running out of memory. All error messages are written to the +standard error stream; normal output from executed commands is written to +standard output. + +Typing the interrupt character (usually Control-C or DEL) to the primary or +secondary prompt cancels the input and returns to the primary prompt. [#]_ +Typing an interrupt while a command is executing raises the +:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` +statement. + + +.. 
_tut-scripts: + +Executable Python Scripts +------------------------- + +On BSD'ish Unix systems, Python scripts can be made directly executable, like +shell scripts, by putting the line :: + + #!/usr/bin/env python + +(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning +of the script and giving the file an executable mode. The ``#!`` must be the +first two characters of the file. On some platforms, this first line must end +with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line +ending. Note that the hash, or pound, character, ``'#'``, is used to start a +comment in Python. + +The script can be given an executable mode, or permission, using the +:program:`chmod` command. + +.. code-block:: bash + + $ chmod +x myscript.py + +On Windows systems, there is no notion of an "executable mode". The Python +installer automatically associates ``.py`` files with ``python.exe`` so that +a double-click on a Python file will run it as a script. The extension can +also be ``.pyw``, in that case, the console window that normally appears is +suppressed. + + +.. _tut-startup: + +The Interactive Startup File +---------------------------- + +When you use Python interactively, it is frequently handy to have some standard +commands executed every time the interpreter is started. You can do this by +setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a +file containing your start-up commands. This is similar to the :file:`.profile` +feature of the Unix shells. + +This file is only read in interactive sessions, not when Python reads commands +from a script, and not when :file:`/dev/tty` is given as the explicit source of +commands (which otherwise behaves like an interactive session). It is executed +in the same namespace where interactive commands are executed, so that objects +that it defines or imports can be used without qualification in the interactive +session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this +file. + +If you want to read an additional start-up file from the current directory, you +can program this in the global start-up file using code like ``if +os.path.isfile('.pythonrc.py'): exec(open('.pythonrc.py').read())``. +If you want to use the startup file in a script, you must do this explicitly +in the script:: + + import os + filename = os.environ.get('PYTHONSTARTUP') + if filename and os.path.isfile(filename): + with open(filename) as fobj: + startup_file = fobj.read() + exec(startup_file) + + +.. _tut-customize: + +The Customization Modules +------------------------- + +Python provides two hooks to let you customize it: :mod:`sitecustomize` and +:mod:`usercustomize`. To see how it works, you need first to find the location +of your user site-packages directory. Start Python and run this code:: + + >>> import site + >>> site.getusersitepackages() + '/home/user/.local/lib/python2.7/site-packages' + +Now you can create a file named :file:`usercustomize.py` in that directory and +put anything you want in it. It will affect every invocation of Python, unless +it is started with the :option:`-s` option to disable the automatic import. + +:mod:`sitecustomize` works in the same way, but is typically created by an +administrator of the computer in the global site-packages directory, and is +imported before :mod:`usercustomize`. See the documentation of the :mod:`site` +module for more details. + + +.. rubric:: Footnotes + +.. [#] A problem with the GNU Readline package may prevent this. 
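The start-up file mechanism documented in the new appendix above lends itself to a short illustration. The following is a minimal sketch, not part of the patch, assuming the file lives at ~/.pythonstartup and that the PYTHONSTARTUP environment variable points at it; the history file path is likewise an assumption:

    # ~/.pythonstartup -- hypothetical location; enable it with e.g.
    #   export PYTHONSTARTUP=~/.pythonstartup
    # Read only for interactive sessions, never when running a script.
    import atexit
    import os
    import readline
    import sys

    # Keep interactive history across sessions (history path is an assumption).
    histfile = os.path.expanduser('~/.python_history')
    try:
        readline.read_history_file(histfile)
    except (IOError, OSError):   # no history file yet on first use
        pass
    atexit.register(readline.write_history_file, histfile)

    # The prompts can be changed here too, as the appendix notes.
    sys.ps1 = 'py> '
    sys.ps2 = '... '

Because the file is executed in the same namespace as interactive commands, names it defines (such as histfile above) remain available at the prompt, which is why the appendix recommends the explicit exec() idiom for scripts that want the same behaviour.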
diff --git a/Doc/tutorial/index.rst b/Doc/tutorial/index.rst --- a/Doc/tutorial/index.rst +++ b/Doc/tutorial/index.rst @@ -56,3 +56,4 @@ whatnow.rst interactive.rst floatingpoint.rst + appendix.rst diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst --- a/Doc/tutorial/interpreter.rst +++ b/Doc/tutorial/interpreter.rst @@ -108,63 +108,15 @@ Be careful not to fall off! +For more on interactive mode, see :ref:`tut-interac`. + + .. _tut-interp: The Interpreter and Its Environment =================================== -.. _tut-error: - -Error Handling --------------- - -When an error occurs, the interpreter prints an error message and a stack trace. -In interactive mode, it then returns to the primary prompt; when input came from -a file, it exits with a nonzero exit status after printing the stack trace. -(Exceptions handled by an :keyword:`except` clause in a :keyword:`try` statement -are not errors in this context.) Some errors are unconditionally fatal and -cause an exit with a nonzero exit; this applies to internal inconsistencies and -some cases of running out of memory. All error messages are written to the -standard error stream; normal output from executed commands is written to -standard output. - -Typing the interrupt character (usually Control-C or DEL) to the primary or -secondary prompt cancels the input and returns to the primary prompt. [#]_ -Typing an interrupt while a command is executing raises the -:exc:`KeyboardInterrupt` exception, which may be handled by a :keyword:`try` -statement. - - -.. _tut-scripts: - -Executable Python Scripts -------------------------- - -On BSD'ish Unix systems, Python scripts can be made directly executable, like -shell scripts, by putting the line :: - - #! /usr/bin/env python - -(assuming that the interpreter is on the user's :envvar:`PATH`) at the beginning -of the script and giving the file an executable mode. The ``#!`` must be the -first two characters of the file. On some platforms, this first line must end -with a Unix-style line ending (``'\n'``), not a Windows (``'\r\n'``) line -ending. Note that the hash, or pound, character, ``'#'``, is used to start a -comment in Python. - -The script can be given an executable mode, or permission, using the -:program:`chmod` command:: - - $ chmod +x myscript.py - -On Windows systems, there is no notion of an "executable mode". The Python -installer automatically associates ``.py`` files with ``python.exe`` so that -a double-click on a Python file will run it as a script. The extension can -also be ``.pyw``, in that case, the console window that normally appears is -suppressed. - - .. _tut-source-encoding: Source Code Encoding @@ -207,63 +159,3 @@ that the file is UTF-8, and it must use a font that supports all the characters in the file. - -.. _tut-startup: - -The Interactive Startup File ----------------------------- - -When you use Python interactively, it is frequently handy to have some standard -commands executed every time the interpreter is started. You can do this by -setting an environment variable named :envvar:`PYTHONSTARTUP` to the name of a -file containing your start-up commands. This is similar to the :file:`.profile` -feature of the Unix shells. - -.. XXX This should probably be dumped in an appendix, since most people - don't use Python interactively in non-trivial ways. 
- -This file is only read in interactive sessions, not when Python reads commands -from a script, and not when :file:`/dev/tty` is given as the explicit source of -commands (which otherwise behaves like an interactive session). It is executed -in the same namespace where interactive commands are executed, so that objects -that it defines or imports can be used without qualification in the interactive -session. You can also change the prompts ``sys.ps1`` and ``sys.ps2`` in this -file. - -If you want to read an additional start-up file from the current directory, you -can program this in the global start-up file using code like ``if -os.path.isfile('.pythonrc.py'): execfile('.pythonrc.py')``. If you want to use -the startup file in a script, you must do this explicitly in the script:: - - import os - filename = os.environ.get('PYTHONSTARTUP') - if filename and os.path.isfile(filename): - execfile(filename) - - -.. _tut-customize: - -The Customization Modules -------------------------- - -Python provides two hooks to let you customize it: :mod:`sitecustomize` and -:mod:`usercustomize`. To see how it works, you need first to find the location -of your user site-packages directory. Start Python and run this code: - - >>> import site - >>> site.getusersitepackages() - '/home/user/.local/lib/python2.7/site-packages' - -Now you can create a file named :file:`usercustomize.py` in that directory and -put anything you want in it. It will affect every invocation of Python, unless -it is started with the :option:`-s` option to disable the automatic import. - -:mod:`sitecustomize` works in the same way, but is typically created by an -administrator of the computer in the global site-packages directory, and is -imported before :mod:`usercustomize`. See the documentation of the :mod:`site` -module for more details. - - -.. rubric:: Footnotes - -.. [#] A problem with the GNU Readline package may prevent this. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 19:02:59 2014 From: python-checkins at python.org (vinay.sajip) Date: Thu, 18 Sep 2014 17:02:59 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDM2?= =?utf-8?q?=3A_Improved_documentation_for_getEffectiveLevel=28=29_and_getL?= =?utf-8?b?ZXZlbE5hbWUoKS4=?= Message-ID: <20140918170241.23490.66681@mail.hg.python.org> http://hg.python.org/cpython/rev/72a91df86b07 changeset: 92469:72a91df86b07 branch: 3.4 parent: 92466:1e81c7a261a7 user: Vinay Sajip date: Thu Sep 18 18:01:12 2014 +0100 summary: Issue #22436: Improved documentation for getEffectiveLevel() and getLevelName(). files: Doc/library/logging.rst | 13 +++++++++++-- 1 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -118,7 +118,9 @@ .. versionchanged:: 3.2 The *lvl* parameter now accepts a string representation of the level such as 'INFO' as an alternative to the integer constants - such as :const:`INFO`. + such as :const:`INFO`. Note, however, that levels are internally stored + as integers, and methods such as e.g. :meth:`getEffectiveLevel` and + :meth:`isEnabledFor` will return/expect to be passed integers. .. method:: Logger.isEnabledFor(lvl) @@ -134,7 +136,9 @@ Indicates the effective level for this logger. If a value other than :const:`NOTSET` has been set using :meth:`setLevel`, it is returned. 
Otherwise, the hierarchy is traversed towards the root until a value other than - :const:`NOTSET` is found, and that value is returned. + :const:`NOTSET` is found, and that value is returned. The value returned is + an integer, typically one of :const:`logging.DEBUG`, :const:`logging.INFO` + etc. .. method:: Logger.getChild(suffix) @@ -1049,6 +1053,11 @@ of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string 'Level %s' % lvl is returned. + .. note:: Levels are internally integers (as they need to be compared in the + logging logic). This function is used to convert between an integer level + and the level name displayed in the formatted log output by means of the + ``%(levelname)s`` format specifier (see :ref:`logrecord-attributes`). + .. versionchanged:: 3.4 In Python versions earlier than 3.4, this function could also be passed a text level, and would return the corresponding numeric value of the level. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 19:02:59 2014 From: python-checkins at python.org (vinay.sajip) Date: Thu, 18 Sep 2014 17:02:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2322436=3A_Merged_documentation_update_from_3=2E?= =?utf-8?q?4=2E?= Message-ID: <20140918170241.67284.56086@mail.hg.python.org> http://hg.python.org/cpython/rev/49dfe2630ae3 changeset: 92470:49dfe2630ae3 parent: 92467:ea401e7c55e4 parent: 92469:72a91df86b07 user: Vinay Sajip date: Thu Sep 18 18:02:29 2014 +0100 summary: Closes #22436: Merged documentation update from 3.4. files: Doc/library/logging.rst | 13 +++++++++++-- 1 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -118,7 +118,9 @@ .. versionchanged:: 3.2 The *lvl* parameter now accepts a string representation of the level such as 'INFO' as an alternative to the integer constants - such as :const:`INFO`. + such as :const:`INFO`. Note, however, that levels are internally stored + as integers, and methods such as e.g. :meth:`getEffectiveLevel` and + :meth:`isEnabledFor` will return/expect to be passed integers. .. method:: Logger.isEnabledFor(lvl) @@ -134,7 +136,9 @@ Indicates the effective level for this logger. If a value other than :const:`NOTSET` has been set using :meth:`setLevel`, it is returned. Otherwise, the hierarchy is traversed towards the root until a value other than - :const:`NOTSET` is found, and that value is returned. + :const:`NOTSET` is found, and that value is returned. The value returned is + an integer, typically one of :const:`logging.DEBUG`, :const:`logging.INFO` + etc. .. method:: Logger.getChild(suffix) @@ -1054,6 +1058,11 @@ of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string 'Level %s' % lvl is returned. + .. note:: Levels are internally integers (as they need to be compared in the + logging logic). This function is used to convert between an integer level + and the level name displayed in the formatted log output by means of the + ``%(levelname)s`` format specifier (see :ref:`logrecord-attributes`). + .. versionchanged:: 3.4 In Python versions earlier than 3.4, this function could also be passed a text level, and would return the corresponding numeric value of the level. 
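The behaviour documented in the patch above is easy to verify interactively. The short sketch below is only an illustration of the documented logging API, not part of the changeset; the logger name is arbitrary:

    import logging

    logger = logging.getLogger("example")        # arbitrary logger name
    logger.setLevel("INFO")                      # a string level is accepted since 3.2

    level = logger.getEffectiveLevel()           # returns an integer (logging.INFO == 20)
    print(level == logging.INFO)                 # True
    print(logger.isEnabledFor(logging.WARNING))  # expects an integer level; True here
    print(logging.getLevelName(level))           # 'INFO', the name used by %(levelname)s
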
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Sep 18 19:02:59 2014 From: python-checkins at python.org (vinay.sajip) Date: Thu, 18 Sep 2014 17:02:59 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDM2?= =?utf-8?q?=3A_Improved_documentation_for_getEffectiveLevel=28=29_and_getL?= =?utf-8?b?ZXZlbE5hbWUoKS4=?= Message-ID: <20140918170241.55992.84135@mail.hg.python.org> http://hg.python.org/cpython/rev/dd17c3eda73c changeset: 92468:dd17c3eda73c branch: 2.7 parent: 92465:2e3a81a0fa14 user: Vinay Sajip date: Thu Sep 18 17:46:58 2014 +0100 summary: Issue #22436: Improved documentation for getEffectiveLevel() and getLevelName(). files: Doc/library/logging.rst | 10 +++++++++- 1 files changed, 9 insertions(+), 1 deletions(-) diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst --- a/Doc/library/logging.rst +++ b/Doc/library/logging.rst @@ -130,7 +130,9 @@ Indicates the effective level for this logger. If a value other than :const:`NOTSET` has been set using :meth:`setLevel`, it is returned. Otherwise, the hierarchy is traversed towards the root until a value other than - :const:`NOTSET` is found, and that value is returned. + :const:`NOTSET` is found, and that value is returned. The value returned is + an integer, typically one of :const:`logging.DEBUG`, :const:`logging.INFO` + etc. .. method:: Logger.getChild(suffix) @@ -898,6 +900,12 @@ of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string "Level %s" % lvl is returned. + .. note:: Integer levels should be used when e.g. setting levels on instances + of :class:`Logger` and handlers. This function is used to convert between + an integer level and the level name displayed in the formatted log output + by means of the ``%(levelname)s`` format specifier (see + :ref:`logrecord-attributes`). + .. function:: makeLogRecord(attrdict) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 00:19:38 2014 From: python-checkins at python.org (charles-francois.natali) Date: Thu, 18 Sep 2014 22:19:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322378=3A_socket_m?= =?utf-8?q?odule=3A_add_SO=5FMARK=2E?= Message-ID: <20140918221934.100122.36460@mail.hg.python.org> http://hg.python.org/cpython/rev/ca0aa0d89273 changeset: 92471:ca0aa0d89273 user: Charles-Fran?ois Natali date: Thu Sep 18 23:18:46 2014 +0100 summary: Issue #22378: socket module: add SO_MARK. 
files: Modules/socketmodule.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -6248,6 +6248,9 @@ #ifdef SO_PRIORITY PyModule_AddIntMacro(m, SO_PRIORITY); #endif +#ifdef SO_MARK + PyModule_AddIntMacro(m, SO_MARK); +#endif /* Maximum number of connections for "listen" */ #ifdef SOMAXCONN -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 09:23:46 2014 From: python-checkins at python.org (senthil.kumaran) Date: Fri, 19 Sep 2014 07:23:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322366=3A_urllib?= =?utf-8?q?=2Erequest=2Eurlopen_will_accept_a_context_object_=28SSLContext?= =?utf-8?q?=29?= Message-ID: <20140919072340.54291.2899@mail.hg.python.org> http://hg.python.org/cpython/rev/c0b0dda16009 changeset: 92472:c0b0dda16009 user: Senthil Kumaran date: Fri Sep 19 15:23:30 2014 +0800 summary: Issue #22366: urllib.request.urlopen will accept a context object (SSLContext) as an argument which will then used be for HTTPS connection. Patch by Alex Gaynor. files: Doc/library/urllib.request.rst | 9 ++++++++- Lib/test/test_urllib.py | 8 ++++++++ Lib/urllib/request.py | 10 +++++++++- Misc/NEWS | 4 ++++ 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -16,7 +16,7 @@ The :mod:`urllib.request` module defines the following functions: -.. function:: urlopen(url, data=None[, timeout], *, cafile=None, capath=None, cadefault=False) +.. function:: urlopen(url, data=None[, timeout], *, cafile=None, capath=None, cadefault=False, context=None) Open the URL *url*, which can be either a string or a :class:`Request` object. @@ -47,6 +47,10 @@ the global default timeout setting will be used). This actually only works for HTTP, HTTPS and FTP connections. + If *context* is specified, it must be a :class:`ssl.SSLContext` instance + describing the various SSL options. See + :class:`~http.client.HTTPSConnection` for more details. + The optional *cafile* and *capath* parameters specify a set of trusted CA certificates for HTTPS requests. *cafile* should point to a single file containing a bundle of CA certificates, whereas *capath* should @@ -111,6 +115,9 @@ .. versionchanged:: 3.3 *cadefault* was added. + .. versionchanged:: 3.5 + *context* was added. + .. function:: install_opener(opener) Install an :class:`OpenerDirector` instance as the default global opener. 
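To make the new keyword argument concrete, here is a minimal sketch of how a caller might use it once the change lands in 3.5. It is only an illustration, not part of the patch: the URL is a placeholder, and the example assumes network access and a verifying default context from ssl.create_default_context():

    import ssl
    import urllib.request

    # A default client context verifies certificates and hostnames.
    context = ssl.create_default_context()

    # context is mutually exclusive with cafile/capath/cadefault.
    with urllib.request.urlopen("https://www.python.org/", context=context) as response:
        print(response.status)
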
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py --- a/Lib/test/test_urllib.py +++ b/Lib/test/test_urllib.py @@ -10,6 +10,7 @@ from unittest.mock import patch from test import support import os +import ssl import sys import tempfile from nturl2path import url2pathname, pathname2url @@ -379,6 +380,13 @@ with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() + def test_cafile_and_context(self): + context = ssl.create_default_context() + with self.assertRaises(ValueError): + urllib.request.urlopen( + "https://localhost", cafile="/nonexistent/path", context=context + ) + class urlopen_DataTests(unittest.TestCase): """Test urlopen() opening a data URL.""" diff --git a/Lib/urllib/request.py b/Lib/urllib/request.py --- a/Lib/urllib/request.py +++ b/Lib/urllib/request.py @@ -136,9 +136,14 @@ _opener = None def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - *, cafile=None, capath=None, cadefault=False): + *, cafile=None, capath=None, cadefault=False, context=None): global _opener if cafile or capath or cadefault: + if context is not None: + raise ValueError( + "You can't pass both context and any of cafile, capath, and " + "cadefault" + ) if not _have_ssl: raise ValueError('SSL support not available') context = ssl._create_stdlib_context(cert_reqs=ssl.CERT_REQUIRED, @@ -146,6 +151,9 @@ capath=capath) https_handler = HTTPSHandler(context=context, check_hostname=True) opener = build_opener(https_handler) + elif context: + https_handler = HTTPSHandler(context=context) + opener = build_opener(https_handler) elif _opener is None: _opener = opener = build_opener() else: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,10 @@ Library ------- +- Issue #22366: urllib.request.urlopen will accept a context object + (SSLContext) as an argument which will then used be for HTTPS connection. + Patch by Alex Gaynor. + - Issue #4180: The warnings registries are now reset when the filters are modified. -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Fri Sep 19 09:49:34 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 19 Sep 2014 09:49:34 +0200 Subject: [Python-checkins] Daily reference leaks (ca0aa0d89273): sum=130905 Message-ID: results for ca0aa0d89273 on branch "default" -------------------------------------------- test_collections leaked [-2, -2, 0] references, sum=-4 test_distutils leaked [37725, 37725, 37725] references, sum=113175 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, 0, -2] references, sum=0 test_site leaked [2, 0, -2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogYY4DNr', '-x'] From python-checkins at python.org Fri Sep 19 18:51:25 2014 From: python-checkins at python.org (alex.gaynor) Date: Fri, 19 Sep 2014 16:51:25 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP476=3A_Updated_based_on_pr?= =?utf-8?q?oposed_API_from_Nick=2E_Also_note_that_=2322366_has_been?= Message-ID: <20140919165121.97808.67611@mail.hg.python.org> http://hg.python.org/peps/rev/9c3249b358d0 changeset: 5554:9c3249b358d0 user: Alex Gaynor date: Fri Sep 19 09:51:17 2014 -0700 summary: PEP476: Updated based on proposed API from Nick. Also note that #22366 has been landed. 
files: pep-0476.txt | 15 ++++++++++----- 1 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -68,9 +68,14 @@ Failure to locate such a database would be an error, and users would need to explicitly specify a location to fix it. -This can be achieved by simply replacing the use of -``ssl._create_stdlib_context`` with ``ssl.create_default_context`` in -``http.client``. +This will be acheived by adding a new ``ssl._create_default_https_context`` +function, which is the same as ``ssl.create_default``. ``http.client`` can then +replace it's usage of ``ssl._create_stdlib_context`` with the new +``ssl._create_default_https_context``. + +Additionally ``ssl._create_stdlib_context`` is renamed +``ssl._create_unverified_context`` (an alias is kept around for backwards +compatibility reasons). Trust database -------------- @@ -138,8 +143,8 @@ Implementation ============== -* `Issue 22366 `_ adds the ``context`` - argument to ``urlib.request.urlopen``. +* **LANDED**: `Issue 22366 `_ adds the + ``context`` argument to ``urlib.request.urlopen``. * `Issue 22417 `_ implements the substance of this PEP. -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Fri Sep 19 23:27:08 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:27:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_standardize_pe?= =?utf-8?q?p_466_relnote_style?= Message-ID: <20140919212707.53951.54746@mail.hg.python.org> http://hg.python.org/cpython/rev/2ce7e7bc3e0c changeset: 92474:2ce7e7bc3e0c branch: 2.7 user: Benjamin Peterson date: Fri Sep 19 17:27:03 2014 -0400 summary: standardize pep 466 relnote style files: Doc/whatsnew/2.7.rst | 24 +++++++++++------------- 1 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2548,33 +2548,31 @@ :pep:`466` related features added in Python 2.7.7: * :func:`hmac.compare_digest` was backported from Python 3 to make a timing - attack resistant comparison operation broadly available to Python 2 - applications (backported by Alex Gaynor in :issue:`21306`) - -* upgraded to OpenSSL 1.0.1g for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21462`) - + attack resistant comparison operation available to Python 2 applications. + (Contributed by Alex Gaynor; :issue:`21306`.) + +* OpenSSL 1.0.1g was upgraded in the official Windows installers published on + python.org. (Contributed by Zachary Ware; :issue:`21462`.) :pep:`466` related features added in Python 2.7.8: * :func:`hashlib.pbkdf2_hmac` was backported from Python 3 to make a hashing algorithm suitable for secure password storage broadly available to Python - 2 applications (backported by Alex Gaynor in :issue:`21304`) - -* upgraded to OpenSSL 1.0.1h for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21671` for - CVE-2014-0224) + 2 applications. (Contributed by Alex Gaynor; :issue:`21304`.) + +* OpenSSL 1.0.1h was upgraded for the official Windows installers published on + python.org. (contributed by Zachary Ware in :issue:`21671` for CVE-2014-0224) :pep:`466` related features added in Python 2.7.9: * Most of Python 3.4's :mod:`ssl` module was backported. 
This means :mod:`ssl` now supports Server Name Indication, TLS1.x settings, access to the platform certificate store, the :class:`~ssl.SSLContext` class, and other - features. (Contributed by Alex Gaynor and David Reid in :issue:`21308`.) + features. (Contributed by Alex Gaynor and David Reid; :issue:`21308`.) * :func:`os.urandomn` was changed to cache a file descriptor to ``/dev/urandom`` instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex - Gaynor in :issue:`21305`.) + Gaynor; :issue:`21305`.) .. ====================================================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:27:08 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:27:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_whatsnew_for_p?= =?utf-8?q?ep_466_ssl_backport?= Message-ID: <20140919212707.7098.20299@mail.hg.python.org> http://hg.python.org/cpython/rev/5fba28d3d071 changeset: 92473:5fba28d3d071 branch: 2.7 parent: 92468:dd17c3eda73c user: Benjamin Peterson date: Fri Sep 19 17:23:21 2014 -0400 summary: whatsnew for pep 466 ssl backport files: Doc/whatsnew/2.7.rst | 11 +++++++++++ 1 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2565,6 +2565,17 @@ on python.org (contributed by Zachary Ware in :issue:`21671` for CVE-2014-0224) +:pep:`466` related features added in Python 2.7.9: + +* Most of Python 3.4's :mod:`ssl` module was backported. This means :mod:`ssl` + now supports Server Name Indication, TLS1.x settings, access to the platform + certificate store, the :class:`~ssl.SSLContext` class, and other + features. (Contributed by Alex Gaynor and David Reid in :issue:`21308`.) + +* :func:`os.urandomn` was changed to cache a file descriptor to ``/dev/urandom`` + instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex + Gaynor in :issue:`21305`.) + .. ====================================================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:30:31 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:30:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_remove_extra_l?= =?utf-8?q?etter?= Message-ID: <20140919213027.17735.74124@mail.hg.python.org> http://hg.python.org/cpython/rev/39f19b4ff1c8 changeset: 92477:39f19b4ff1c8 branch: 3.4 parent: 92475:a4231aa7ff3d user: Benjamin Peterson date: Fri Sep 19 17:29:08 2014 -0400 summary: remove extra letter files: Doc/whatsnew/2.7.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2572,7 +2572,7 @@ certificate store, the :class:`~ssl.SSLContext` class, and other features. (Contributed by Alex Gaynor and David Reid in :issue:`21308`.) -* :func:`os.urandomn` was changed to cache a file descriptor to ``/dev/urandom`` +* :func:`os.urandom` was changed to cache a file descriptor to ``/dev/urandom`` instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex Gaynor in :issue:`21305`.) 
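The PEP 466 helpers listed in these release notes are straightforward to exercise. The sketch below is purely illustrative (the token and password values are made up) and should run unchanged on 2.7.8+ and 3.4, both of which provide hmac.compare_digest, hashlib.pbkdf2_hmac and os.urandom:

    import hashlib
    import hmac
    import os

    # Constant-time comparison for secrets such as MACs or tokens.
    expected = b"secret-token"
    print(hmac.compare_digest(expected, b"secret-token"))   # True
    print(hmac.compare_digest(expected, b"wrong-token"))    # False

    # PBKDF2-HMAC key derivation suitable for password storage.
    salt = os.urandom(16)
    key = hashlib.pbkdf2_hmac("sha256", b"correct horse battery staple", salt, 100000)
    print(len(key))   # 32, the SHA-256 digest size
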
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:30:31 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:30:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_standardize_pe?= =?utf-8?q?p_466_relnote_style?= Message-ID: <20140919213027.79878.19714@mail.hg.python.org> http://hg.python.org/cpython/rev/8fe659ea85fe changeset: 92478:8fe659ea85fe branch: 3.4 user: Benjamin Peterson date: Fri Sep 19 17:27:03 2014 -0400 summary: standardize pep 466 relnote style files: Doc/whatsnew/2.7.rst | 24 +++++++++++------------- 1 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2548,33 +2548,31 @@ :pep:`466` related features added in Python 2.7.7: * :func:`hmac.compare_digest` was backported from Python 3 to make a timing - attack resistant comparison operation broadly available to Python 2 - applications (backported by Alex Gaynor in :issue:`21306`) - -* upgraded to OpenSSL 1.0.1g for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21462`) - + attack resistant comparison operation available to Python 2 applications. + (Contributed by Alex Gaynor; :issue:`21306`.) + +* OpenSSL 1.0.1g was upgraded in the official Windows installers published on + python.org. (Contributed by Zachary Ware; :issue:`21462`.) :pep:`466` related features added in Python 2.7.8: * :func:`hashlib.pbkdf2_hmac` was backported from Python 3 to make a hashing algorithm suitable for secure password storage broadly available to Python - 2 applications (backported by Alex Gaynor in :issue:`21304`) - -* upgraded to OpenSSL 1.0.1h for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21671` for - CVE-2014-0224) + 2 applications. (Contributed by Alex Gaynor; :issue:`21304`.) + +* OpenSSL 1.0.1h was upgraded for the official Windows installers published on + python.org. (contributed by Zachary Ware in :issue:`21671` for CVE-2014-0224) :pep:`466` related features added in Python 2.7.9: * Most of Python 3.4's :mod:`ssl` module was backported. This means :mod:`ssl` now supports Server Name Indication, TLS1.x settings, access to the platform certificate store, the :class:`~ssl.SSLContext` class, and other - features. (Contributed by Alex Gaynor and David Reid in :issue:`21308`.) + features. (Contributed by Alex Gaynor and David Reid; :issue:`21308`.) * :func:`os.urandom` was changed to cache a file descriptor to ``/dev/urandom`` instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex - Gaynor in :issue:`21305`.) + Gaynor; :issue:`21305`.) .. 
====================================================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:30:31 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:30:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_remove_extra_l?= =?utf-8?q?etter?= Message-ID: <20140919213026.8863.59565@mail.hg.python.org> http://hg.python.org/cpython/rev/a33afa468c34 changeset: 92476:a33afa468c34 branch: 2.7 parent: 92474:2ce7e7bc3e0c user: Benjamin Peterson date: Fri Sep 19 17:29:08 2014 -0400 summary: remove extra letter files: Doc/whatsnew/2.7.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2570,7 +2570,7 @@ certificate store, the :class:`~ssl.SSLContext` class, and other features. (Contributed by Alex Gaynor and David Reid; :issue:`21308`.) -* :func:`os.urandomn` was changed to cache a file descriptor to ``/dev/urandom`` +* :func:`os.urandom` was changed to cache a file descriptor to ``/dev/urandom`` instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex Gaynor; :issue:`21305`.) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:30:31 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:30:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140919213027.53951.67070@mail.hg.python.org> http://hg.python.org/cpython/rev/8b58be9f98a7 changeset: 92479:8b58be9f98a7 parent: 92472:c0b0dda16009 parent: 92478:8fe659ea85fe user: Benjamin Peterson date: Fri Sep 19 17:30:21 2014 -0400 summary: merge 3.4 files: Doc/whatsnew/2.7.rst | 31 ++++++++++++++++++++----------- 1 files changed, 20 insertions(+), 11 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2548,22 +2548,31 @@ :pep:`466` related features added in Python 2.7.7: * :func:`hmac.compare_digest` was backported from Python 3 to make a timing - attack resistant comparison operation broadly available to Python 2 - applications (backported by Alex Gaynor in :issue:`21306`) - -* upgraded to OpenSSL 1.0.1g for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21462`) - + attack resistant comparison operation available to Python 2 applications. + (Contributed by Alex Gaynor; :issue:`21306`.) + +* OpenSSL 1.0.1g was upgraded in the official Windows installers published on + python.org. (Contributed by Zachary Ware; :issue:`21462`.) :pep:`466` related features added in Python 2.7.8: * :func:`hashlib.pbkdf2_hmac` was backported from Python 3 to make a hashing algorithm suitable for secure password storage broadly available to Python - 2 applications (backported by Alex Gaynor in :issue:`21304`) - -* upgraded to OpenSSL 1.0.1h for the prebuilt Windows installers published - on python.org (contributed by Zachary Ware in :issue:`21671` for - CVE-2014-0224) + 2 applications. (Contributed by Alex Gaynor; :issue:`21304`.) + +* OpenSSL 1.0.1h was upgraded for the official Windows installers published on + python.org. (contributed by Zachary Ware in :issue:`21671` for CVE-2014-0224) + +:pep:`466` related features added in Python 2.7.9: + +* Most of Python 3.4's :mod:`ssl` module was backported. 
This means :mod:`ssl` + now supports Server Name Indication, TLS1.x settings, access to the platform + certificate store, the :class:`~ssl.SSLContext` class, and other + features. (Contributed by Alex Gaynor and David Reid; :issue:`21308`.) + +* :func:`os.urandom` was changed to cache a file descriptor to ``/dev/urandom`` + instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex + Gaynor; :issue:`21305`.) .. ====================================================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:30:31 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:30:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_whatsnew_for_p?= =?utf-8?q?ep_466_ssl_backport?= Message-ID: <20140919213026.53755.26935@mail.hg.python.org> http://hg.python.org/cpython/rev/a4231aa7ff3d changeset: 92475:a4231aa7ff3d branch: 3.4 parent: 92469:72a91df86b07 user: Benjamin Peterson date: Fri Sep 19 17:23:21 2014 -0400 summary: whatsnew for pep 466 ssl backport files: Doc/whatsnew/2.7.rst | 11 +++++++++++ 1 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst --- a/Doc/whatsnew/2.7.rst +++ b/Doc/whatsnew/2.7.rst @@ -2565,6 +2565,17 @@ on python.org (contributed by Zachary Ware in :issue:`21671` for CVE-2014-0224) +:pep:`466` related features added in Python 2.7.9: + +* Most of Python 3.4's :mod:`ssl` module was backported. This means :mod:`ssl` + now supports Server Name Indication, TLS1.x settings, access to the platform + certificate store, the :class:`~ssl.SSLContext` class, and other + features. (Contributed by Alex Gaynor and David Reid in :issue:`21308`.) + +* :func:`os.urandomn` was changed to cache a file descriptor to ``/dev/urandom`` + instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex + Gaynor in :issue:`21305`.) + .. ====================================================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Sep 19 23:31:23 2014 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 19 Sep 2014 21:31:23 +0000 Subject: [Python-checkins] =?utf-8?q?hooks=3A_http_-=3E_https?= Message-ID: <20140919213122.97808.54025@mail.hg.python.org> http://hg.python.org/hooks/rev/49ae783b189f changeset: 94:49ae783b189f user: Benjamin Peterson date: Fri Sep 19 17:31:21 2014 -0400 summary: http -> https files: mail.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/mail.py b/mail.py --- a/mail.py +++ b/mail.py @@ -24,7 +24,7 @@ import sys import traceback -BASE = 'http://hg.python.org/' +BASE = 'https://hg.python.org/' CSET_URL = BASE + '%s/rev/%s' -- Repository URL: http://hg.python.org/hooks From python-checkins at python.org Sat Sep 20 00:33:19 2014 From: python-checkins at python.org (alex.gaynor) Date: Fri, 19 Sep 2014 22:33:19 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP476=3A_Updates_based_on_fe?= =?utf-8?q?edback_from_Guido=2E?= Message-ID: <20140919223316.54339.2339@mail.hg.python.org> https://hg.python.org/peps/rev/f44d393ad12f changeset: 5555:f44d393ad12f user: Alex Gaynor date: Fri Sep 19 15:33:00 2014 -0700 summary: PEP476: Updates based on feedback from Guido. 
Fixed several typos, clean up language, and included an example of opting out files: pep-0476.txt | 38 +++++++++++++++++++++++++------------- 1 files changed, 25 insertions(+), 13 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -11,13 +11,13 @@ Abstract ======== -Currently when a standard library http client (the ``urllib`` and ``http`` -modules) encounters an ``https://`` URL it will wrap the network HTTP traffic -in a TLS stream, as is necessary to communicate with such a server. However, -during the TLS handshake it will not actually check that the server has an X509 -certificate is signed by a CA in any trust root, nor will it verify that the -Common Name (or Subject Alternate Name) on the presented certificate matches -the requested host. +Currently when a standard library http client (the ``urllib``, ``urllib2``, +``http``, and ``httplib`` modules) encounters an ``https://`` URL it will wrap +the network HTTP traffic in a TLS stream, as is necessary to communicate with +such a server. However, during the TLS handshake it will not actually check +that the server has an X509 certificate is signed by a CA in any trust root, +nor will it verify that the Common Name (or Subject Alternate Name) on the +presented certificate matches the requested host. The failure to do these checks means that anyone with a privileged network position is able to trivially execute a man in the middle attack against a @@ -68,10 +68,11 @@ Failure to locate such a database would be an error, and users would need to explicitly specify a location to fix it. -This will be acheived by adding a new ``ssl._create_default_https_context`` -function, which is the same as ``ssl.create_default``. ``http.client`` can then -replace it's usage of ``ssl._create_stdlib_context`` with the new -``ssl._create_default_https_context``. +This will be achieved by adding a new ``ssl._create_default_https_context`` +function, which is the same as ``ssl.create_default_context``. + +``http.client`` can then replace its usage of ``ssl._create_stdlib_context`` +with the ``ssl._create_default_https_context``. Additionally ``ssl._create_stdlib_context`` is renamed ``ssl._create_unverified_context`` (an alias is kept around for backwards @@ -116,6 +117,18 @@ Twisted's 14.0 release made this same change, and it has been met with almost no opposition. +Opting out +---------- + +For users who wish to opt out of certificate verification, they can achieve +this by providing the ``context`` argument to ``urllib.urlopen``: + + import ssl + + # This restores the same behavior as before. + context = ssl._create_unverified_context() + urllib.urlopen("https://no-valid-cert", context=context) + Other protocols =============== @@ -137,8 +150,7 @@ This PEP describes changes that will occur on both the 3.4.x, 3.5 and 2.7.X branches. For 2.7.X this will require backporting the ``context`` (``SSLContext``) argument to ``httplib``, in addition to the features already -backported in -:pep:`466`. +backported in :pep:`466`. 
Implementation ============== -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Sep 20 04:39:49 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:39:49 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322420=3A_Merge_with_3=2E4?= Message-ID: <20140920023948.7098.21801@mail.hg.python.org> https://hg.python.org/cpython/rev/ab050bd7e51c changeset: 92482:ab050bd7e51c parent: 92479:8b58be9f98a7 parent: 92481:42b03d5b1cbb user: Terry Jan Reedy date: Fri Sep 19 22:39:09 2014 -0400 summary: Issue #22420: Merge with 3.4 files: Lib/idlelib/PyShell.py | 11 +++++------ Lib/idlelib/configHandler.py | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py --- a/Lib/idlelib/PyShell.py +++ b/Lib/idlelib/PyShell.py @@ -21,7 +21,7 @@ try: from tkinter import * except ImportError: - print("** IDLE can't import Tkinter. " \ + print("** IDLE can't import Tkinter.\n" "Your Python may not be configured for Tk. **", file=sys.__stderr__) sys.exit(1) import tkinter.messagebox as tkMessageBox @@ -651,9 +651,9 @@ code = compile(source, filename, "exec") except (OverflowError, SyntaxError): self.tkconsole.resetoutput() - tkerr = self.tkconsole.stderr - print('*** Error in script or command!\n', file=tkerr) - print('Traceback (most recent call last):', file=tkerr) + print('*** Error in script or command!\n' + 'Traceback (most recent call last):', + file=self.tkconsole.stderr) InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt() else: @@ -1472,8 +1472,7 @@ try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: - sys.stderr.write("Error: %s\n" % str(msg)) - sys.stderr.write(usage_msg) + print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py --- a/Lib/idlelib/configHandler.py +++ b/Lib/idlelib/configHandler.py @@ -203,9 +203,9 @@ if userDir != '~': # expanduser() found user home dir if not os.path.exists(userDir): warn = ('\n Warning: os.path.expanduser("~") points to\n '+ - userDir+',\n but the path does not exist.\n') + userDir+',\n but the path does not exist.') try: - sys.stderr.write(warn) + print(warn, file=sys.stderr) except OSError: pass userDir = '~' @@ -218,8 +218,8 @@ os.mkdir(userDir) except OSError: warn = ('\n Warning: unable to create user config directory\n'+ - userDir+'\n Check path and permissions.\n Exiting!\n\n') - sys.stderr.write(warn) + userDir+'\n Check path and permissions.\n Exiting!\n') + print(warn, file=sys.stderr) raise SystemExit return userDir @@ -244,12 +244,12 @@ except ValueError: warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' invalid %r value for configuration option %r\n' - ' from section %r: %r\n' % + ' from section %r: %r' % (type, option, section, self.userCfg[configType].Get(section, option, raw=raw))) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass try: @@ -263,10 +263,10 @@ warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' problem retrieving configuration option %r\n' ' from section %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (option, section, default)) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass return default @@ -375,10 +375,10 @@ 
warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict' ' -\n problem retrieving theme element %r' '\n from theme %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (element, themeName, theme[element])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass colour=cfgParser.Get(themeName,element,default=theme[element]) @@ -635,10 +635,10 @@ warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys' ' -\n problem retrieving key binding for event %r' '\n from key set %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (event, keySetName, keyBindings[event])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass return keyBindings -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 04:39:49 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:39:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDIw?= =?utf-8?q?=3A_Avoid_=27write_to_None=27_crashes_by_using_print_instead=2E?= Message-ID: <20140920023948.97822.34304@mail.hg.python.org> https://hg.python.org/cpython/rev/42b03d5b1cbb changeset: 92481:42b03d5b1cbb branch: 3.4 parent: 92478:8fe659ea85fe user: Terry Jan Reedy date: Fri Sep 19 22:38:41 2014 -0400 summary: Issue #22420: Avoid 'write to None' crashes by using print instead. Change a couple of existing prints. Original patch by Serhiy Storchaka. files: Lib/idlelib/PyShell.py | 11 +++++------ Lib/idlelib/configHandler.py | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py --- a/Lib/idlelib/PyShell.py +++ b/Lib/idlelib/PyShell.py @@ -21,7 +21,7 @@ try: from tkinter import * except ImportError: - print("** IDLE can't import Tkinter. " \ + print("** IDLE can't import Tkinter.\n" "Your Python may not be configured for Tk. 
**", file=sys.__stderr__) sys.exit(1) import tkinter.messagebox as tkMessageBox @@ -651,9 +651,9 @@ code = compile(source, filename, "exec") except (OverflowError, SyntaxError): self.tkconsole.resetoutput() - tkerr = self.tkconsole.stderr - print('*** Error in script or command!\n', file=tkerr) - print('Traceback (most recent call last):', file=tkerr) + print('*** Error in script or command!\n' + 'Traceback (most recent call last):', + file=self.tkconsole.stderr) InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt() else: @@ -1472,8 +1472,7 @@ try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: - sys.stderr.write("Error: %s\n" % str(msg)) - sys.stderr.write(usage_msg) + print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py --- a/Lib/idlelib/configHandler.py +++ b/Lib/idlelib/configHandler.py @@ -203,9 +203,9 @@ if userDir != '~': # expanduser() found user home dir if not os.path.exists(userDir): warn = ('\n Warning: os.path.expanduser("~") points to\n '+ - userDir+',\n but the path does not exist.\n') + userDir+',\n but the path does not exist.') try: - sys.stderr.write(warn) + print(warn, file=sys.stderr) except OSError: pass userDir = '~' @@ -218,8 +218,8 @@ os.mkdir(userDir) except OSError: warn = ('\n Warning: unable to create user config directory\n'+ - userDir+'\n Check path and permissions.\n Exiting!\n\n') - sys.stderr.write(warn) + userDir+'\n Check path and permissions.\n Exiting!\n') + print(warn, file=sys.stderr) raise SystemExit return userDir @@ -244,12 +244,12 @@ except ValueError: warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' invalid %r value for configuration option %r\n' - ' from section %r: %r\n' % + ' from section %r: %r' % (type, option, section, self.userCfg[configType].Get(section, option, raw=raw))) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass try: @@ -263,10 +263,10 @@ warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' problem retrieving configuration option %r\n' ' from section %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (option, section, default)) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass return default @@ -375,10 +375,10 @@ warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict' ' -\n problem retrieving theme element %r' '\n from theme %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (element, themeName, theme[element])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass colour=cfgParser.Get(themeName,element,default=theme[element]) @@ -635,10 +635,10 @@ warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys' ' -\n problem retrieving key binding for event %r' '\n from key set %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (event, keySetName, keyBindings[event])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except OSError: pass return keyBindings -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 04:39:49 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:39:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDIw?= 
=?utf-8?q?=3A_Avoid_=27write_to_None=27_crashes_by_using_print_instead=2E?= Message-ID: <20140920023948.77390.25786@mail.hg.python.org> https://hg.python.org/cpython/rev/90a2a12fa34d changeset: 92480:90a2a12fa34d branch: 2.7 parent: 92476:a33afa468c34 user: Terry Jan Reedy date: Fri Sep 19 22:37:24 2014 -0400 summary: Issue #22420: Avoid 'write to None' crashes by using print instead. For 2,.7, add print_function __future__ import and convert print statements to print functions. Based on 3.x patch by Serhiy Storchaka. files: Lib/idlelib/PyShell.py | 28 ++++++++-------- Lib/idlelib/configHandler.py | 42 ++++++++++++------------ 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py --- a/Lib/idlelib/PyShell.py +++ b/Lib/idlelib/PyShell.py @@ -1,4 +1,5 @@ #! /usr/bin/env python +from __future__ import print_function import os import os.path @@ -20,8 +21,8 @@ try: from Tkinter import * except ImportError: - print>>sys.__stderr__, "** IDLE can't import Tkinter. " \ - "Your Python may not be configured for Tk. **" + print("** IDLE can't import Tkinter.\n" + "Your Python may not be configured for Tk. **", file=sys.__stderr__) sys.exit(1) import tkMessageBox @@ -587,14 +588,14 @@ console = self.tkconsole.console if how == "OK": if what is not None: - print >>console, repr(what) + print(repr(what), file=console) elif how == "EXCEPTION": if self.tkconsole.getvar("<>"): self.remote_stack_viewer() elif how == "ERROR": errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n" - print >>sys.__stderr__, errmsg, what - print >>console, errmsg, what + print(errmsg, what, file=sys.__stderr__) + print(errmsg, what, file=console) # we received a response to the currently active seq number: try: self.tkconsole.endexecuting() @@ -658,9 +659,9 @@ code = compile(source, filename, "exec") except (OverflowError, SyntaxError): self.tkconsole.resetoutput() - tkerr = self.tkconsole.stderr - print>>tkerr, '*** Error in script or command!\n' - print>>tkerr, 'Traceback (most recent call last):' + print('*** Error in script or command!\n' + 'Traceback (most recent call last):', + file=self.tkconsole.stderr) InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt() else: @@ -810,14 +811,14 @@ raise except: if use_subprocess: - print >>self.tkconsole.stderr, \ - "IDLE internal error in runcode()" + print("IDLE internal error in runcode()", + file=self.tkconsole.stderr) self.showtraceback() self.tkconsole.endexecuting() else: if self.tkconsole.canceled: self.tkconsole.canceled = False - print >>self.tkconsole.stderr, "KeyboardInterrupt" + print("KeyboardInterrupt", file=self.tkconsole.stderr) else: self.showtraceback() finally: @@ -1480,8 +1481,7 @@ try: opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:") except getopt.error as msg: - sys.stderr.write("Error: %s\n" % str(msg)) - sys.stderr.write(usage_msg) + print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr) sys.exit(2) for o, a in opts: if o == '-c': @@ -1504,7 +1504,7 @@ if os.path.isfile(script): pass else: - print "No script file: ", script + print("No script file: ", script, file=sys.stderr) sys.exit() enable_shell = True if o == '-s': diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py --- a/Lib/idlelib/configHandler.py +++ b/Lib/idlelib/configHandler.py @@ -15,8 +15,8 @@ the retrieval of config information. 
When a default is returned instead of a requested config value, a message is printed to stderr to aid in configuration problem notification and resolution. - """ +from __future__ import print_function import os import sys import string @@ -202,9 +202,9 @@ if userDir != '~': # expanduser() found user home dir if not os.path.exists(userDir): warn = ('\n Warning: os.path.expanduser("~") points to\n '+ - userDir+',\n but the path does not exist.\n') + userDir+',\n but the path does not exist.') try: - sys.stderr.write(warn) + print(warn, file=sys.stderr) except IOError: pass userDir = '~' @@ -217,8 +217,8 @@ os.mkdir(userDir) except (OSError, IOError): warn = ('\n Warning: unable to create user config directory\n'+ - userDir+'\n Check path and permissions.\n Exiting!\n\n') - sys.stderr.write(warn) + userDir+'\n Check path and permissions.\n Exiting!\n') + print(warn, file=sys.stderr) raise SystemExit return userDir @@ -243,12 +243,12 @@ except ValueError: warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' invalid %r value for configuration option %r\n' - ' from section %r: %r\n' % + ' from section %r: %r' % (type, option, section, self.userCfg[configType].Get(section, option, raw=raw))) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except IOError: pass try: @@ -262,10 +262,10 @@ warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n' ' problem retrieving configuration option %r\n' ' from section %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (option, section, default)) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except IOError: pass return default @@ -374,10 +374,10 @@ warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict' ' -\n problem retrieving theme element %r' '\n from theme %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (element, themeName, theme[element])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except IOError: pass colour=cfgParser.Get(themeName,element,default=theme[element]) @@ -634,10 +634,10 @@ warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys' ' -\n problem retrieving key binding for event %r' '\n from key set %r.\n' - ' returning default value: %r\n' % + ' returning default value: %r' % (event, keySetName, keyBindings[event])) try: - sys.stderr.write(warning) + print(warning, file=sys.stderr) except IOError: pass return keyBindings @@ -704,18 +704,18 @@ ### module test if __name__ == '__main__': def dumpCfg(cfg): - print '\n',cfg,'\n' + print('\n', cfg, '\n') for key in cfg.keys(): sections=cfg[key].sections() - print key - print sections + print(key) + print(sections) for section in sections: options=cfg[key].options(section) - print section - print options + print(section) + print(options) for option in options: - print option, '=', cfg[key].Get(section,option) + print(option, '=', cfg[key].Get(section,option)) dumpCfg(idleConf.defaultCfg) dumpCfg(idleConf.userCfg) - print idleConf.userCfg['main'].Get('Theme','name') - #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal') + print(idleConf.userCfg['main'].Get('Theme','name')) + #print(idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 04:57:24 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:57:24 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogVXBkYXRlIElkbGUn?= 
=?utf-8?q?s_online_doc_url=2E?= Message-ID: <20140920025722.8863.29244@mail.hg.python.org> https://hg.python.org/cpython/rev/d41fa37dbf42 changeset: 92484:d41fa37dbf42 branch: 3.4 parent: 92481:42b03d5b1cbb user: Terry Jan Reedy date: Fri Sep 19 22:54:15 2014 -0400 summary: Update Idle's online doc url. files: Lib/idlelib/EditorWindow.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -124,7 +124,7 @@ # Safari requires real file:-URLs EditorWindow.help_url = 'file://' + EditorWindow.help_url else: - EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2] + EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2] currentTheme=idleConf.CurrentTheme() self.flist = flist root = root or flist.root -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 04:57:24 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:57:24 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E4?= Message-ID: <20140920025722.54291.24767@mail.hg.python.org> https://hg.python.org/cpython/rev/8437c613b69f changeset: 92485:8437c613b69f parent: 92482:ab050bd7e51c parent: 92484:d41fa37dbf42 user: Terry Jan Reedy date: Fri Sep 19 22:54:27 2014 -0400 summary: Merge with 3.4 files: Lib/idlelib/EditorWindow.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -124,7 +124,7 @@ # Safari requires real file:-URLs EditorWindow.help_url = 'file://' + EditorWindow.help_url else: - EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2] + EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2] currentTheme=idleConf.CurrentTheme() self.flist = flist root = root or flist.root -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 04:57:24 2014 From: python-checkins at python.org (terry.reedy) Date: Sat, 20 Sep 2014 02:57:24 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogVXBkYXRlIElkbGUn?= =?utf-8?q?s_online_doc_url=2E?= Message-ID: <20140920025722.53951.98825@mail.hg.python.org> https://hg.python.org/cpython/rev/2b8b4943fce3 changeset: 92483:2b8b4943fce3 branch: 2.7 parent: 92480:90a2a12fa34d user: Terry Jan Reedy date: Fri Sep 19 22:54:09 2014 -0400 summary: Update Idle's online doc url. 
files: Lib/idlelib/EditorWindow.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -153,7 +153,7 @@ # Safari requires real file:-URLs EditorWindow.help_url = 'file://' + EditorWindow.help_url else: - EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2] + EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2] currentTheme=idleConf.CurrentTheme() self.flist = flist root = root or flist.root -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 06:06:58 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 20 Sep 2014 04:06:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_null_merge?= Message-ID: <20140920040656.46795.8846@mail.hg.python.org> https://hg.python.org/cpython/rev/52b498e03df4 changeset: 92487:52b498e03df4 parent: 92485:8437c613b69f parent: 92486:569a889e3b6c user: Ned Deily date: Fri Sep 19 21:06:23 2014 -0700 summary: null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 06:06:58 2014 From: python-checkins at python.org (ned.deily) Date: Sat, 20 Sep 2014 04:06:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE1NjYx?= =?utf-8?q?=3A_Update_OS_X_installer_welcome_and_readme_files_for_3=2E4=2E?= =?utf-8?q?2=2E?= Message-ID: <20140920040656.30699.90707@mail.hg.python.org> https://hg.python.org/cpython/rev/569a889e3b6c changeset: 92486:569a889e3b6c branch: 3.4 parent: 92484:d41fa37dbf42 user: Ned Deily date: Fri Sep 19 21:03:45 2014 -0700 summary: Issue #15661: Update OS X installer welcome and readme files for 3.4.2. python.org OS X installers are now distributed as signed installer packages compatible with the Gatekeeper security feature. files: Mac/BuildScript/README.txt | 17 +- Mac/BuildScript/resources/ReadMe.txt | 106 +++---------- Mac/BuildScript/resources/Welcome.rtf | 8 +- Mac/BuildScript/resources/readme.rtf | 103 +++++++++++++ Misc/NEWS | 3 + 5 files changed, 150 insertions(+), 87 deletions(-) diff --git a/Mac/BuildScript/README.txt b/Mac/BuildScript/README.txt --- a/Mac/BuildScript/README.txt +++ b/Mac/BuildScript/README.txt @@ -1,14 +1,27 @@ Building a Python Mac OS X distribution ======================================= -The ``build-install.py`` script creates Python distributions, including +The ``build-installer.py`` script creates Python distributions, including certain third-party libraries as necessary. It builds a complete framework-based Python out-of-tree, installs it in a funny place with $DESTROOT, massages that installation to remove .pyc files and such, creates an Installer package from the installation plus other files in ``resources`` and ``scripts`` and placed that on a ``.dmg`` disk image. -For Python 3.4.0, PSF practice is to build two installer variants +This installers built by this script are legacy bundle installers that have +been supported from the early days of OS X. In particular, they are supported +on OS X 10.3.9, the earliest supported release for builds from this script. + +Beginning with Python 3.4.2, PSF practice is to build two installer variants +using the newer flat package format, supported on 10.5+, and signed with the +builder's Apple developer key, allowing downloaded packages to satisfy Apple's +default Gatekeeper policy (e.g. 
starting with 10.8, Apple store downloads and +Apple developer ID signed apps and installer packages). The process for +transforming the output build artifacts into signed flat packages is not +yet integrated into ``build-installer.py``. The steps prior to the flat +package creation are the same as for 3.4.1 below. + +For Python 3.4.0 and 3.4.1, PSF practice was to build two installer variants for each release. 1. 32-bit-only, i386 and PPC universal, capable on running on all machines diff --git a/Mac/BuildScript/resources/ReadMe.txt b/Mac/BuildScript/resources/ReadMe.txt --- a/Mac/BuildScript/resources/ReadMe.txt +++ b/Mac/BuildScript/resources/ReadMe.txt @@ -1,92 +1,36 @@ -This package will install Python $FULL_VERSION for Mac OS X -$MACOSX_DEPLOYMENT_TARGET for the following architecture(s): -$ARCHITECTURES. +This package will install Python $FULL_VERSION for Mac OS X $MACOSX_DEPLOYMENT_TARGET for the following architecture(s): $ARCHITECTURES. - **** IMPORTANT **** +============================= +Update your version of Tcl/Tk to use IDLE or other Tk applications +============================= +To use IDLE or other programs that use the Tkinter graphical user interface toolkit, you need to install a newer third-party version of the Tcl/Tk frameworks. Visit https://www.python.org/download/mac/tcltk/ for current information about supported and recommended versions of Tcl/Tk for this version of Python and of Mac OS X. + +============================= Installing on OS X 10.8 (Mountain Lion) or later systems -======================================================== +[CHANGED for Python 3.4.2] +============================= -If you are attempting to install on an OS X 10.8+ system, you may -see a message that Python can't be installed because it is from an -unidentified developer. This is because this Python installer -package is not yet compatible with the Gatekeeper security feature -introduced in OS X 10.8. To allow Python to be installed, you -can override the Gatekeeper policy for this install. In the Finder, -instead of double-clicking, control-click or right click the "Python" -installer package icon. Then select "Open using ... Installer" from -the contextual menu that appears. +As of Python 3.4.2, installer packages from python.org are now compatible with the Gatekeeper security feature introduced in OS X 10.8. Downloaded packages can now be directly installed by double-clicking with the default system security settings. Python.org installer packages for OS X are signed with the Developer ID of the builder, as identified on the download page for this release (https://www.python.org/downloads/). To inspect the digital signature of the package, click on the lock icon in the upper right corner of the Install Python installer window. Refer to Apple?s support pages for more information on Gatekeeper (http://support.apple.com/kb/ht5290). - **** IMPORTANT **** +============================= +Simplified web-based installs +[NEW for Python 3.4.2] +============================= -Update your version of Tcl/Tk to use IDLE or other Tk applications -================================================================== +With the change to the newer flat format installer package, the download file now has a .pkg extension as it is no longer necessary to embed the installer within a disk image (.dmg) container. If you download the Python installer through a web browser, the OS X installer application may open automatically to allow you to perform the install. 
If your browser settings do not allow automatic open, double click on the downloaded installer file. -To use IDLE or other programs that use the Tkinter graphical user -interface toolkit, you may need to install a newer third-party version -of the Tcl/Tk frameworks. Visit http://www.python.org/download/mac/tcltk/ -for current information about supported and recommended versions of -Tcl/Tk for this version of Python and of Mac OS X. +============================= +New Installation Options and Defaults +[NEW for Python 3.4.0] +============================= - **NEW* As of Python 3.4.0b1: +The Python installer now includes an option to automatically install or upgrade pip, a tool for installing and managing Python packages. This option is enabled by default and no Internet access is required. If you do not want the installer to do this, select the Customize option at the Installation Type step and uncheck the Install or ugprade pip option. -New Installation Options and Defaults -===================================== +To make it easier to use scripts installed by third-party Python packages, with pip or by other means, the Shell profile updater option is now enabled by default, as has been the case with Python 2.7.x installers. You can also turn this option off by selecting Customize and unchecking the Shell profile updater option. You can also update your shell profile later by launching the Update Shell Profile command found in the /Applications/Python $VERSION folder. You may need to start a new terminal window for the changes to take effect. -The Python installer now includes an option to automatically install -or upgrade pip, a tool for installing and managing Python packages. -This option is enabled by default and no Internet access is required. -If you do not want the installer to do this, select the "Customize" -option at the "Installation Type" step and uncheck the "Install or -ugprade pip" option. +============================= +Python 3 and Python 2 Co-existence +============================= -To make it easier to use scripts installed by third-party Python -packages, with pip or by other means, the "Shell profile updater" -option is now enabled by default, as has been the case with Python -2.7.x installers. You can also turn this option off by selecting -"Customize" and unchecking the "Shell profile updater" option. You -can also update your shell profile later by launching the "Update -Shell Profile" command found in the /Applications/Python $VERSION -folder. You may need to start a new terminal window for the -changes to take effect. - -Python.org Python $VERSION and 2.7.x versions can both be installed and -will not conflict. Command names for Python 3 contain a 3 in them, -python3 (or python$VERSION), idle3 (or idle$VERSION), pip3 (or pip$VERSION), etc. -Python 2.7 command names contain a 2 or no digit: python2 (or -python2.7 or python), idle2 (or idle2.7 or idle), etc. If you want to -use pip with Python 2.7.x, you will need to download and install a -separate copy of it from the Python Package Index -(https://pypi.python.org/pypi). - -Using this version of Python on OS X -==================================== - -Python consists of the Python programming language interpreter, plus -a set of programs to allow easy access to it for Mac users including -an integrated development environment, IDLE, plus a set of pre-built -extension modules that open up specific Macintosh technologies to -Python programs. 
- -The installer puts applications, an "Update Shell Profile" command, -and a link to the optionally installed Python Documentation into the -"Python $VERSION" subfolder of the system Applications folder, -and puts the underlying machinery into the folder -$PYTHONFRAMEWORKINSTALLDIR. It can -optionally place links to the command-line tools in /usr/local/bin as -well. Double-click on the "Update Shell Profile" command to add the -"bin" directory inside the framework to your shell's search path. - -You must install onto your current boot disk, even though the -installer may not enforce this, otherwise things will not work. - -You can verify the integrity of the disk image file containing the -installer package and this ReadMe file by comparing its md5 checksum -and size with the values published on the release page linked at -http://www.python.org/download/ - -Installation requires approximately $INSTALL_SIZE MB of disk space, -ignore the message that it will take zero bytes. - -More information on Python in general can be found at -http://www.python.org. +Python.org Python $VERSION and 2.7.x versions can both be installed on your system and will not conflict. Command names for Python 3 contain a 3 in them, python3 (or python$VERSION), idle3 (or idle$VERSION), pip3 (or pip$VERSION), etc. Python 2.7 command names contain a 2 or no digit: python2 (or python2.7 or python), idle2 (or idle2.7 or idle), etc. If you want to use pip with Python 2.7.x, download and install a separate copy of it from the Python Package Index (https://pypi.python.org/pypi/pip/). diff --git a/Mac/BuildScript/resources/Welcome.rtf b/Mac/BuildScript/resources/Welcome.rtf --- a/Mac/BuildScript/resources/Welcome.rtf +++ b/Mac/BuildScript/resources/Welcome.rtf @@ -1,7 +1,7 @@ -{\rtf1\ansi\ansicpg1252\cocoartf1187\cocoasubrtf400 +{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf210 \cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;\f1\fmodern\fcharset0 CourierNewPSMT;} {\colortbl;\red255\green255\blue255;} -\paperw11905\paperh16837\margl1440\margr1440\vieww9640\viewh10620\viewkind0 +\paperw11905\paperh16837\margl1440\margr1440\vieww12200\viewh10880\viewkind0 \pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640 \f0\fs24 \cf0 This package will install @@ -14,7 +14,7 @@ \b Python for Mac OS X \b0 consists of the Python programming language interpreter, plus a set of programs to allow easy access to it for Mac OS X users including an integrated development environment \b IDLE -\b0 and a set of pre-built extension modules that open up specific Macintosh technologies to Python programs.\ +\b0 .\ \ \b NEW for Python 3.4: @@ -36,4 +36,4 @@ \b tkinter \b0 graphical user interface toolkit require specific versions of the \b Tcl/Tk -\b0 platform independent windowing toolkit. Visit {\field{\*\fldinst{HYPERLINK "http://www.python.org/download/mac/tcltk/"}}{\fldrslt http://www.python.org/download/mac/tcltk/}} for current information on supported and recommended versions of Tcl/Tk for this version of Python and Mac OS X.} \ No newline at end of file +\b0 platform independent windowing toolkit. 
Visit {\field{\*\fldinst{HYPERLINK "https://www.python.org/download/mac/tcltk/"}}{\fldrslt https://www.python.org/download/mac/tcltk/}} for current information on supported and recommended versions of Tcl/Tk for this version of Python and Mac OS X.} \ No newline at end of file diff --git a/Mac/BuildScript/resources/readme.rtf b/Mac/BuildScript/resources/readme.rtf new file mode 100644 --- /dev/null +++ b/Mac/BuildScript/resources/readme.rtf @@ -0,0 +1,103 @@ +{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf210 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;\f1\fmodern\fcharset0 CourierNewPSMT;} +{\colortbl;\red255\green255\blue255;} +\margl1440\margr1440\vieww13020\viewh15160\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural + +\f0\fs24 \cf0 This package will install Python $FULL_VERSION for Mac OS X $MACOSX_DEPLOYMENT_TARGET for the following architecture(s): $ARCHITECTURES.\ +\ + +\b \ul Update your version of Tcl/Tk to use IDLE or other Tk applications +\b0 \ulnone \ +\ +To use IDLE or other programs that use the Tkinter graphical user interface toolkit, you need to install a newer third-party version of the +\i Tcl/Tk +\i0 frameworks. Visit {\field{\*\fldinst{HYPERLINK "https://www.python.org/download/mac/tcltk/"}}{\fldrslt https://www.python.org/download/mac/tcltk/}} for current information about supported and recommended versions of +\i Tcl/Tk +\i0 for this version of Python and of Mac OS X.\ + +\b \ul \ +Installing on OS X 10.8 (Mountain Lion) or later systems\ +\ulnone [CHANGED for Python 3.4.2] +\b0 \ +\ +As of Python 3.4.2, installer packages from python.org are now compatible with the Gatekeeper security feature introduced in OS X 10.8. Downloaded packages can now be directly installed by double-clicking with the default system security settings. Python.org installer packages for OS X are signed with the Developer ID of the builder, as identified on the download page for this release ({\field{\*\fldinst{HYPERLINK "https://www.python.org/downloads/"}}{\fldrslt https://www.python.org/downloads/}}). To inspect the digital signature of the package, click on the lock icon in the upper right corner of the +\i Install Python +\i0 installer window. Refer to Apple\'92s support pages for more information on Gatekeeper ({\field{\*\fldinst{HYPERLINK "http://support.apple.com/kb/ht5290"}}{\fldrslt http://support.apple.com/kb/ht5290}}).\ +\ + +\b \ul Simplified web-based installs\ +\ulnone [NEW for Python 3.4.2] +\b0 \ +\ +With the change to the newer flat format installer package, the download file now has a +\f1 .pkg +\f0 extension as it is no longer necessary to embed the installer within a disk image ( +\f1 .dmg +\f0 ) container. If you download the Python installer through a web browser, the OS X installer application may open automatically to allow you to perform the install. If your browser settings do not allow automatic open, double click on the downloaded installer file.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural + +\b \cf0 \ul \ulc0 New Installation Options and Defaults\ +\ulnone [NEW for Python 3.4.0] +\b0 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural +\cf0 \ +The Python installer now includes an option to automatically install or upgrade +\f1 pip +\f0 , a tool for installing and managing Python packages. This option is enabled by default and no Internet access is required. 
If you do not want the installer to do this, select the +\i Customize +\i0 option at the +\i Installation Type +\i0 step and uncheck the +\i Install or ugprade pip +\i0 option.\ +\ +To make it easier to use scripts installed by third-party Python packages, with +\f1 pip +\f0 or by other means, the +\i Shell profile updater +\i0 option is now enabled by default, as has been the case with Python 2.7.x installers. You can also turn this option off by selecting +\i Customize +\i0 and unchecking the +\i Shell profile updater +\i0 option. You can also update your shell profile later by launching the +\i Update Shell Profile +\i0 command found in the +\f1 /Applications/Python $VERSION +\f0 folder. You may need to start a new terminal window for the changes to take effect.\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural + +\b \cf0 \ul \ulc0 Python 3 and Python 2 Co-existence\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural + +\b0 \cf0 \ulnone \ +Python.org Python $VERSION and 2.7.x versions can both be installed on your system and will not conflict. Command names for Python 3 contain a 3 in them, +\f1 python3 +\f0 (or +\f1 python$VERSION +\f0 ), +\f1 idle3 +\f0 (or i +\f1 dle$VERSION +\f0 ), +\f1 pip3 +\f0 (or +\f1 pip$VERSION +\f0 ), etc. Python 2.7 command names contain a 2 or no digit: +\f1 python2 +\f0 (or +\f1 python2.7 +\f0 or +\f1 python +\f0 ), +\f1 idle2 +\f0 (or +\f1 idle2.7 +\f0 or +\f1 idle +\f0 ), etc. If you want to use +\f1 pip +\f0 with Python 2.7.x, download and install a separate copy of it from the Python Package Index ({\field{\*\fldinst{HYPERLINK "https://pypi.python.org/pypi/pip/"}}{\fldrslt https://pypi.python.org/pypi/pip/}}).\ +} \ No newline at end of file diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -302,6 +302,9 @@ Build ----- +- Issue #15661: python.org OS X installers are now distributed as signed + installer packages compatible with the Gatekeeper security feature. + - Issue #21958: Define HAVE_ROUND when building with Visual Studio 2013 and above. Patch by Zachary Turner. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 07:53:41 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 20 Sep 2014 05:53:41 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjQ3?= =?utf-8?b?OiBBZGQgTk5UUEVycm9yIHRvIG5udHBsaWIuX19hbGxfXy4=?= Message-ID: <20140920055341.99319.35636@mail.hg.python.org> https://hg.python.org/cpython/rev/eb9eac80c17a changeset: 92488:eb9eac80c17a branch: 3.4 parent: 92486:569a889e3b6c user: Berker Peksag date: Sat Sep 20 08:53:05 2014 +0300 summary: Issue #22247: Add NNTPError to nntplib.__all__. 
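With NNTPError added to __all__, code that relies on a star-import can catch the module's base exception class without a separate import. For illustration only (the news server name below is a placeholder, not taken from the changeset):

    from nntplib import *  # after this change, NNTPError is exported too

    try:
        with NNTP('news.example.com') as server:
            resp, count, first, last, name = server.group('comp.lang.python')
            print(resp)
    except NNTPError as exc:
        # NNTPError is the common base of NNTPReplyError, NNTPTemporaryError,
        # NNTPPermanentError, NNTPProtocolError and NNTPDataError.
        print('NNTP failure:', exc)
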
files: Lib/nntplib.py | 4 ++-- Lib/test/test_nntplib.py | 17 ++++++++++++----- Misc/NEWS | 2 ++ 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/Lib/nntplib.py b/Lib/nntplib.py --- a/Lib/nntplib.py +++ b/Lib/nntplib.py @@ -80,8 +80,8 @@ from socket import _GLOBAL_DEFAULT_TIMEOUT __all__ = ["NNTP", - "NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError", - "NNTPProtocolError", "NNTPDataError", + "NNTPError", "NNTPReplyError", "NNTPTemporaryError", + "NNTPPermanentError", "NNTPProtocolError", "NNTPDataError", "decode_header", ] diff --git a/Lib/test/test_nntplib.py b/Lib/test/test_nntplib.py --- a/Lib/test/test_nntplib.py +++ b/Lib/test/test_nntplib.py @@ -1412,11 +1412,18 @@ def test_ssl_support(self): self.assertTrue(hasattr(nntplib, 'NNTP_SSL')) -def test_main(): - tests = [MiscTests, NNTPv1Tests, NNTPv2Tests, CapsAfterLoginNNTPv2Tests, - SendReaderNNTPv2Tests, NetworkedNNTPTests, NetworkedNNTP_SSLTests] - support.run_unittest(*tests) +class PublicAPITests(unittest.TestCase): + """Ensures that the correct values are exposed in the public API.""" + + def test_module_all_attribute(self): + self.assertTrue(hasattr(nntplib, '__all__')) + target_api = ['NNTP', 'NNTPError', 'NNTPReplyError', + 'NNTPTemporaryError', 'NNTPPermanentError', + 'NNTPProtocolError', 'NNTPDataError', 'decode_header'] + if ssl is not None: + target_api.append('NNTP_SSL') + self.assertEqual(set(nntplib.__all__), set(target_api)) if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,8 @@ Library ------- +- Issue #22247: Add NNTPError to nntplib.__all__. + - Issue #4180: The warnings registries are now reset when the filters are modified. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 07:53:42 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 20 Sep 2014 05:53:42 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322247=3A_Add_NNTPError_to_nntplib=2E=5F=5Fall?= =?utf-8?b?X18u?= Message-ID: <20140920055341.15324.51461@mail.hg.python.org> https://hg.python.org/cpython/rev/e21b0bbc06ea changeset: 92489:e21b0bbc06ea parent: 92487:52b498e03df4 parent: 92488:eb9eac80c17a user: Berker Peksag date: Sat Sep 20 08:54:32 2014 +0300 summary: Issue #22247: Add NNTPError to nntplib.__all__. 
files: Lib/nntplib.py | 4 ++-- Lib/test/test_nntplib.py | 17 ++++++++++++----- Misc/NEWS | 2 ++ 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/Lib/nntplib.py b/Lib/nntplib.py --- a/Lib/nntplib.py +++ b/Lib/nntplib.py @@ -80,8 +80,8 @@ from socket import _GLOBAL_DEFAULT_TIMEOUT __all__ = ["NNTP", - "NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError", - "NNTPProtocolError", "NNTPDataError", + "NNTPError", "NNTPReplyError", "NNTPTemporaryError", + "NNTPPermanentError", "NNTPProtocolError", "NNTPDataError", "decode_header", ] diff --git a/Lib/test/test_nntplib.py b/Lib/test/test_nntplib.py --- a/Lib/test/test_nntplib.py +++ b/Lib/test/test_nntplib.py @@ -1412,11 +1412,18 @@ def test_ssl_support(self): self.assertTrue(hasattr(nntplib, 'NNTP_SSL')) -def test_main(): - tests = [MiscTests, NNTPv1Tests, NNTPv2Tests, CapsAfterLoginNNTPv2Tests, - SendReaderNNTPv2Tests, NetworkedNNTPTests, NetworkedNNTP_SSLTests] - support.run_unittest(*tests) +class PublicAPITests(unittest.TestCase): + """Ensures that the correct values are exposed in the public API.""" + + def test_module_all_attribute(self): + self.assertTrue(hasattr(nntplib, '__all__')) + target_api = ['NNTP', 'NNTPError', 'NNTPReplyError', + 'NNTPTemporaryError', 'NNTPPermanentError', + 'NNTPProtocolError', 'NNTPDataError', 'decode_header'] + if ssl is not None: + target_api.append('NNTP_SSL') + self.assertEqual(set(nntplib.__all__), set(target_api)) if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,8 @@ Library ------- +- Issue #22247: Add NNTPError to nntplib.__all__. + - Issue #22366: urllib.request.urlopen will accept a context object (SSLContext) as an argument which will then used be for HTTPS connection. Patch by Alex Gaynor. 
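The Issue #22366 entry visible in the NEWS hunk above refers to the new ``context`` parameter of urllib.request.urlopen. A rough sketch of the intended call pattern (the URL is a placeholder; this assumes a Python build that already includes that change):

    import ssl
    import urllib.request

    # Build a verifying SSLContext from the platform's trusted CA store.
    context = ssl.create_default_context()
    with urllib.request.urlopen('https://www.example.org/', context=context) as resp:
        print(resp.status, resp.reason)
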
-- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sat Sep 20 09:33:07 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 20 Sep 2014 09:33:07 +0200 Subject: [Python-checkins] Daily reference leaks (8b58be9f98a7): sum=130911 Message-ID: results for 8b58be9f98a7 on branch "default" -------------------------------------------- test_collections leaked [4, 0, 0] references, sum=4 test_collections leaked [2, 0, 0] memory blocks, sum=2 test_distutils leaked [37725, 37725, 37725] references, sum=113175 test_distutils leaked [5909, 5911, 5911] memory blocks, sum=17731 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 0, -2] references, sum=-2 test_site leaked [0, 0, -2] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogVnF35u', '-x'] From python-checkins at python.org Sat Sep 20 17:41:54 2014 From: python-checkins at python.org (alex.gaynor) Date: Sat, 20 Sep 2014 15:41:54 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP476=3A_Explicitly_mention_?= =?utf-8?q?=28and_discourage=29_the_ability_to_monkeypatch=2C_and?= Message-ID: <20140920154150.17403.10751@mail.hg.python.org> https://hg.python.org/peps/rev/d37d916f3a34 changeset: 5556:d37d916f3a34 user: Alex Gaynor date: Sat Sep 20 08:41:47 2014 -0700 summary: PEP476: Explicitly mention (and discourage) the ability to monkeypatch, and note the python versions files: pep-0476.txt | 10 +++++++++- 1 files changed, 9 insertions(+), 1 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -26,7 +26,8 @@ This PEP proposes to enable verification of X509 certificate signatures, as well as hostname verification for Python's HTTP clients by default, subject to -opt-out on a per-call basis. +opt-out on a per-call basis. This change would be applied to Python 2.7, Python +3.4, and Python 3.5. 
Rationale ========= @@ -129,6 +130,13 @@ context = ssl._create_unverified_context() urllib.urlopen("https://no-valid-cert", context=context) +It is also possibly **though highly discouraged** to globally disable +verification by monkeypatching the ``ssl`` module:: + + import ssl + + ssl._create_default_https_context = ssl._create_unverified_context + Other protocols =============== -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Sep 20 17:53:32 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 20 Sep 2014 15:53:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_use_patch_cont?= =?utf-8?q?ext_manager_instead_of_decorator_because_the_decorator_=27leaks?= =?utf-8?q?=27?= Message-ID: <20140920155331.13856.40102@mail.hg.python.org> https://hg.python.org/cpython/rev/bf6163635eda changeset: 92490:bf6163635eda branch: 3.4 parent: 92488:eb9eac80c17a user: Benjamin Peterson date: Sat Sep 20 11:53:12 2014 -0400 summary: use patch context manager instead of decorator because the decorator 'leaks' metadata onto the function files: Lib/distutils/tests/test_dir_util.py | 6 +++--- Lib/distutils/tests/test_file_util.py | 15 +++++++-------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -122,12 +122,12 @@ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') - @patch('os.listdir', side_effect=OSError()) - def test_copy_tree_exception_in_listdir(self, listdir): + def test_copy_tree_exception_in_listdir(self): """ An exception in listdir should raise a DistutilsFileError """ - with self.assertRaises(errors.DistutilsFileError): + with patch("os.listdir", side_effect=OSError()), \ + self.assertRaises(errors.DistutilsFileError): src = self.tempdirs[-1] dir_util.copy_tree(src, None) diff --git a/Lib/distutils/tests/test_file_util.py b/Lib/distutils/tests/test_file_util.py --- a/Lib/distutils/tests/test_file_util.py +++ b/Lib/distutils/tests/test_file_util.py @@ -61,24 +61,23 @@ wanted = ['moving %s -> %s' % (self.source, self.target_dir)] self.assertEqual(self._logs, wanted) - @patch('os.rename', side_effect=OSError('wrong', 1)) - def test_move_file_exception_unpacking_rename(self, _): + def test_move_file_exception_unpacking_rename(self): # see issue 22182 - with self.assertRaises(DistutilsFileError): + with patch("os.rename", side_effect=OSError("wrong", 1)), \ + self.assertRaises(DistutilsFileError): with open(self.source, 'w') as fobj: fobj.write('spam eggs') move_file(self.source, self.target, verbose=0) - @patch('os.rename', side_effect=OSError(errno.EXDEV, 'wrong')) - @patch('os.unlink', side_effect=OSError('wrong', 1)) - def test_move_file_exception_unpacking_unlink(self, rename, unlink): + def test_move_file_exception_unpacking_unlink(self): # see issue 22182 - with self.assertRaises(DistutilsFileError): + with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \ + patch("os.unlink", side_effect=OSError("wrong", 1)), \ + self.assertRaises(DistutilsFileError): with open(self.source, 'w') as fobj: fobj.write('spam eggs') move_file(self.source, self.target, verbose=0) - def test_suite(): return unittest.makeSuite(FileUtilTestCase) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 17:53:32 2014 From: python-checkins at 
python.org (benjamin.peterson) Date: Sat, 20 Sep 2014 15:53:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140920155331.83944.26570@mail.hg.python.org> https://hg.python.org/cpython/rev/9e5389c92577 changeset: 92491:9e5389c92577 parent: 92489:e21b0bbc06ea parent: 92490:bf6163635eda user: Benjamin Peterson date: Sat Sep 20 11:53:27 2014 -0400 summary: merge 3.4 files: Lib/distutils/tests/test_dir_util.py | 6 +++--- Lib/distutils/tests/test_file_util.py | 15 +++++++-------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py --- a/Lib/distutils/tests/test_dir_util.py +++ b/Lib/distutils/tests/test_dir_util.py @@ -122,12 +122,12 @@ self.assertEqual(ensure_relative('c:\\home\\foo'), 'c:home\\foo') self.assertEqual(ensure_relative('home\\foo'), 'home\\foo') - @patch('os.listdir', side_effect=OSError()) - def test_copy_tree_exception_in_listdir(self, listdir): + def test_copy_tree_exception_in_listdir(self): """ An exception in listdir should raise a DistutilsFileError """ - with self.assertRaises(errors.DistutilsFileError): + with patch("os.listdir", side_effect=OSError()), \ + self.assertRaises(errors.DistutilsFileError): src = self.tempdirs[-1] dir_util.copy_tree(src, None) diff --git a/Lib/distutils/tests/test_file_util.py b/Lib/distutils/tests/test_file_util.py --- a/Lib/distutils/tests/test_file_util.py +++ b/Lib/distutils/tests/test_file_util.py @@ -61,24 +61,23 @@ wanted = ['moving %s -> %s' % (self.source, self.target_dir)] self.assertEqual(self._logs, wanted) - @patch('os.rename', side_effect=OSError('wrong', 1)) - def test_move_file_exception_unpacking_rename(self, _): + def test_move_file_exception_unpacking_rename(self): # see issue 22182 - with self.assertRaises(DistutilsFileError): + with patch("os.rename", side_effect=OSError("wrong", 1)), \ + self.assertRaises(DistutilsFileError): with open(self.source, 'w') as fobj: fobj.write('spam eggs') move_file(self.source, self.target, verbose=0) - @patch('os.rename', side_effect=OSError(errno.EXDEV, 'wrong')) - @patch('os.unlink', side_effect=OSError('wrong', 1)) - def test_move_file_exception_unpacking_unlink(self, rename, unlink): + def test_move_file_exception_unpacking_unlink(self): # see issue 22182 - with self.assertRaises(DistutilsFileError): + with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \ + patch("os.unlink", side_effect=OSError("wrong", 1)), \ + self.assertRaises(DistutilsFileError): with open(self.source, 'w') as fobj: fobj.write('spam eggs') move_file(self.source, self.target, verbose=0) - def test_suite(): return unittest.makeSuite(FileUtilTestCase) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 18:19:23 2014 From: python-checkins at python.org (alex.gaynor) Date: Sat, 20 Sep 2014 16:19:23 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP476=3A_Corrected_a_typo?= Message-ID: <20140920161921.102559.23809@mail.hg.python.org> https://hg.python.org/peps/rev/b2372f9b761f changeset: 5557:b2372f9b761f user: Alex Gaynor date: Sat Sep 20 09:19:19 2014 -0700 summary: PEP476: Corrected a typo files: pep-0476.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -130,7 +130,7 @@ context = ssl._create_unverified_context() urllib.urlopen("https://no-valid-cert", context=context) -It is also possibly **though 
highly discouraged** to globally disable +It is also possible **though highly discouraged** to globally disable verification by monkeypatching the ``ssl`` module:: import ssl -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Sep 20 19:14:03 2014 From: python-checkins at python.org (georg.brandl) Date: Sat, 20 Sep 2014 17:14:03 +0000 Subject: [Python-checkins] =?utf-8?q?release=3A_Adapt_to_new_Mac_installer?= =?utf-8?q?_file_names=2E?= Message-ID: <20140920171403.80455.73461@mail.hg.python.org> https://hg.python.org/release/rev/19e6f90396fa changeset: 87:19e6f90396fa user: Georg Brandl date: Sat Sep 20 19:13:59 2014 +0200 summary: Adapt to new Mac installer file names. files: add-to-pydotorg.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/add-to-pydotorg.py b/add-to-pydotorg.py --- a/add-to-pydotorg.py +++ b/add-to-pydotorg.py @@ -57,9 +57,9 @@ (rx(r'\.chm$'), ('Windows help file', 1, '')), (rx(r'amd64-pdb\.zip$'), ('Windows debug information files for 64-bit binaries', 1, '')), (rx(r'-pdb\.zip$'), ('Windows debug information files', 1, '')), - (rx(r'-macosx10\.5(_rev\d)?\.dmg$'), ('Mac OS X 32-bit i386/PPC installer', 2, + (rx(r'-macosx10\.5(_rev\d)?\.(dm|pk)g$'), ('Mac OS X 32-bit i386/PPC installer', 2, 'for Mac OS X 10.5 and later')), - (rx(r'-macosx10\.6(_rev\d)?\.dmg$'), ('Mac OS X 64-bit/32-bit installer', 2, + (rx(r'-macosx10\.6(_rev\d)?\.(dm|pk)g$'), ('Mac OS X 64-bit/32-bit installer', 2, 'for Mac OS X 10.6 and later')), ] @@ -113,6 +113,7 @@ filesize = filesize_for(release, rfile), download_button = 'tar.xz' in rfile or 'macosx10.6.dmg' in rfile or + 'macosx10.6.pkg' in rfile or ('.msi' in rfile and not 'amd64' in rfile), ) -- Repository URL: https://hg.python.org/release From python-checkins at python.org Sat Sep 20 23:49:08 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 20 Sep 2014 21:49:08 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIxMDc5OiBpc19h?= =?utf-8?q?ttachment_now_looks_only_at_the_value=2C_ignoring_parameters=2E?= Message-ID: <20140920214906.10316.21414@mail.hg.python.org> https://hg.python.org/cpython/rev/0044ed0af96f changeset: 92492:0044ed0af96f branch: 3.4 parent: 92490:bf6163635eda user: R David Murray date: Sat Sep 20 17:44:53 2014 -0400 summary: #21079: is_attachment now looks only at the value, ignoring parameters. 
files: Lib/email/message.py | 4 +--- Lib/test/test_email/test_message.py | 3 ++- Misc/NEWS | 3 +++ 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Lib/email/message.py b/Lib/email/message.py --- a/Lib/email/message.py +++ b/Lib/email/message.py @@ -941,9 +941,7 @@ @property def is_attachment(self): c_d = self.get('content-disposition') - if c_d is None: - return False - return c_d.lower() == 'attachment' + return False if c_d is None else c_d.content_disposition == 'attachment' def _find_body(self, part, preferencelist): if part.is_attachment: diff --git a/Lib/test/test_email/test_message.py b/Lib/test/test_email/test_message.py --- a/Lib/test/test_email/test_message.py +++ b/Lib/test/test_email/test_message.py @@ -729,7 +729,8 @@ self.assertTrue(m.is_attachment) m.replace_header('Content-Disposition', 'AtTachMent') self.assertTrue(m.is_attachment) - + m.set_param('filename', 'abc.png', 'Content-Disposition') + self.assertTrue(m.is_attachment) class TestEmailMessage(TestEmailMessageBase, TestEmailBase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #21079: Fix email.message.EmailMessage.is_attachment to return the + correct result when the header has parameters as well as a value. + - Issue #22247: Add NNTPError to nntplib.__all__. - Issue #4180: The warnings registries are now reset when the filters -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 20 23:50:01 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 20 Sep 2014 21:50:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2321079=3A_is=5Fattachment_now_looks_only_at_t?= =?utf-8?q?he_value=2C_ignoring_parameters=2E?= Message-ID: <20140920215000.31466.74059@mail.hg.python.org> https://hg.python.org/cpython/rev/54392c4a8880 changeset: 92493:54392c4a8880 parent: 92491:9e5389c92577 parent: 92492:0044ed0af96f user: R David Murray date: Sat Sep 20 17:49:48 2014 -0400 summary: Merge: #21079: is_attachment now looks only at the value, ignoring parameters. files: Lib/email/message.py | 4 +--- Lib/test/test_email/test_message.py | 3 ++- Misc/NEWS | 3 +++ 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Lib/email/message.py b/Lib/email/message.py --- a/Lib/email/message.py +++ b/Lib/email/message.py @@ -941,9 +941,7 @@ @property def is_attachment(self): c_d = self.get('content-disposition') - if c_d is None: - return False - return c_d.lower() == 'attachment' + return False if c_d is None else c_d.content_disposition == 'attachment' def _find_body(self, part, preferencelist): if part.is_attachment: diff --git a/Lib/test/test_email/test_message.py b/Lib/test/test_email/test_message.py --- a/Lib/test/test_email/test_message.py +++ b/Lib/test/test_email/test_message.py @@ -729,7 +729,8 @@ self.assertTrue(m.is_attachment) m.replace_header('Content-Disposition', 'AtTachMent') self.assertTrue(m.is_attachment) - + m.set_param('filename', 'abc.png', 'Content-Disposition') + self.assertTrue(m.is_attachment) class TestEmailMessage(TestEmailMessageBase, TestEmailBase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #21079: Fix email.message.EmailMessage.is_attachment to return the + correct result when the header has parameters as well as a value. + - Issue #22247: Add NNTPError to nntplib.__all__. 
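The attachment test matters beyond is_attachment itself: EmailMessage.get_body() and iter_attachments() rely on it to decide which parts to skip or yield. A rough sketch of that behavior (provisional API; the filename and payload bytes are invented for the example):

    from email.message import EmailMessage

    msg = EmailMessage()
    msg.set_content('See the attached image.')
    msg.add_attachment(b'\x89PNG\r\n\x1a\n', maintype='image', subtype='png',
                       filename='abc.png')

    body = msg.get_body(preferencelist=('plain',))
    print(body.get_content_type())  # text/plain

    for part in msg.iter_attachments():
        print(part.get_filename(), part.get_content_type())  # abc.png image/png
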
- Issue #22366: urllib.request.urlopen will accept a context object -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:17:15 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 20 Sep 2014 22:17:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2321091=3A_make_is=5Fattachment_a_method=2E?= Message-ID: <20140920221715.58653.42059@mail.hg.python.org> https://hg.python.org/cpython/rev/f7aff40609e7 changeset: 92495:f7aff40609e7 parent: 92493:54392c4a8880 parent: 92494:a3df1c24d586 user: R David Murray date: Sat Sep 20 18:16:39 2014 -0400 summary: Merge: #21091: make is_attachment a method. files: Doc/library/email.contentmanager.rst | 8 ++++++-- Lib/email/message.py | 6 +++--- Lib/test/test_email/test_message.py | 10 +++++----- Misc/NEWS | 3 +++ 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -70,11 +70,15 @@ the following methods: - .. attribute:: is_attachment + .. method:: is_attachment - Set to ``True`` if there is a :mailheader:`Content-Disposition` header + Return ``True`` if there is a :mailheader:`Content-Disposition` header and its (case insensitive) value is ``attachment``, ``False`` otherwise. + .. versionchanged:: 3.4.2 + is_attachment is now a method instead of a property, for consistency + with :meth:`~email.message.Message.is_multipart`. + .. method:: get_body(preferencelist=('related', 'html', 'plain')) diff --git a/Lib/email/message.py b/Lib/email/message.py --- a/Lib/email/message.py +++ b/Lib/email/message.py @@ -9,6 +9,7 @@ import re import uu import quopri +import warnings from io import BytesIO, StringIO # Intrapackage imports @@ -938,13 +939,12 @@ policy = default Message.__init__(self, policy) - @property def is_attachment(self): c_d = self.get('content-disposition') return False if c_d is None else c_d.content_disposition == 'attachment' def _find_body(self, part, preferencelist): - if part.is_attachment: + if part.is_attachment(): return maintype, subtype = part.get_content_type().split('/') if maintype == 'text': @@ -1037,7 +1037,7 @@ for part in parts: maintype, subtype = part.get_content_type().split('/') if ((maintype, subtype) in self._body_types and - not part.is_attachment and subtype not in seen): + not part.is_attachment() and subtype not in seen): seen.append(subtype) continue yield part diff --git a/Lib/test/test_email/test_message.py b/Lib/test/test_email/test_message.py --- a/Lib/test/test_email/test_message.py +++ b/Lib/test/test_email/test_message.py @@ -722,15 +722,15 @@ def test_is_attachment(self): m = self._make_message() - self.assertFalse(m.is_attachment) + self.assertFalse(m.is_attachment()) m['Content-Disposition'] = 'inline' - self.assertFalse(m.is_attachment) + self.assertFalse(m.is_attachment()) m.replace_header('Content-Disposition', 'attachment') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) m.replace_header('Content-Disposition', 'AtTachMent') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) m.set_param('filename', 'abc.png', 'Content-Disposition') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) class TestEmailMessage(TestEmailMessageBase, TestEmailBase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue 
#21091: Fix API bug: email.message.EmailMessage.is_attachment is now + a method. + - Issue #21079: Fix email.message.EmailMessage.is_attachment to return the correct result when the header has parameters as well as a value. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:17:15 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 20 Sep 2014 22:17:15 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIxMDkxOiBtYWtl?= =?utf-8?q?_is=5Fattachment_a_method=2E?= Message-ID: <20140920221714.74533.11759@mail.hg.python.org> https://hg.python.org/cpython/rev/a3df1c24d586 changeset: 92494:a3df1c24d586 branch: 3.4 parent: 92492:0044ed0af96f user: R David Murray date: Sat Sep 20 18:05:28 2014 -0400 summary: #21091: make is_attachment a method. Since EmailMessage is a provisional API we can fix API bugs in a maintenance release, but I used a trick suggested by Serhiy to maintain backward compatibility with 3.4.0/1. files: Doc/library/email.contentmanager.rst | 8 ++++- Lib/email/message.py | 20 +++++++++++++-- Lib/test/test_email/test_message.py | 20 ++++++++++++---- Misc/NEWS | 5 ++++ 4 files changed, 43 insertions(+), 10 deletions(-) diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -70,11 +70,15 @@ the following methods: - .. attribute:: is_attachment + .. method:: is_attachment - Set to ``True`` if there is a :mailheader:`Content-Disposition` header + Return ``True`` if there is a :mailheader:`Content-Disposition` header and its (case insensitive) value is ``attachment``, ``False`` otherwise. + .. versionchanged:: 3.4.2 + is_attachment is now a method instead of a property, for consistency + with :meth:`~email.message.Message.is_multipart`. + .. method:: get_body(preferencelist=('related', 'html', 'plain')) diff --git a/Lib/email/message.py b/Lib/email/message.py --- a/Lib/email/message.py +++ b/Lib/email/message.py @@ -9,6 +9,7 @@ import re import uu import quopri +import warnings from io import BytesIO, StringIO # Intrapackage imports @@ -929,6 +930,17 @@ # I.e. def walk(self): ... from email.iterators import walk +# XXX Support for temporary deprecation hack for is_attachment property. +class _IsAttachment: + def __init__(self, value): + self.value = value + def __call__(self): + return self.value + def __bool__(self): + warnings.warn("is_attachment will be a method, not a property, in 3.5", + DeprecationWarning, + stacklevel=3) + return self.value class MIMEPart(Message): @@ -941,10 +953,12 @@ @property def is_attachment(self): c_d = self.get('content-disposition') - return False if c_d is None else c_d.content_disposition == 'attachment' + result = False if c_d is None else c_d.content_disposition == 'attachment' + # XXX transitional hack to raise deprecation if not called. 
+ return _IsAttachment(result) def _find_body(self, part, preferencelist): - if part.is_attachment: + if part.is_attachment(): return maintype, subtype = part.get_content_type().split('/') if maintype == 'text': @@ -1037,7 +1051,7 @@ for part in parts: maintype, subtype = part.get_content_type().split('/') if ((maintype, subtype) in self._body_types and - not part.is_attachment and subtype not in seen): + not part.is_attachment() and subtype not in seen): seen.append(subtype) continue yield part diff --git a/Lib/test/test_email/test_message.py b/Lib/test/test_email/test_message.py --- a/Lib/test/test_email/test_message.py +++ b/Lib/test/test_email/test_message.py @@ -722,15 +722,25 @@ def test_is_attachment(self): m = self._make_message() - self.assertFalse(m.is_attachment) + self.assertFalse(m.is_attachment()) + with self.assertWarns(DeprecationWarning): + self.assertFalse(m.is_attachment) m['Content-Disposition'] = 'inline' - self.assertFalse(m.is_attachment) + self.assertFalse(m.is_attachment()) + with self.assertWarns(DeprecationWarning): + self.assertFalse(m.is_attachment) m.replace_header('Content-Disposition', 'attachment') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) + with self.assertWarns(DeprecationWarning): + self.assertTrue(m.is_attachment) m.replace_header('Content-Disposition', 'AtTachMent') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) + with self.assertWarns(DeprecationWarning): + self.assertTrue(m.is_attachment) m.set_param('filename', 'abc.png', 'Content-Disposition') - self.assertTrue(m.is_attachment) + self.assertTrue(m.is_attachment()) + with self.assertWarns(DeprecationWarning): + self.assertTrue(m.is_attachment) class TestEmailMessage(TestEmailMessageBase, TestEmailBase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,11 @@ Library ------- +- Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now + a method. Since EmailMessage is provisional, we can change the API in a + maintenance release, but we use a trick to remain backward compatible with + 3.4.0/1. + - Issue #21079: Fix email.message.EmailMessage.is_attachment to return the correct result when the header has parameters as well as a value. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:24:14 2014 From: python-checkins at python.org (antoine.pitrou) Date: Sat, 20 Sep 2014 22:24:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322359=3A_Remove_i?= =?utf-8?q?ncorrect_uses_of_recursive_make=2E__Patch_by_Jonas_Wagner=2E?= Message-ID: <20140920222413.87478.18846@mail.hg.python.org> https://hg.python.org/cpython/rev/c2a53aa27cad changeset: 92496:c2a53aa27cad user: Antoine Pitrou date: Sun Sep 21 00:21:58 2014 +0200 summary: Issue #22359: Remove incorrect uses of recursive make. Patch by Jonas Wagner. 
files: Makefile.pre.in | 13 ++++++------- Misc/NEWS | 3 +++ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -686,11 +686,12 @@ ############################################################################ # Importlib +Programs/_freeze_importlib.o: Programs/_freeze_importlib.c + Programs/_freeze_importlib: Programs/_freeze_importlib.o $(LIBRARY_OBJS_OMIT_FROZEN) $(LINKCC) $(PY_LDFLAGS) -o $@ Programs/_freeze_importlib.o $(LIBRARY_OBJS_OMIT_FROZEN) $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST) -Python/importlib.h: $(srcdir)/Lib/importlib/_bootstrap.py Programs/_freeze_importlib.c - $(MAKE) Programs/_freeze_importlib +Python/importlib.h: $(srcdir)/Lib/importlib/_bootstrap.py Programs/_freeze_importlib ./Programs/_freeze_importlib \ $(srcdir)/Lib/importlib/_bootstrap.py Python/importlib.h @@ -752,15 +753,13 @@ $(IO_OBJS): $(IO_H) -$(GRAMMAR_H): $(GRAMMAR_INPUT) $(PGENSRCS) +$(GRAMMAR_H): $(GRAMMAR_INPUT) $(PGEN) @$(MKDIR_P) Include - $(MAKE) $(PGEN) $(PGEN) $(GRAMMAR_INPUT) $(GRAMMAR_H) $(GRAMMAR_C) -$(GRAMMAR_C): $(GRAMMAR_H) $(GRAMMAR_INPUT) $(PGENSRCS) - $(MAKE) $(GRAMMAR_H) +$(GRAMMAR_C): $(GRAMMAR_H) touch $(GRAMMAR_C) -$(PGEN): $(PGENOBJS) +$(PGEN): $(PGENOBJS) $(CC) $(OPT) $(PY_LDFLAGS) $(PGENOBJS) $(LIBS) -o $(PGEN) Parser/grammar.o: $(srcdir)/Parser/grammar.c \ diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -936,6 +936,9 @@ Build ----- +- Issue #22359: Remove incorrect uses of recursive make. Patch by Jonas + Wagner. + - Issue #21958: Define HAVE_ROUND when building with Visual Studio 2013 and above. Patch by Zachary Turner. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:38:20 2014 From: python-checkins at python.org (georg.brandl) Date: Sat, 20 Sep 2014 22:38:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogRG9jOiByZW1vdmUg?= =?utf-8?q?invalid_uses_of_=22=3Aoption=3A=22_which_will_emit_warnings_in_?= =?utf-8?q?Sphinx_1=2E3=2E?= Message-ID: <20140920223820.105633.25423@mail.hg.python.org> https://hg.python.org/cpython/rev/7af0315bdfe0 changeset: 92497:7af0315bdfe0 branch: 3.4 parent: 92494:a3df1c24d586 user: Georg Brandl date: Sun Sep 21 00:35:08 2014 +0200 summary: Doc: remove invalid uses of ":option:" which will emit warnings in Sphinx 1.3. 
files: Doc/distutils/builtdist.rst | 40 +++++++++++----------- Doc/distutils/configfile.rst | 4 +- Doc/distutils/examples.rst | 16 ++++---- Doc/distutils/extending.rst | 4 +- Doc/distutils/setupscript.rst | 38 ++++++++++---------- Doc/distutils/sourcedist.rst | 12 +++--- Doc/whatsnew/2.1.rst | 2 +- 7 files changed, 58 insertions(+), 58 deletions(-) diff --git a/Doc/distutils/builtdist.rst b/Doc/distutils/builtdist.rst --- a/Doc/distutils/builtdist.rst +++ b/Doc/distutils/builtdist.rst @@ -186,21 +186,21 @@ +------------------------------------------+----------------------------------------------+ | RPM :file:`.spec` file option or section | Distutils setup script option | +==========================================+==============================================+ -| Name | :option:`name` | +| Name | ``name`` | +------------------------------------------+----------------------------------------------+ -| Summary (in preamble) | :option:`description` | +| Summary (in preamble) | ``description`` | +------------------------------------------+----------------------------------------------+ -| Version | :option:`version` | +| Version | ``version`` | +------------------------------------------+----------------------------------------------+ -| Vendor | :option:`author` and :option:`author_email`, | -| | or --- & :option:`maintainer` and | -| | :option:`maintainer_email` | +| Vendor | ``author`` and ``author_email``, | +| | or --- & ``maintainer`` and | +| | ``maintainer_email`` | +------------------------------------------+----------------------------------------------+ -| Copyright | :option:`license` | +| Copyright | ``license`` | +------------------------------------------+----------------------------------------------+ -| Url | :option:`url` | +| Url | ``url`` | +------------------------------------------+----------------------------------------------+ -| %description (section) | :option:`long_description` | +| %description (section) | ``long_description`` | +------------------------------------------+----------------------------------------------+ Additionally, there are many options in :file:`.spec` files that don't have @@ -211,27 +211,27 @@ | RPM :file:`.spec` file option | :command:`bdist_rpm` option | default value | | or section | | | +===============================+=============================+=========================+ -| Release | :option:`release` | "1" | +| Release | ``release`` | "1" | +-------------------------------+-----------------------------+-------------------------+ -| Group | :option:`group` | "Development/Libraries" | +| Group | ``group`` | "Development/Libraries" | +-------------------------------+-----------------------------+-------------------------+ -| Vendor | :option:`vendor` | (see above) | +| Vendor | ``vendor`` | (see above) | +-------------------------------+-----------------------------+-------------------------+ -| Packager | :option:`packager` | (none) | +| Packager | ``packager`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Provides | :option:`provides` | (none) | +| Provides | ``provides`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Requires | :option:`requires` | (none) | +| Requires | ``requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Conflicts | :option:`conflicts` | (none) | +| Conflicts | ``conflicts`` | (none) | 
+-------------------------------+-----------------------------+-------------------------+ -| Obsoletes | :option:`obsoletes` | (none) | +| Obsoletes | ``obsoletes`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Distribution | :option:`distribution_name` | (none) | +| Distribution | ``distribution_name`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| BuildRequires | :option:`build_requires` | (none) | +| BuildRequires | ``build_requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Icon | :option:`icon` | (none) | +| Icon | ``icon`` | (none) | +-------------------------------+-----------------------------+-------------------------+ Obviously, supplying even a few of these options on the command-line would be diff --git a/Doc/distutils/configfile.rst b/Doc/distutils/configfile.rst --- a/Doc/distutils/configfile.rst +++ b/Doc/distutils/configfile.rst @@ -67,7 +67,7 @@ [...] Note that an option spelled :option:`--foo-bar` on the command-line is spelled -:option:`foo_bar` in configuration files. +``foo_bar`` in configuration files. .. _distutils-build-ext-inplace: @@ -114,7 +114,7 @@ doc/ examples/ -Note that the :option:`doc_files` option is simply a whitespace-separated string +Note that the ``doc_files`` option is simply a whitespace-separated string split across multiple lines for readability. diff --git a/Doc/distutils/examples.rst b/Doc/distutils/examples.rst --- a/Doc/distutils/examples.rst +++ b/Doc/distutils/examples.rst @@ -22,7 +22,7 @@ If you're just distributing a couple of modules, especially if they don't live in a particular package, you can specify them individually using the -:option:`py_modules` option in the setup script. +``py_modules`` option in the setup script. In the simplest case, you'll have two files to worry about: a setup script and the single module you're distributing, :file:`foo.py` in this example:: @@ -41,12 +41,12 @@ ) Note that the name of the distribution is specified independently with the -:option:`name` option, and there's no rule that says it has to be the same as +``name`` option, and there's no rule that says it has to be the same as the name of the sole module in the distribution (although that's probably a good convention to follow). However, the distribution name is used to generate filenames, so you should stick to letters, digits, underscores, and hyphens. -Since :option:`py_modules` is a list, you can of course specify multiple +Since ``py_modules`` is a list, you can of course specify multiple modules, eg. if you're distributing modules :mod:`foo` and :mod:`bar`, your setup might look like this:: @@ -130,7 +130,7 @@ ) If you want to put modules in directories not named for their package, then you -need to use the :option:`package_dir` option again. For example, if the +need to use the ``package_dir`` option again. For example, if the :file:`src` directory holds modules in the :mod:`foobar` package:: / @@ -169,8 +169,8 @@ (The empty string also stands for the current directory.) -If you have sub-packages, they must be explicitly listed in :option:`packages`, -but any entries in :option:`package_dir` automatically extend to sub-packages. +If you have sub-packages, they must be explicitly listed in ``packages``, +but any entries in ``package_dir`` automatically extend to sub-packages. 
(In other words, the Distutils does *not* scan your source tree, trying to figure out which directories correspond to Python packages by looking for :file:`__init__.py` files.) Thus, if the default layout grows a sub-package:: @@ -199,8 +199,8 @@ Single extension module ======================= -Extension modules are specified using the :option:`ext_modules` option. -:option:`package_dir` has no effect on where extension source files are found; +Extension modules are specified using the ``ext_modules`` option. +``package_dir`` has no effect on where extension source files are found; it only affects the source for pure Python modules. The simplest case, a single extension module in a single C source file, is:: diff --git a/Doc/distutils/extending.rst b/Doc/distutils/extending.rst --- a/Doc/distutils/extending.rst +++ b/Doc/distutils/extending.rst @@ -61,7 +61,7 @@ requiring modifications to the Python installation. This is expected to allow third-party extensions to provide support for additional packaging systems, but the commands can be used for anything distutils commands can be used for. A new -configuration option, :option:`command_packages` (command-line option +configuration option, ``command_packages`` (command-line option :option:`--command-packages`), can be used to specify additional packages to be searched for modules implementing commands. Like all distutils options, this can be specified on the command line or in a configuration file. This option @@ -75,7 +75,7 @@ packages searched for command implementations; multiple package names should be separated by commas. When not specified, the search is only performed in the :mod:`distutils.command` package. When :file:`setup.py` is run with the option -:option:`--command-packages` :option:`distcmds,buildcmds`, however, the packages +``--command-packages distcmds,buildcmds``, however, the packages :mod:`distutils.command`, :mod:`distcmds`, and :mod:`buildcmds` will be searched in that order. New commands are expected to be implemented in modules of the same name as the command by classes sharing the same name. Given the example diff --git a/Doc/distutils/setupscript.rst b/Doc/distutils/setupscript.rst --- a/Doc/distutils/setupscript.rst +++ b/Doc/distutils/setupscript.rst @@ -62,9 +62,9 @@ Listing whole packages ====================== -The :option:`packages` option tells the Distutils to process (build, distribute, +The ``packages`` option tells the Distutils to process (build, distribute, install, etc.) all pure Python modules found in each package mentioned in the -:option:`packages` list. In order to do this, of course, there has to be a +``packages`` list. In order to do this, of course, there has to be a correspondence between package names and directories in the filesystem. The default correspondence is the most obvious one, i.e. package :mod:`distutils` is found in the directory :file:`distutils` relative to the distribution root. @@ -75,7 +75,7 @@ Distutils will issue a warning but still process the broken package anyway. If you use a different convention to lay out your source directory, that's no -problem: you just have to supply the :option:`package_dir` option to tell the +problem: you just have to supply the ``package_dir`` option to tell the Distutils about your convention. 
For example, say you keep all Python source under :file:`lib`, so that modules in the "root package" (i.e., not in any package at all) are in :file:`lib`, modules in the :mod:`foo` package are in @@ -94,13 +94,13 @@ package_dir = {'foo': 'lib'} -A ``package: dir`` entry in the :option:`package_dir` dictionary implicitly +A ``package: dir`` entry in the ``package_dir`` dictionary implicitly applies to all packages below *package*, so the :mod:`foo.bar` case is automatically handled here. In this example, having ``packages = ['foo', 'foo.bar']`` tells the Distutils to look for :file:`lib/__init__.py` and -:file:`lib/bar/__init__.py`. (Keep in mind that although :option:`package_dir` +:file:`lib/bar/__init__.py`. (Keep in mind that although ``package_dir`` applies recursively, you must explicitly list all packages in -:option:`packages`: the Distutils will *not* recursively scan your source tree +``packages``: the Distutils will *not* recursively scan your source tree looking for any directory with an :file:`__init__.py` file.) @@ -120,7 +120,7 @@ :mod:`pkg` package. Again, the default package/directory layout implies that these two modules can be found in :file:`mod1.py` and :file:`pkg/mod2.py`, and that :file:`pkg/__init__.py` exists as well. And again, you can override the -package/directory correspondence using the :option:`package_dir` option. +package/directory correspondence using the ``package_dir`` option. .. _describing-extensions: @@ -138,7 +138,7 @@ .. XXX read over this section All of this is done through another keyword argument to :func:`setup`, the -:option:`ext_modules` option. :option:`ext_modules` is just a list of +``ext_modules`` option. ``ext_modules`` is just a list of :class:`~distutils.core.Extension` instances, each of which describes a single extension module. Suppose your distribution includes a single extension, called :mod:`foo` and @@ -181,7 +181,7 @@ resulting extension lives. If you have a number of extensions all in the same package (or all under the -same base package), use the :option:`ext_package` keyword argument to +same base package), use the ``ext_package`` keyword argument to :func:`setup`. For example, :: setup(..., @@ -336,24 +336,24 @@ There are still some other options which can be used to handle special cases. -The :option:`optional` option is a boolean; if it is true, +The ``optional`` option is a boolean; if it is true, a build failure in the extension will not abort the build process, but instead simply not install the failing extension. -The :option:`extra_objects` option is a list of object files to be passed to the +The ``extra_objects`` option is a list of object files to be passed to the linker. These files must not have extensions, as the default extension for the compiler is used. -:option:`extra_compile_args` and :option:`extra_link_args` can be used to +``extra_compile_args`` and ``extra_link_args`` can be used to specify additional command line options for the respective compiler and linker command lines. -:option:`export_symbols` is only useful on Windows. It can contain a list of +``export_symbols`` is only useful on Windows. It can contain a list of symbols (functions or variables) to be exported. This option is not needed when building compiled extensions: Distutils will automatically add ``initmodule`` to the list of exported symbols. -The :option:`depends` option is a list of files that the extension depends on +The ``depends`` option is a list of files that the extension depends on (for example header files). 
The build command will call the compiler on the sources to rebuild extension if any on this files has been modified since the previous build. @@ -449,7 +449,7 @@ the current interpreter location. The :option:`--executable` (or :option:`-e`) option will allow the interpreter path to be explicitly overridden. -The :option:`scripts` option simply is a list of files to be handled in this +The ``scripts`` option simply is a list of files to be handled in this way. From the PyXML setup script:: setup(..., @@ -514,11 +514,11 @@ Installing Additional Files =========================== -The :option:`data_files` option can be used to specify additional files needed +The ``data_files`` option can be used to specify additional files needed by the module distribution: configuration files, message catalogs, data files, anything which doesn't fit in the previous categories. -:option:`data_files` specifies a sequence of (*directory*, *files*) pairs in the +``data_files`` specifies a sequence of (*directory*, *files*) pairs in the following way:: setup(..., @@ -539,7 +539,7 @@ directory information from *files* is used to determine the final location of the installed file; only the name of the file is used. -You can specify the :option:`data_files` options as a simple sequence of files +You can specify the ``data_files`` options as a simple sequence of files without specifying a target directory, but this is not recommended, and the :command:`install` command will print a warning in this case. To install data files directly in the target directory, an empty string should be given as the @@ -650,7 +650,7 @@ 1.0.1a2 the second alpha release of the first patch version of 1.0 -:option:`classifiers` are specified in a Python list:: +``classifiers`` are specified in a Python list:: setup(..., classifiers=[ diff --git a/Doc/distutils/sourcedist.rst b/Doc/distutils/sourcedist.rst --- a/Doc/distutils/sourcedist.rst +++ b/Doc/distutils/sourcedist.rst @@ -72,16 +72,16 @@ generate one), the :command:`sdist` command puts a minimal default set into the source distribution: -* all Python source files implied by the :option:`py_modules` and - :option:`packages` options +* all Python source files implied by the ``py_modules`` and + ``packages`` options -* all C source files mentioned in the :option:`ext_modules` or - :option:`libraries` options +* all C source files mentioned in the ``ext_modules`` or + ``libraries`` options .. XXX getting C library sources currently broken---no :meth:`get_source_files` method in :file:`build_clib.py`! -* scripts identified by the :option:`scripts` option +* scripts identified by the ``scripts`` option See :ref:`distutils-installing-scripts`. * anything that looks like a test script: :file:`test/test\*.py` (currently, the @@ -167,7 +167,7 @@ #. include all Python source files in the :file:`distutils` and :file:`distutils/command` subdirectories (because packages corresponding to - those two directories were mentioned in the :option:`packages` option in the + those two directories were mentioned in the ``packages`` option in the setup script---see section :ref:`setup-script`) #. include :file:`README.txt`, :file:`setup.py`, and :file:`setup.cfg` (standard diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst --- a/Doc/whatsnew/2.1.rst +++ b/Doc/whatsnew/2.1.rst @@ -555,7 +555,7 @@ and experiment with them. With the result experience, perhaps it'll be possible to design a really good catalog and then build support for it into Python 2.2. 
For example, the Distutils :command:`sdist` and :command:`bdist_\*` commands -could support a :option:`upload` option that would automatically upload your +could support a ``upload`` option that would automatically upload your package to a catalog server. You can start creating packages containing :file:`PKG-INFO` even if you're not -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:38:21 2014 From: python-checkins at python.org (georg.brandl) Date: Sat, 20 Sep 2014 22:38:21 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_with_3=2E4?= Message-ID: <20140920223820.58653.64193@mail.hg.python.org> https://hg.python.org/cpython/rev/25c52f89ce26 changeset: 92498:25c52f89ce26 parent: 92496:c2a53aa27cad parent: 92497:7af0315bdfe0 user: Georg Brandl date: Sun Sep 21 00:38:13 2014 +0200 summary: merge with 3.4 files: Doc/distutils/builtdist.rst | 40 +++++++++++----------- Doc/distutils/configfile.rst | 4 +- Doc/distutils/examples.rst | 16 ++++---- Doc/distutils/extending.rst | 4 +- Doc/distutils/setupscript.rst | 38 ++++++++++---------- Doc/distutils/sourcedist.rst | 12 +++--- Doc/library/json.rst | 2 +- Doc/whatsnew/2.1.rst | 2 +- 8 files changed, 59 insertions(+), 59 deletions(-) diff --git a/Doc/distutils/builtdist.rst b/Doc/distutils/builtdist.rst --- a/Doc/distutils/builtdist.rst +++ b/Doc/distutils/builtdist.rst @@ -186,21 +186,21 @@ +------------------------------------------+----------------------------------------------+ | RPM :file:`.spec` file option or section | Distutils setup script option | +==========================================+==============================================+ -| Name | :option:`name` | +| Name | ``name`` | +------------------------------------------+----------------------------------------------+ -| Summary (in preamble) | :option:`description` | +| Summary (in preamble) | ``description`` | +------------------------------------------+----------------------------------------------+ -| Version | :option:`version` | +| Version | ``version`` | +------------------------------------------+----------------------------------------------+ -| Vendor | :option:`author` and :option:`author_email`, | -| | or --- & :option:`maintainer` and | -| | :option:`maintainer_email` | +| Vendor | ``author`` and ``author_email``, | +| | or --- & ``maintainer`` and | +| | ``maintainer_email`` | +------------------------------------------+----------------------------------------------+ -| Copyright | :option:`license` | +| Copyright | ``license`` | +------------------------------------------+----------------------------------------------+ -| Url | :option:`url` | +| Url | ``url`` | +------------------------------------------+----------------------------------------------+ -| %description (section) | :option:`long_description` | +| %description (section) | ``long_description`` | +------------------------------------------+----------------------------------------------+ Additionally, there are many options in :file:`.spec` files that don't have @@ -211,27 +211,27 @@ | RPM :file:`.spec` file option | :command:`bdist_rpm` option | default value | | or section | | | +===============================+=============================+=========================+ -| Release | :option:`release` | "1" | +| Release | ``release`` | "1" | +-------------------------------+-----------------------------+-------------------------+ -| Group | :option:`group` | "Development/Libraries" | +| Group | ``group`` | 
"Development/Libraries" | +-------------------------------+-----------------------------+-------------------------+ -| Vendor | :option:`vendor` | (see above) | +| Vendor | ``vendor`` | (see above) | +-------------------------------+-----------------------------+-------------------------+ -| Packager | :option:`packager` | (none) | +| Packager | ``packager`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Provides | :option:`provides` | (none) | +| Provides | ``provides`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Requires | :option:`requires` | (none) | +| Requires | ``requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Conflicts | :option:`conflicts` | (none) | +| Conflicts | ``conflicts`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Obsoletes | :option:`obsoletes` | (none) | +| Obsoletes | ``obsoletes`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Distribution | :option:`distribution_name` | (none) | +| Distribution | ``distribution_name`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| BuildRequires | :option:`build_requires` | (none) | +| BuildRequires | ``build_requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Icon | :option:`icon` | (none) | +| Icon | ``icon`` | (none) | +-------------------------------+-----------------------------+-------------------------+ Obviously, supplying even a few of these options on the command-line would be diff --git a/Doc/distutils/configfile.rst b/Doc/distutils/configfile.rst --- a/Doc/distutils/configfile.rst +++ b/Doc/distutils/configfile.rst @@ -67,7 +67,7 @@ [...] Note that an option spelled :option:`--foo-bar` on the command-line is spelled -:option:`foo_bar` in configuration files. +``foo_bar`` in configuration files. .. _distutils-build-ext-inplace: @@ -114,7 +114,7 @@ doc/ examples/ -Note that the :option:`doc_files` option is simply a whitespace-separated string +Note that the ``doc_files`` option is simply a whitespace-separated string split across multiple lines for readability. diff --git a/Doc/distutils/examples.rst b/Doc/distutils/examples.rst --- a/Doc/distutils/examples.rst +++ b/Doc/distutils/examples.rst @@ -22,7 +22,7 @@ If you're just distributing a couple of modules, especially if they don't live in a particular package, you can specify them individually using the -:option:`py_modules` option in the setup script. +``py_modules`` option in the setup script. In the simplest case, you'll have two files to worry about: a setup script and the single module you're distributing, :file:`foo.py` in this example:: @@ -41,12 +41,12 @@ ) Note that the name of the distribution is specified independently with the -:option:`name` option, and there's no rule that says it has to be the same as +``name`` option, and there's no rule that says it has to be the same as the name of the sole module in the distribution (although that's probably a good convention to follow). However, the distribution name is used to generate filenames, so you should stick to letters, digits, underscores, and hyphens. 
-Since :option:`py_modules` is a list, you can of course specify multiple +Since ``py_modules`` is a list, you can of course specify multiple modules, eg. if you're distributing modules :mod:`foo` and :mod:`bar`, your setup might look like this:: @@ -130,7 +130,7 @@ ) If you want to put modules in directories not named for their package, then you -need to use the :option:`package_dir` option again. For example, if the +need to use the ``package_dir`` option again. For example, if the :file:`src` directory holds modules in the :mod:`foobar` package:: / @@ -169,8 +169,8 @@ (The empty string also stands for the current directory.) -If you have sub-packages, they must be explicitly listed in :option:`packages`, -but any entries in :option:`package_dir` automatically extend to sub-packages. +If you have sub-packages, they must be explicitly listed in ``packages``, +but any entries in ``package_dir`` automatically extend to sub-packages. (In other words, the Distutils does *not* scan your source tree, trying to figure out which directories correspond to Python packages by looking for :file:`__init__.py` files.) Thus, if the default layout grows a sub-package:: @@ -199,8 +199,8 @@ Single extension module ======================= -Extension modules are specified using the :option:`ext_modules` option. -:option:`package_dir` has no effect on where extension source files are found; +Extension modules are specified using the ``ext_modules`` option. +``package_dir`` has no effect on where extension source files are found; it only affects the source for pure Python modules. The simplest case, a single extension module in a single C source file, is:: diff --git a/Doc/distutils/extending.rst b/Doc/distutils/extending.rst --- a/Doc/distutils/extending.rst +++ b/Doc/distutils/extending.rst @@ -61,7 +61,7 @@ requiring modifications to the Python installation. This is expected to allow third-party extensions to provide support for additional packaging systems, but the commands can be used for anything distutils commands can be used for. A new -configuration option, :option:`command_packages` (command-line option +configuration option, ``command_packages`` (command-line option :option:`--command-packages`), can be used to specify additional packages to be searched for modules implementing commands. Like all distutils options, this can be specified on the command line or in a configuration file. This option @@ -75,7 +75,7 @@ packages searched for command implementations; multiple package names should be separated by commas. When not specified, the search is only performed in the :mod:`distutils.command` package. When :file:`setup.py` is run with the option -:option:`--command-packages` :option:`distcmds,buildcmds`, however, the packages +``--command-packages distcmds,buildcmds``, however, the packages :mod:`distutils.command`, :mod:`distcmds`, and :mod:`buildcmds` will be searched in that order. New commands are expected to be implemented in modules of the same name as the command by classes sharing the same name. Given the example diff --git a/Doc/distutils/setupscript.rst b/Doc/distutils/setupscript.rst --- a/Doc/distutils/setupscript.rst +++ b/Doc/distutils/setupscript.rst @@ -62,9 +62,9 @@ Listing whole packages ====================== -The :option:`packages` option tells the Distutils to process (build, distribute, +The ``packages`` option tells the Distutils to process (build, distribute, install, etc.) all pure Python modules found in each package mentioned in the -:option:`packages` list. 
In order to do this, of course, there has to be a +``packages`` list. In order to do this, of course, there has to be a correspondence between package names and directories in the filesystem. The default correspondence is the most obvious one, i.e. package :mod:`distutils` is found in the directory :file:`distutils` relative to the distribution root. @@ -75,7 +75,7 @@ Distutils will issue a warning but still process the broken package anyway. If you use a different convention to lay out your source directory, that's no -problem: you just have to supply the :option:`package_dir` option to tell the +problem: you just have to supply the ``package_dir`` option to tell the Distutils about your convention. For example, say you keep all Python source under :file:`lib`, so that modules in the "root package" (i.e., not in any package at all) are in :file:`lib`, modules in the :mod:`foo` package are in @@ -94,13 +94,13 @@ package_dir = {'foo': 'lib'} -A ``package: dir`` entry in the :option:`package_dir` dictionary implicitly +A ``package: dir`` entry in the ``package_dir`` dictionary implicitly applies to all packages below *package*, so the :mod:`foo.bar` case is automatically handled here. In this example, having ``packages = ['foo', 'foo.bar']`` tells the Distutils to look for :file:`lib/__init__.py` and -:file:`lib/bar/__init__.py`. (Keep in mind that although :option:`package_dir` +:file:`lib/bar/__init__.py`. (Keep in mind that although ``package_dir`` applies recursively, you must explicitly list all packages in -:option:`packages`: the Distutils will *not* recursively scan your source tree +``packages``: the Distutils will *not* recursively scan your source tree looking for any directory with an :file:`__init__.py` file.) @@ -120,7 +120,7 @@ :mod:`pkg` package. Again, the default package/directory layout implies that these two modules can be found in :file:`mod1.py` and :file:`pkg/mod2.py`, and that :file:`pkg/__init__.py` exists as well. And again, you can override the -package/directory correspondence using the :option:`package_dir` option. +package/directory correspondence using the ``package_dir`` option. .. _describing-extensions: @@ -138,7 +138,7 @@ .. XXX read over this section All of this is done through another keyword argument to :func:`setup`, the -:option:`ext_modules` option. :option:`ext_modules` is just a list of +``ext_modules`` option. ``ext_modules`` is just a list of :class:`~distutils.core.Extension` instances, each of which describes a single extension module. Suppose your distribution includes a single extension, called :mod:`foo` and @@ -181,7 +181,7 @@ resulting extension lives. If you have a number of extensions all in the same package (or all under the -same base package), use the :option:`ext_package` keyword argument to +same base package), use the ``ext_package`` keyword argument to :func:`setup`. For example, :: setup(..., @@ -336,24 +336,24 @@ There are still some other options which can be used to handle special cases. -The :option:`optional` option is a boolean; if it is true, +The ``optional`` option is a boolean; if it is true, a build failure in the extension will not abort the build process, but instead simply not install the failing extension. -The :option:`extra_objects` option is a list of object files to be passed to the +The ``extra_objects`` option is a list of object files to be passed to the linker. These files must not have extensions, as the default extension for the compiler is used. 
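As a brief sketch of how these two options slot into a setup script (the project name, source file, and object-file path below are invented for the example, not taken from the changeset)::

    from distutils.core import setup, Extension

    setup(
        name='demo',
        version='1.0',
        ext_modules=[
            Extension(
                'demo',
                sources=['demo.c'],
                # pre-built object file handed to the linker, listed without
                # its extension (the compiler's default extension is assumed)
                extra_objects=['libs/helpers'],
                # if this extension fails to build, the build carries on and
                # simply skips installing it
                optional=True,
            ),
        ],
    )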
-:option:`extra_compile_args` and :option:`extra_link_args` can be used to +``extra_compile_args`` and ``extra_link_args`` can be used to specify additional command line options for the respective compiler and linker command lines. -:option:`export_symbols` is only useful on Windows. It can contain a list of +``export_symbols`` is only useful on Windows. It can contain a list of symbols (functions or variables) to be exported. This option is not needed when building compiled extensions: Distutils will automatically add ``initmodule`` to the list of exported symbols. -The :option:`depends` option is a list of files that the extension depends on +The ``depends`` option is a list of files that the extension depends on (for example header files). The build command will call the compiler on the sources to rebuild extension if any on this files has been modified since the previous build. @@ -449,7 +449,7 @@ the current interpreter location. The :option:`--executable` (or :option:`-e`) option will allow the interpreter path to be explicitly overridden. -The :option:`scripts` option simply is a list of files to be handled in this +The ``scripts`` option simply is a list of files to be handled in this way. From the PyXML setup script:: setup(..., @@ -514,11 +514,11 @@ Installing Additional Files =========================== -The :option:`data_files` option can be used to specify additional files needed +The ``data_files`` option can be used to specify additional files needed by the module distribution: configuration files, message catalogs, data files, anything which doesn't fit in the previous categories. -:option:`data_files` specifies a sequence of (*directory*, *files*) pairs in the +``data_files`` specifies a sequence of (*directory*, *files*) pairs in the following way:: setup(..., @@ -539,7 +539,7 @@ directory information from *files* is used to determine the final location of the installed file; only the name of the file is used. -You can specify the :option:`data_files` options as a simple sequence of files +You can specify the ``data_files`` options as a simple sequence of files without specifying a target directory, but this is not recommended, and the :command:`install` command will print a warning in this case. To install data files directly in the target directory, an empty string should be given as the @@ -650,7 +650,7 @@ 1.0.1a2 the second alpha release of the first patch version of 1.0 -:option:`classifiers` are specified in a Python list:: +``classifiers`` are specified in a Python list:: setup(..., classifiers=[ diff --git a/Doc/distutils/sourcedist.rst b/Doc/distutils/sourcedist.rst --- a/Doc/distutils/sourcedist.rst +++ b/Doc/distutils/sourcedist.rst @@ -72,16 +72,16 @@ generate one), the :command:`sdist` command puts a minimal default set into the source distribution: -* all Python source files implied by the :option:`py_modules` and - :option:`packages` options +* all Python source files implied by the ``py_modules`` and + ``packages`` options -* all C source files mentioned in the :option:`ext_modules` or - :option:`libraries` options +* all C source files mentioned in the ``ext_modules`` or + ``libraries`` options .. XXX getting C library sources currently broken---no :meth:`get_source_files` method in :file:`build_clib.py`! -* scripts identified by the :option:`scripts` option +* scripts identified by the ``scripts`` option See :ref:`distutils-installing-scripts`. * anything that looks like a test script: :file:`test/test\*.py` (currently, the @@ -167,7 +167,7 @@ #. 
include all Python source files in the :file:`distutils` and :file:`distutils/command` subdirectories (because packages corresponding to - those two directories were mentioned in the :option:`packages` option in the + those two directories were mentioned in the ``packages`` option in the setup script---see section :ref:`setup-script`) #. include :file:`README.txt`, :file:`setup.py`, and :file:`setup.cfg` (standard diff --git a/Doc/library/json.rst b/Doc/library/json.rst --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -576,7 +576,7 @@ The :mod:`json.tool` module provides a simple command line interface to validate and pretty-print JSON objects. -If the optional :option:`infile` and :option:`outfile` arguments are not +If the optional ``infile`` and ``outfile`` arguments are not specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively:: $ echo '{"json": "obj"}' | python -m json.tool diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst --- a/Doc/whatsnew/2.1.rst +++ b/Doc/whatsnew/2.1.rst @@ -555,7 +555,7 @@ and experiment with them. With the result experience, perhaps it'll be possible to design a really good catalog and then build support for it into Python 2.2. For example, the Distutils :command:`sdist` and :command:`bdist_\*` commands -could support a :option:`upload` option that would automatically upload your +could support a ``upload`` option that would automatically upload your package to a catalog server. You can start creating packages containing :file:`PKG-INFO` even if you're not -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:42:57 2014 From: python-checkins at python.org (georg.brandl) Date: Sat, 20 Sep 2014 22:42:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogRG9jOiBmaXggdXNh?= =?utf-8?q?ge_of_deprecated_config_value_=22unused=5Fdocs=22=2C_and_a_dupl?= =?utf-8?q?icate_object?= Message-ID: <20140920224256.49097.86876@mail.hg.python.org> https://hg.python.org/cpython/rev/f75f446dba92 changeset: 92500:f75f446dba92 branch: 2.7 user: Georg Brandl date: Sun Sep 21 00:42:40 2014 +0200 summary: Doc: fix usage of deprecated config value "unused_docs", and a duplicate object name. files: Doc/conf.py | 8 ++++---- Doc/library/repr.rst | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -40,10 +40,10 @@ today_fmt = '%B %d, %Y' # List of files that shouldn't be included in the build. -unused_docs = [ - 'maclib/scrap', - 'library/xmllib', - 'library/xml.etree', +exclude_patterns = [ + 'maclib/scrap.rst', + 'library/xmllib.rst', + 'library/xml.etree.rst', ] # Ignore .rst in Sphinx its self. diff --git a/Doc/library/repr.rst b/Doc/library/repr.rst --- a/Doc/library/repr.rst +++ b/Doc/library/repr.rst @@ -3,6 +3,7 @@ .. module:: repr :synopsis: Alternate repr() implementation with size limits. + :noindex: .. sectionauthor:: Fred L. Drake, Jr. .. 
note:: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 00:42:57 2014 From: python-checkins at python.org (georg.brandl) Date: Sat, 20 Sep 2014 22:42:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogRG9jOiByZW1vdmUg?= =?utf-8?q?invalid_uses_of_=22=3Aoption=3A=22_which_will_emit_warnings_in_?= =?utf-8?q?Sphinx_1=2E3=2E?= Message-ID: <20140920224255.31466.88356@mail.hg.python.org> https://hg.python.org/cpython/rev/c3160f275c89 changeset: 92499:c3160f275c89 branch: 2.7 parent: 92483:2b8b4943fce3 user: Georg Brandl date: Sun Sep 21 00:35:08 2014 +0200 summary: Doc: remove invalid uses of ":option:" which will emit warnings in Sphinx 1.3. files: Doc/distutils/builtdist.rst | 40 +++++++++++----------- Doc/distutils/configfile.rst | 4 +- Doc/distutils/examples.rst | 16 ++++---- Doc/distutils/extending.rst | 4 +- Doc/distutils/setupscript.rst | 36 ++++++++++---------- Doc/distutils/sourcedist.rst | 12 +++--- Doc/whatsnew/2.1.rst | 2 +- 7 files changed, 57 insertions(+), 57 deletions(-) diff --git a/Doc/distutils/builtdist.rst b/Doc/distutils/builtdist.rst --- a/Doc/distutils/builtdist.rst +++ b/Doc/distutils/builtdist.rst @@ -186,21 +186,21 @@ +------------------------------------------+----------------------------------------------+ | RPM :file:`.spec` file option or section | Distutils setup script option | +==========================================+==============================================+ -| Name | :option:`name` | +| Name | ``name`` | +------------------------------------------+----------------------------------------------+ -| Summary (in preamble) | :option:`description` | +| Summary (in preamble) | ``description`` | +------------------------------------------+----------------------------------------------+ -| Version | :option:`version` | +| Version | ``version`` | +------------------------------------------+----------------------------------------------+ -| Vendor | :option:`author` and :option:`author_email`, | -| | or --- & :option:`maintainer` and | -| | :option:`maintainer_email` | +| Vendor | ``author`` and ``author_email``, | +| | or --- & ``maintainer`` and | +| | ``maintainer_email`` | +------------------------------------------+----------------------------------------------+ -| Copyright | :option:`license` | +| Copyright | ``license`` | +------------------------------------------+----------------------------------------------+ -| Url | :option:`url` | +| Url | ``url`` | +------------------------------------------+----------------------------------------------+ -| %description (section) | :option:`long_description` | +| %description (section) | ``long_description`` | +------------------------------------------+----------------------------------------------+ Additionally, there are many options in :file:`.spec` files that don't have @@ -211,27 +211,27 @@ | RPM :file:`.spec` file option | :command:`bdist_rpm` option | default value | | or section | | | +===============================+=============================+=========================+ -| Release | :option:`release` | "1" | +| Release | ``release`` | "1" | +-------------------------------+-----------------------------+-------------------------+ -| Group | :option:`group` | "Development/Libraries" | +| Group | ``group`` | "Development/Libraries" | +-------------------------------+-----------------------------+-------------------------+ -| Vendor | :option:`vendor` | (see above) | +| Vendor | ``vendor`` | (see above) | 
+-------------------------------+-----------------------------+-------------------------+ -| Packager | :option:`packager` | (none) | +| Packager | ``packager`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Provides | :option:`provides` | (none) | +| Provides | ``provides`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Requires | :option:`requires` | (none) | +| Requires | ``requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Conflicts | :option:`conflicts` | (none) | +| Conflicts | ``conflicts`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Obsoletes | :option:`obsoletes` | (none) | +| Obsoletes | ``obsoletes`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Distribution | :option:`distribution_name` | (none) | +| Distribution | ``distribution_name`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| BuildRequires | :option:`build_requires` | (none) | +| BuildRequires | ``build_requires`` | (none) | +-------------------------------+-----------------------------+-------------------------+ -| Icon | :option:`icon` | (none) | +| Icon | ``icon`` | (none) | +-------------------------------+-----------------------------+-------------------------+ Obviously, supplying even a few of these options on the command-line would be diff --git a/Doc/distutils/configfile.rst b/Doc/distutils/configfile.rst --- a/Doc/distutils/configfile.rst +++ b/Doc/distutils/configfile.rst @@ -67,7 +67,7 @@ [...] Note that an option spelled :option:`--foo-bar` on the command-line is spelled -:option:`foo_bar` in configuration files. +``foo_bar`` in configuration files. .. _distutils-build-ext-inplace: @@ -114,7 +114,7 @@ doc/ examples/ -Note that the :option:`doc_files` option is simply a whitespace-separated string +Note that the ``doc_files`` option is simply a whitespace-separated string split across multiple lines for readability. diff --git a/Doc/distutils/examples.rst b/Doc/distutils/examples.rst --- a/Doc/distutils/examples.rst +++ b/Doc/distutils/examples.rst @@ -22,7 +22,7 @@ If you're just distributing a couple of modules, especially if they don't live in a particular package, you can specify them individually using the -:option:`py_modules` option in the setup script. +``py_modules`` option in the setup script. In the simplest case, you'll have two files to worry about: a setup script and the single module you're distributing, :file:`foo.py` in this example:: @@ -41,12 +41,12 @@ ) Note that the name of the distribution is specified independently with the -:option:`name` option, and there's no rule that says it has to be the same as +``name`` option, and there's no rule that says it has to be the same as the name of the sole module in the distribution (although that's probably a good convention to follow). However, the distribution name is used to generate filenames, so you should stick to letters, digits, underscores, and hyphens. -Since :option:`py_modules` is a list, you can of course specify multiple +Since ``py_modules`` is a list, you can of course specify multiple modules, eg. 
if you're distributing modules :mod:`foo` and :mod:`bar`, your setup might look like this:: @@ -130,7 +130,7 @@ ) If you want to put modules in directories not named for their package, then you -need to use the :option:`package_dir` option again. For example, if the +need to use the ``package_dir`` option again. For example, if the :file:`src` directory holds modules in the :mod:`foobar` package:: / @@ -169,8 +169,8 @@ (The empty string also stands for the current directory.) -If you have sub-packages, they must be explicitly listed in :option:`packages`, -but any entries in :option:`package_dir` automatically extend to sub-packages. +If you have sub-packages, they must be explicitly listed in ``packages``, +but any entries in ``package_dir`` automatically extend to sub-packages. (In other words, the Distutils does *not* scan your source tree, trying to figure out which directories correspond to Python packages by looking for :file:`__init__.py` files.) Thus, if the default layout grows a sub-package:: @@ -199,8 +199,8 @@ Single extension module ======================= -Extension modules are specified using the :option:`ext_modules` option. -:option:`package_dir` has no effect on where extension source files are found; +Extension modules are specified using the ``ext_modules`` option. +``package_dir`` has no effect on where extension source files are found; it only affects the source for pure Python modules. The simplest case, a single extension module in a single C source file, is:: diff --git a/Doc/distutils/extending.rst b/Doc/distutils/extending.rst --- a/Doc/distutils/extending.rst +++ b/Doc/distutils/extending.rst @@ -61,7 +61,7 @@ requiring modifications to the Python installation. This is expected to allow third-party extensions to provide support for additional packaging systems, but the commands can be used for anything distutils commands can be used for. A new -configuration option, :option:`command_packages` (command-line option +configuration option, ``command_packages`` (command-line option :option:`--command-packages`), can be used to specify additional packages to be searched for modules implementing commands. Like all distutils options, this can be specified on the command line or in a configuration file. This option @@ -75,7 +75,7 @@ packages searched for command implementations; multiple package names should be separated by commas. When not specified, the search is only performed in the :mod:`distutils.command` package. When :file:`setup.py` is run with the option -:option:`--command-packages` :option:`distcmds,buildcmds`, however, the packages +``--command-packages distcmds,buildcmds``, however, the packages :mod:`distutils.command`, :mod:`distcmds`, and :mod:`buildcmds` will be searched in that order. New commands are expected to be implemented in modules of the same name as the command by classes sharing the same name. Given the example diff --git a/Doc/distutils/setupscript.rst b/Doc/distutils/setupscript.rst --- a/Doc/distutils/setupscript.rst +++ b/Doc/distutils/setupscript.rst @@ -62,9 +62,9 @@ Listing whole packages ====================== -The :option:`packages` option tells the Distutils to process (build, distribute, +The ``packages`` option tells the Distutils to process (build, distribute, install, etc.) all pure Python modules found in each package mentioned in the -:option:`packages` list. In order to do this, of course, there has to be a +``packages`` list. 
In order to do this, of course, there has to be a correspondence between package names and directories in the filesystem. The default correspondence is the most obvious one, i.e. package :mod:`distutils` is found in the directory :file:`distutils` relative to the distribution root. @@ -75,7 +75,7 @@ Distutils will issue a warning but still process the broken package anyway. If you use a different convention to lay out your source directory, that's no -problem: you just have to supply the :option:`package_dir` option to tell the +problem: you just have to supply the ``package_dir`` option to tell the Distutils about your convention. For example, say you keep all Python source under :file:`lib`, so that modules in the "root package" (i.e., not in any package at all) are in :file:`lib`, modules in the :mod:`foo` package are in @@ -94,13 +94,13 @@ package_dir = {'foo': 'lib'} -A ``package: dir`` entry in the :option:`package_dir` dictionary implicitly +A ``package: dir`` entry in the ``package_dir`` dictionary implicitly applies to all packages below *package*, so the :mod:`foo.bar` case is automatically handled here. In this example, having ``packages = ['foo', 'foo.bar']`` tells the Distutils to look for :file:`lib/__init__.py` and -:file:`lib/bar/__init__.py`. (Keep in mind that although :option:`package_dir` +:file:`lib/bar/__init__.py`. (Keep in mind that although ``package_dir`` applies recursively, you must explicitly list all packages in -:option:`packages`: the Distutils will *not* recursively scan your source tree +``packages``: the Distutils will *not* recursively scan your source tree looking for any directory with an :file:`__init__.py` file.) @@ -120,7 +120,7 @@ :mod:`pkg` package. Again, the default package/directory layout implies that these two modules can be found in :file:`mod1.py` and :file:`pkg/mod2.py`, and that :file:`pkg/__init__.py` exists as well. And again, you can override the -package/directory correspondence using the :option:`package_dir` option. +package/directory correspondence using the ``package_dir`` option. .. _describing-extensions: @@ -138,7 +138,7 @@ .. XXX read over this section All of this is done through another keyword argument to :func:`setup`, the -:option:`ext_modules` option. :option:`ext_modules` is just a list of +``ext_modules`` option. ``ext_modules`` is just a list of :class:`~distutils.core.Extension` instances, each of which describes a single extension module. Suppose your distribution includes a single extension, called :mod:`foo` and @@ -181,7 +181,7 @@ resulting extension lives. If you have a number of extensions all in the same package (or all under the -same base package), use the :option:`ext_package` keyword argument to +same base package), use the ``ext_package`` keyword argument to :func:`setup`. For example, :: setup(..., @@ -336,20 +336,20 @@ There are still some other options which can be used to handle special cases. -The :option:`extra_objects` option is a list of object files to be passed to the +The ``extra_objects`` option is a list of object files to be passed to the linker. These files must not have extensions, as the default extension for the compiler is used. -:option:`extra_compile_args` and :option:`extra_link_args` can be used to +``extra_compile_args`` and ``extra_link_args`` can be used to specify additional command line options for the respective compiler and linker command lines. -:option:`export_symbols` is only useful on Windows. It can contain a list of +``export_symbols`` is only useful on Windows. 
It can contain a list of symbols (functions or variables) to be exported. This option is not needed when building compiled extensions: Distutils will automatically add ``initmodule`` to the list of exported symbols. -The :option:`depends` option is a list of files that the extension depends on +The ``depends`` option is a list of files that the extension depends on (for example header files). The build command will call the compiler on the sources to rebuild extension if any on this files has been modified since the previous build. @@ -445,7 +445,7 @@ the current interpreter location. The :option:`--executable` (or :option:`-e`) option will allow the interpreter path to be explicitly overridden. -The :option:`scripts` option simply is a list of files to be handled in this +The ``scripts`` option simply is a list of files to be handled in this way. From the PyXML setup script:: setup(..., @@ -510,11 +510,11 @@ Installing Additional Files =========================== -The :option:`data_files` option can be used to specify additional files needed +The ``data_files`` option can be used to specify additional files needed by the module distribution: configuration files, message catalogs, data files, anything which doesn't fit in the previous categories. -:option:`data_files` specifies a sequence of (*directory*, *files*) pairs in the +``data_files`` specifies a sequence of (*directory*, *files*) pairs in the following way:: setup(..., @@ -535,7 +535,7 @@ directory information from *files* is used to determine the final location of the installed file; only the name of the file is used. -You can specify the :option:`data_files` options as a simple sequence of files +You can specify the ``data_files`` options as a simple sequence of files without specifying a target directory, but this is not recommended, and the :command:`install` command will print a warning in this case. To install data files directly in the target directory, an empty string should be given as the @@ -649,7 +649,7 @@ 1.0.1a2 the second alpha release of the first patch version of 1.0 -:option:`classifiers` are specified in a Python list:: +``classifiers`` are specified in a Python list:: setup(..., classifiers=[ diff --git a/Doc/distutils/sourcedist.rst b/Doc/distutils/sourcedist.rst --- a/Doc/distutils/sourcedist.rst +++ b/Doc/distutils/sourcedist.rst @@ -71,16 +71,16 @@ generate one), the :command:`sdist` command puts a minimal default set into the source distribution: -* all Python source files implied by the :option:`py_modules` and - :option:`packages` options +* all Python source files implied by the ``py_modules`` and + ``packages`` options -* all C source files mentioned in the :option:`ext_modules` or - :option:`libraries` options +* all C source files mentioned in the ``ext_modules`` or + ``libraries`` options .. XXX Getting C library sources is currently broken -- no :meth:`get_source_files` method in :file:`build_clib.py`! -* scripts identified by the :option:`scripts` option +* scripts identified by the ``scripts`` option See :ref:`distutils-installing-scripts`. * anything that looks like a test script: :file:`test/test\*.py` (currently, the @@ -215,7 +215,7 @@ #. include all Python source files in the :file:`distutils` and :file:`distutils/command` subdirectories (because packages corresponding to - those two directories were mentioned in the :option:`packages` option in the + those two directories were mentioned in the ``packages`` option in the setup script---see section :ref:`setup-script`) #. 
include :file:`README.txt`, :file:`setup.py`, and :file:`setup.cfg` (standard diff --git a/Doc/whatsnew/2.1.rst b/Doc/whatsnew/2.1.rst --- a/Doc/whatsnew/2.1.rst +++ b/Doc/whatsnew/2.1.rst @@ -555,7 +555,7 @@ and experiment with them. With the result experience, perhaps it'll be possible to design a really good catalog and then build support for it into Python 2.2. For example, the Distutils :command:`sdist` and :command:`bdist_\*` commands -could support a :option:`upload` option that would automatically upload your +could support a ``upload`` option that would automatically upload your package to a catalog server. You can start creating packages containing :file:`PKG-INFO` even if you're not -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sun Sep 21 09:53:45 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 21 Sep 2014 09:53:45 +0200 Subject: [Python-checkins] Daily reference leaks (25c52f89ce26): sum=3 Message-ID: results for 25c52f89ce26 on branch "default" -------------------------------------------- test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 0] references, sum=0 test_site leaked [2, -2, 0] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog7JnhhZ', '-x'] From python-checkins at python.org Sun Sep 21 21:15:52 2014 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 21 Sep 2014 19:15:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogSXNzdWUgIzIxMzMyOiBFbnN1cmUgdGhhdCBgYGJ1ZnNpemU9MWBgIGlu?= =?utf-8?q?_subprocess=2EPopen=28=29_selects_line?= Message-ID: <20140921191550.62655.18143@mail.hg.python.org> https://hg.python.org/cpython/rev/763d565e5840 changeset: 92502:763d565e5840 parent: 92498:25c52f89ce26 parent: 92501:38867f90f1d9 user: Antoine Pitrou date: Sun Sep 21 21:15:42 2014 +0200 summary: Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects line buffering, rather than block buffering. files: Doc/library/subprocess.rst | 18 +++++++++---- Lib/subprocess.py | 3 +- Lib/test/test_subprocess.py | 33 +++++++++++++++++++++++++ Misc/NEWS | 3 ++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -406,12 +406,18 @@ Read the `Security Considerations`_ section before using ``shell=True``. - *bufsize* will be supplied as the corresponding argument to the :func:`open` - function when creating the stdin/stdout/stderr pipe file objects: :const:`0` - means unbuffered (read and write are one system call and can return short), - :const:`1` means line buffered, any other positive value means use a buffer - of approximately that size. A negative bufsize (the default) means the - system default of io.DEFAULT_BUFFER_SIZE will be used. + *bufsize* will be supplied as the corresponding argument to the + :func:`open` function when creating the stdin/stdout/stderr pipe + file objects: + + - :const:`0` means unbuffered (read and write are one + system call and can return short) + - :const:`1` means line buffered + (only usable if ``universal_newlines=True`` i.e., in a text mode) + - any other positive value means use a buffer of approximately that + size + - negative bufsize (the default) means the system default of + io.DEFAULT_BUFFER_SIZE will be used. .. 
versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -841,7 +841,8 @@ if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True) + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=(bufsize == 1)) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1008,6 +1008,39 @@ p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) + def _test_bufsize_equal_one(self, line, expected, universal_newlines): + # subprocess may deadlock with bufsize=1, see issue #21332 + with subprocess.Popen([sys.executable, "-c", "import sys;" + "sys.stdout.write(sys.stdin.readline());" + "sys.stdout.flush()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + bufsize=1, + universal_newlines=universal_newlines) as p: + p.stdin.write(line) # expect that it flushes the line in text mode + os.close(p.stdin.fileno()) # close it without flushing the buffer + read_line = p.stdout.readline() + try: + p.stdin.close() + except OSError: + pass + p.stdin = None + self.assertEqual(p.returncode, 0) + self.assertEqual(read_line, expected) + + def test_bufsize_equal_one_text_mode(self): + # line is flushed in text mode with bufsize=1. + # we should get the full line in return + line = "line\n" + self._test_bufsize_equal_one(line, line, universal_newlines=True) + + def test_bufsize_equal_one_binary_mode(self): + # line is not flushed in binary mode with bufsize=1. + # we should get empty response + line = b'line' + os.linesep.encode() # assume ascii-based locale + self._test_bufsize_equal_one(line, b'', universal_newlines=False) + def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:15:52 2014 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 21 Sep 2014 19:15:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxMzMy?= =?utf-8?q?=3A_Ensure_that_=60=60bufsize=3D1=60=60_in_subprocess=2EPopen?= =?utf-8?q?=28=29_selects_line?= Message-ID: <20140921191550.93791.85585@mail.hg.python.org> https://hg.python.org/cpython/rev/38867f90f1d9 changeset: 92501:38867f90f1d9 branch: 3.4 parent: 92497:7af0315bdfe0 user: Antoine Pitrou date: Sun Sep 21 21:10:56 2014 +0200 summary: Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects line buffering, rather than block buffering. 
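With this change in place, a short interactive exchange like the sketch below (the child command is only an illustration) no longer risks deadlocking on an unflushed write, because ``bufsize=1`` together with ``universal_newlines=True`` now gives a genuinely line-buffered ``stdin``::

    import subprocess
    import sys

    with subprocess.Popen([sys.executable, '-c', 'print(input())'],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          bufsize=1,                # line buffering...
                          universal_newlines=True   # ...only effective in text mode
                          ) as proc:
        proc.stdin.write('ping\n')       # flushed at the newline, no flush() needed
        print(proc.stdout.readline())    # the child echoes the line back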
files: Doc/library/subprocess.rst | 18 +++++++++---- Lib/subprocess.py | 3 +- Lib/test/test_subprocess.py | 33 +++++++++++++++++++++++++ Misc/NEWS | 3 ++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -406,12 +406,18 @@ Read the `Security Considerations`_ section before using ``shell=True``. - *bufsize* will be supplied as the corresponding argument to the :func:`open` - function when creating the stdin/stdout/stderr pipe file objects: :const:`0` - means unbuffered (read and write are one system call and can return short), - :const:`1` means line buffered, any other positive value means use a buffer - of approximately that size. A negative bufsize (the default) means the - system default of io.DEFAULT_BUFFER_SIZE will be used. + *bufsize* will be supplied as the corresponding argument to the + :func:`open` function when creating the stdin/stdout/stderr pipe + file objects: + + - :const:`0` means unbuffered (read and write are one + system call and can return short) + - :const:`1` means line buffered + (only usable if ``universal_newlines=True`` i.e., in a text mode) + - any other positive value means use a buffer of approximately that + size + - negative bufsize (the default) means the system default of + io.DEFAULT_BUFFER_SIZE will be used. .. versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -837,7 +837,8 @@ if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True) + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=(bufsize == 1)) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1008,6 +1008,39 @@ p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) + def _test_bufsize_equal_one(self, line, expected, universal_newlines): + # subprocess may deadlock with bufsize=1, see issue #21332 + with subprocess.Popen([sys.executable, "-c", "import sys;" + "sys.stdout.write(sys.stdin.readline());" + "sys.stdout.flush()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + bufsize=1, + universal_newlines=universal_newlines) as p: + p.stdin.write(line) # expect that it flushes the line in text mode + os.close(p.stdin.fileno()) # close it without flushing the buffer + read_line = p.stdout.readline() + try: + p.stdin.close() + except OSError: + pass + p.stdin = None + self.assertEqual(p.returncode, 0) + self.assertEqual(read_line, expected) + + def test_bufsize_equal_one_text_mode(self): + # line is flushed in text mode with bufsize=1. + # we should get the full line in return + line = "line\n" + self._test_bufsize_equal_one(line, line, universal_newlines=True) + + def test_bufsize_equal_one_binary_mode(self): + # line is not flushed in binary mode with bufsize=1. 
+ # we should get empty response + line = b'line' + os.linesep.encode() # assume ascii-based locale + self._test_bufsize_equal_one(line, b'', universal_newlines=False) + def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. Since EmailMessage is provisional, we can change the API in a maintenance release, but we use a trick to remain backward compatible with -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <20140921193043.88259.72769@mail.hg.python.org> https://hg.python.org/cpython/rev/b0ebb8fdadc9 changeset: 92508:b0ebb8fdadc9 parent: 92507:7ad223210f89 parent: 92506:ba5d3b4b4260 user: Serhiy Storchaka date: Sun Sep 21 22:26:31 2014 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?q?=29=3A_Merge_heads?= Message-ID: <20140921193043.91982.63911@mail.hg.python.org> https://hg.python.org/cpython/rev/7ad223210f89 changeset: 92507:7ad223210f89 parent: 92505:644b677c2ae5 parent: 92502:763d565e5840 user: Serhiy Storchaka date: Sun Sep 21 22:26:11 2014 +0300 summary: Merge heads files: Doc/library/subprocess.rst | 18 +++++++++---- Lib/subprocess.py | 3 +- Lib/test/test_subprocess.py | 33 +++++++++++++++++++++++++ Misc/NEWS | 3 ++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -406,12 +406,18 @@ Read the `Security Considerations`_ section before using ``shell=True``. - *bufsize* will be supplied as the corresponding argument to the :func:`open` - function when creating the stdin/stdout/stderr pipe file objects: :const:`0` - means unbuffered (read and write are one system call and can return short), - :const:`1` means line buffered, any other positive value means use a buffer - of approximately that size. A negative bufsize (the default) means the - system default of io.DEFAULT_BUFFER_SIZE will be used. + *bufsize* will be supplied as the corresponding argument to the + :func:`open` function when creating the stdin/stdout/stderr pipe + file objects: + + - :const:`0` means unbuffered (read and write are one + system call and can return short) + - :const:`1` means line buffered + (only usable if ``universal_newlines=True`` i.e., in a text mode) + - any other positive value means use a buffer of approximately that + size + - negative bufsize (the default) means the system default of + io.DEFAULT_BUFFER_SIZE will be used. .. 
versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -841,7 +841,8 @@ if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True) + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=(bufsize == 1)) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1008,6 +1008,39 @@ p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) + def _test_bufsize_equal_one(self, line, expected, universal_newlines): + # subprocess may deadlock with bufsize=1, see issue #21332 + with subprocess.Popen([sys.executable, "-c", "import sys;" + "sys.stdout.write(sys.stdin.readline());" + "sys.stdout.flush()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + bufsize=1, + universal_newlines=universal_newlines) as p: + p.stdin.write(line) # expect that it flushes the line in text mode + os.close(p.stdin.fileno()) # close it without flushing the buffer + read_line = p.stdout.readline() + try: + p.stdin.close() + except OSError: + pass + p.stdin = None + self.assertEqual(p.returncode, 0) + self.assertEqual(read_line, expected) + + def test_bufsize_equal_one_text_mode(self): + # line is flushed in text mode with bufsize=1. + # we should get the full line in return + line = "line\n" + self._test_bufsize_equal_one(line, line, universal_newlines=True) + + def test_bufsize_equal_one_binary_mode(self): + # line is not flushed in binary mode with bufsize=1. + # we should get empty response + line = b'line' + os.linesep.encode() # assume ascii-based locale + self._test_bufsize_equal_one(line, b'', universal_newlines=False) + def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -140,6 +140,9 @@ - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDIz?= =?utf-8?q?=3A_Unhandled_exception_in_thread_no_longer_causes_unhandled?= Message-ID: <20140921193042.31466.55430@mail.hg.python.org> https://hg.python.org/cpython/rev/176579df4edd changeset: 92504:176579df4edd branch: 3.4 parent: 92497:7af0315bdfe0 user: Serhiy Storchaka date: Sun Sep 21 22:08:13 2014 +0300 summary: Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. 
files: Lib/test/test_threading.py | 85 +++++++++++++++++++++++++- Lib/threading.py | 16 ++-- Misc/NEWS | 3 + 3 files changed, 94 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py --- a/Lib/test/test_threading.py +++ b/Lib/test/test_threading.py @@ -4,7 +4,7 @@ import test.support from test.support import verbose, strip_python_stderr, import_module, cpython_only -from test.script_helper import assert_python_ok +from test.script_helper import assert_python_ok, assert_python_failure import random import re @@ -15,7 +15,6 @@ import unittest import weakref import os -from test.script_helper import assert_python_ok, assert_python_failure import subprocess from test import lock_tests @@ -962,6 +961,88 @@ self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode()) self.assertEqual(data, expected_output) + def test_print_exception(self): + script = r"""if True: + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_1(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + sys.stderr = None + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_2(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + sys.stderr = None + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + self.assertNotIn("Unhandled exception", err.decode()) + + class TimerTests(BaseTestCase): def setUp(self): diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -248,7 +248,7 @@ def _is_owned(self): # Return True if lock is owned by current_thread. - # This method is called only if __lock doesn't have _is_owned(). + # This method is called only if _lock doesn't have _is_owned(). if self._lock.acquire(0): self._lock.release() return False @@ -749,12 +749,12 @@ """ - __initialized = False + _initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType - __exc_info = _sys.exc_info + _exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. 
#XXX __exc_clear = _sys.exc_clear @@ -926,10 +926,10 @@ # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. - if _sys: - _sys.stderr.write("Exception in thread %s:\n%s\n" % - (self.name, _format_exc())) - else: + if _sys and _sys.stderr is not None: + print("Exception in thread %s:\n%s" % + (self.name, _format_exc()), file=self._stderr) + elif self._stderr is not None: # Do the best job possible w/o a huge amt. of code to # approximate a traceback (code ideas from # Lib/traceback.py) @@ -957,7 +957,7 @@ # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. - #XXX self.__exc_clear() + #XXX self._exc_clear() pass finally: with _active_limbo_lock: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #22423: Unhandled exception in thread no longer causes unhandled + AttributeError when sys.stderr is None. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. Since EmailMessage is provisional, we can change the API in a maintenance release, but we use a trick to remain backward compatible with -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322423=3A_Unhandled_exception_in_thread_no_longe?= =?utf-8?q?r_causes_unhandled?= Message-ID: <20140921193043.56804.92298@mail.hg.python.org> https://hg.python.org/cpython/rev/644b677c2ae5 changeset: 92505:644b677c2ae5 parent: 92498:25c52f89ce26 parent: 92504:176579df4edd user: Serhiy Storchaka date: Sun Sep 21 22:09:20 2014 +0300 summary: Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. 
files: Lib/test/test_threading.py | 85 +++++++++++++++++++++++++- Lib/threading.py | 16 ++-- Misc/NEWS | 3 + 3 files changed, 94 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py --- a/Lib/test/test_threading.py +++ b/Lib/test/test_threading.py @@ -4,7 +4,7 @@ import test.support from test.support import verbose, strip_python_stderr, import_module, cpython_only -from test.script_helper import assert_python_ok +from test.script_helper import assert_python_ok, assert_python_failure import random import re @@ -15,7 +15,6 @@ import unittest import weakref import os -from test.script_helper import assert_python_ok, assert_python_failure import subprocess from test import lock_tests @@ -962,6 +961,88 @@ self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode()) self.assertEqual(data, expected_output) + def test_print_exception(self): + script = r"""if True: + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_1(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + sys.stderr = None + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_2(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + sys.stderr = None + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + self.assertNotIn("Unhandled exception", err.decode()) + + class TimerTests(BaseTestCase): def setUp(self): diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -251,7 +251,7 @@ def _is_owned(self): # Return True if lock is owned by current_thread. - # This method is called only if __lock doesn't have _is_owned(). + # This method is called only if _lock doesn't have _is_owned(). if self._lock.acquire(0): self._lock.release() return False @@ -752,12 +752,12 @@ """ - __initialized = False + _initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType - __exc_info = _sys.exc_info + _exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. 
#XXX __exc_clear = _sys.exc_clear @@ -929,10 +929,10 @@ # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. - if _sys: - _sys.stderr.write("Exception in thread %s:\n%s\n" % - (self.name, _format_exc())) - else: + if _sys and _sys.stderr is not None: + print("Exception in thread %s:\n%s" % + (self.name, _format_exc()), file=self._stderr) + elif self._stderr is not None: # Do the best job possible w/o a huge amt. of code to # approximate a traceback (code ideas from # Lib/traceback.py) @@ -960,7 +960,7 @@ # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. - #XXX self.__exc_clear() + #XXX self._exc_clear() pass finally: with _active_limbo_lock: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22423: Unhandled exception in thread no longer causes unhandled + AttributeError when sys.stderr is None. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge_heads?= Message-ID: <20140921193043.62823.52216@mail.hg.python.org> https://hg.python.org/cpython/rev/ba5d3b4b4260 changeset: 92506:ba5d3b4b4260 branch: 3.4 parent: 92504:176579df4edd parent: 92501:38867f90f1d9 user: Serhiy Storchaka date: Sun Sep 21 22:25:25 2014 +0300 summary: Merge heads files: Doc/library/subprocess.rst | 18 +++++++++---- Lib/subprocess.py | 3 +- Lib/test/test_subprocess.py | 33 +++++++++++++++++++++++++ Misc/NEWS | 3 ++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -406,12 +406,18 @@ Read the `Security Considerations`_ section before using ``shell=True``. - *bufsize* will be supplied as the corresponding argument to the :func:`open` - function when creating the stdin/stdout/stderr pipe file objects: :const:`0` - means unbuffered (read and write are one system call and can return short), - :const:`1` means line buffered, any other positive value means use a buffer - of approximately that size. A negative bufsize (the default) means the - system default of io.DEFAULT_BUFFER_SIZE will be used. + *bufsize* will be supplied as the corresponding argument to the + :func:`open` function when creating the stdin/stdout/stderr pipe + file objects: + + - :const:`0` means unbuffered (read and write are one + system call and can return short) + - :const:`1` means line buffered + (only usable if ``universal_newlines=True`` i.e., in a text mode) + - any other positive value means use a buffer of approximately that + size + - negative bufsize (the default) means the system default of + io.DEFAULT_BUFFER_SIZE will be used. .. 
versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -837,7 +837,8 @@ if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True) + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=(bufsize == 1)) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1008,6 +1008,39 @@ p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) + def _test_bufsize_equal_one(self, line, expected, universal_newlines): + # subprocess may deadlock with bufsize=1, see issue #21332 + with subprocess.Popen([sys.executable, "-c", "import sys;" + "sys.stdout.write(sys.stdin.readline());" + "sys.stdout.flush()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + bufsize=1, + universal_newlines=universal_newlines) as p: + p.stdin.write(line) # expect that it flushes the line in text mode + os.close(p.stdin.fileno()) # close it without flushing the buffer + read_line = p.stdout.readline() + try: + p.stdin.close() + except OSError: + pass + p.stdin = None + self.assertEqual(p.returncode, 0) + self.assertEqual(read_line, expected) + + def test_bufsize_equal_one_text_mode(self): + # line is flushed in text mode with bufsize=1. + # we should get the full line in return + line = "line\n" + self._test_bufsize_equal_one(line, line, universal_newlines=True) + + def test_bufsize_equal_one_binary_mode(self): + # line is not flushed in binary mode with bufsize=1. + # we should get empty response + line = b'line' + os.linesep.encode() # assume ascii-based locale + self._test_bufsize_equal_one(line, b'', universal_newlines=False) + def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -35,6 +35,9 @@ - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + - Issue #21091: Fix API bug: email.message.EmailMessage.is_attachment is now a method. Since EmailMessage is provisional, we can change the API in a maintenance release, but we use a trick to remain backward compatible with -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:30:45 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:30:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDIz?= =?utf-8?q?=3A_Unhandled_exception_in_thread_no_longer_causes_unhandled?= Message-ID: <20140921193042.11345.28128@mail.hg.python.org> https://hg.python.org/cpython/rev/4baa474b4f31 changeset: 92503:4baa474b4f31 branch: 2.7 parent: 92500:f75f446dba92 user: Serhiy Storchaka date: Sun Sep 21 22:08:00 2014 +0300 summary: Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. 
files: Lib/test/test_threading.py | 79 ++++++++++++++++++++++++++ Lib/threading.py | 8 +- Misc/NEWS | 4 + 3 files changed, 87 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py --- a/Lib/test/test_threading.py +++ b/Lib/test/test_threading.py @@ -829,6 +829,85 @@ thread.start() self.assertRaises(RuntimeError, setattr, thread, "daemon", True) + def test_print_exception(self): + script = r"""if 1: + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, '') + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_1(self): + script = r"""if 1: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + sys.stderr = None + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, '') + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_2(self): + script = r"""if 1: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + sys.stderr = None + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, '') + self.assertNotIn("Unhandled exception", err) + class LockTests(lock_tests.LockTests): locktype = staticmethod(threading.Lock) diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -818,10 +818,10 @@ # shutdown) use self.__stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. - if _sys: - _sys.stderr.write("Exception in thread %s:\n%s\n" % - (self.name, _format_exc())) - else: + if _sys and _sys.stderr is not None: + print>>_sys.stderr, ("Exception in thread %s:\n%s" % + (self.name, _format_exc())) + elif self.__stderr is not None: # Do the best job possible w/o a huge amt. of code to # approximate a traceback (code ideas from # Lib/traceback.py) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -21,6 +21,10 @@ Library ------- + +- Issue #22423: Unhandled exception in thread no longer causes unhandled + AttributeError when sys.stderr is None. + - Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. 
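
A usage-level note on the subprocess buffering change merged earlier in this batch (issue #21332, changesets ba5d3b4b4260 and 7ad223210f89): after that patch, bufsize=1 gives line buffering only for text-mode pipes, i.e. together with universal_newlines=True. A minimal sketch of the resulting behaviour, assuming Python 3.4 with the patch applied; the echo child is adapted from the new tests:

    import subprocess
    import sys

    child = [sys.executable, "-c",
             "import sys; sys.stdout.write(sys.stdin.readline());"
             "sys.stdout.flush()"]

    # Text mode + bufsize=1: stdin's TextIOWrapper is created with
    # line_buffering=True, so writing a full line reaches the child without
    # an explicit flush.  Before the patch this could deadlock (issue #21332).
    with subprocess.Popen(child, stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          bufsize=1, universal_newlines=True) as p:
        p.stdin.write("ping\n")                 # flushed by line buffering
        assert p.stdout.readline() == "ping\n"
        p.stdin.close()

    # With universal_newlines=False (binary pipes), the new test shows the
    # written line stays in the parent's buffer until an explicit flush()
    # or close(): bufsize=1 does not mean line buffering there.
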
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:52:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:52:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDIz?= =?utf-8?q?=3A_Fixed_debugging_output_of_the_GROUPREF=5FEXISTS_opcode_in_t?= =?utf-8?q?he_re?= Message-ID: <20140921195157.56804.45378@mail.hg.python.org> https://hg.python.org/cpython/rev/c925b436467a changeset: 92509:c925b436467a branch: 2.7 parent: 92503:4baa474b4f31 user: Serhiy Storchaka date: Sun Sep 21 22:47:30 2014 +0300 summary: Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. files: Lib/sre_parse.py | 45 +++++++++++++++++++------------- Lib/test/test_re.py | 29 ++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 53 insertions(+), 24 deletions(-) diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -94,33 +94,42 @@ self.data = data self.width = None def dump(self, level=0): - nl = 1 - seqtypes = type(()), type([]) + seqtypes = (tuple, list) for op, av in self.data: - print level*" " + op,; nl = 0 - if op == "in": + print level*" " + op, + if op == IN: # member sublanguage - print; nl = 1 + print for op, a in av: print (level+1)*" " + op, a - elif op == "branch": - print; nl = 1 - i = 0 - for a in av[1]: - if i > 0: + elif op == BRANCH: + print + for i, a in enumerate(av[1]): + if i: print level*" " + "or" - a.dump(level+1); nl = 1 - i = i + 1 - elif type(av) in seqtypes: + a.dump(level+1) + elif op == GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print condgroup + item_yes.dump(level+1) + if item_no: + print level*" " + "else" + item_no.dump(level+1) + elif isinstance(av, seqtypes): + nl = 0 for a in av: if isinstance(a, SubPattern): - if not nl: print - a.dump(level+1); nl = 1 + if not nl: + print + a.dump(level+1) + nl = 1 else: - print a, ; nl = 0 + print a, + nl = 0 + if not nl: + print else: - print av, ; nl = 0 - if not nl: print + print av def __repr__(self): return repr(self.data) def __len__(self): diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -930,16 +930,33 @@ self.assertEqual(m.group(2), "y") def test_debug_flag(self): + pat = r'(\.)(?:[ch]|py)(?(1)$|: )' with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102', 'literal 111', 'literal 111']) + re.compile(pat, re.DEBUG) + dump = '''\ +subpattern 1 + literal 46 +subpattern None + branch + in + literal 99 + literal 104 + or + literal 112 + literal 121 +subpattern None + groupref_exists 1 + at at_end + else + literal 58 + literal 32 +''' + self.assertEqual(out.getvalue(), dump) # Debug output is output again even a second time (bypassing # the cache -- issue #20426). with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102', 'literal 111', 'literal 111']) + re.compile(pat, re.DEBUG) + self.assertEqual(out.getvalue(), dump) def test_keyword_parameters(self): # Issue #20283: Accepting the string keyword parameter. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. + - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. 
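
The effect of the SubPattern.dump() rewrite above is easiest to see interactively: compiling the pattern used in the updated test_debug_flag with re.DEBUG now prints the conditional-group branch that was previously reported incorrectly. A small sketch, with the expected excerpt copied from the new test:

    import re

    # Pattern from the updated test_debug_flag: a conditional group
    # reference (?(1)...|...) exercises the GROUPREF_EXISTS opcode.
    re.compile(r'(\.)(?:[ch]|py)(?(1)$|: )', re.DEBUG)

    # Expected excerpt of the dump after the fix (from test_re.py):
    #   subpattern None
    #     groupref_exists 1
    #       at at_end
    #     else
    #       literal 58
    #       literal 32
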
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:52:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:52:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322423=3A_Fixed_debugging_output_of_the_GROUPREF?= =?utf-8?q?=5FEXISTS_opcode_in_the_re?= Message-ID: <20140921195157.61462.82784@mail.hg.python.org> https://hg.python.org/cpython/rev/fe287268e97b changeset: 92511:fe287268e97b parent: 92508:b0ebb8fdadc9 parent: 92510:e99a1df8db36 user: Serhiy Storchaka date: Sun Sep 21 22:48:24 2014 +0300 summary: Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. files: Lib/sre_parse.py | 44 +++++++++++++++++++++------------ Lib/test/test_re.py | 29 +++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 54 insertions(+), 22 deletions(-) diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -94,33 +94,45 @@ self.data = data self.width = None def dump(self, level=0): - nl = 1 + nl = True seqtypes = (tuple, list) for op, av in self.data: - print(level*" " + op, end=' '); nl = 0 - if op == "in": + print(level*" " + op, end='') + if op == IN: # member sublanguage - print(); nl = 1 + print() for op, a in av: print((level+1)*" " + op, a) - elif op == "branch": - print(); nl = 1 - i = 0 - for a in av[1]: - if i > 0: + elif op == BRANCH: + print() + for i, a in enumerate(av[1]): + if i: print(level*" " + "or") - a.dump(level+1); nl = 1 - i = i + 1 + a.dump(level+1) + elif op == GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "else") + item_no.dump(level+1) elif isinstance(av, seqtypes): + nl = False for a in av: if isinstance(a, SubPattern): - if not nl: print() - a.dump(level+1); nl = 1 + if not nl: + print() + a.dump(level+1) + nl = True else: - print(a, end=' ') ; nl = 0 + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() else: - print(av, end=' ') ; nl = 0 - if not nl: print() + print('', av) def __repr__(self): return repr(self.data) def __len__(self): diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -1203,16 +1203,33 @@ self.assertEqual(m.group(2), "y") def test_debug_flag(self): + pat = r'(\.)(?:[ch]|py)(?(1)$|: )' with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + dump = '''\ +subpattern 1 + literal 46 +subpattern None + branch + in + literal 99 + literal 104 + or + literal 112 + literal 121 +subpattern None + groupref_exists 1 + at at_end + else + literal 58 + literal 32 +''' + self.assertEqual(out.getvalue(), dump) # Debug output is output again even a second time (bypassing # the cache -- issue #20426). with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + self.assertEqual(out.getvalue(), dump) def test_keyword_parameters(self): # Issue #20283: Accepting the string keyword parameter. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. 
+ - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 21:52:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 19:52:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDIz?= =?utf-8?q?=3A_Fixed_debugging_output_of_the_GROUPREF=5FEXISTS_opcode_in_t?= =?utf-8?q?he_re?= Message-ID: <20140921195157.17735.62071@mail.hg.python.org> https://hg.python.org/cpython/rev/e99a1df8db36 changeset: 92510:e99a1df8db36 branch: 3.4 parent: 92506:ba5d3b4b4260 user: Serhiy Storchaka date: Sun Sep 21 22:47:55 2014 +0300 summary: Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. files: Lib/sre_parse.py | 44 +++++++++++++++++++++------------ Lib/test/test_re.py | 29 +++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 54 insertions(+), 22 deletions(-) diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -94,33 +94,45 @@ self.data = data self.width = None def dump(self, level=0): - nl = 1 + nl = True seqtypes = (tuple, list) for op, av in self.data: - print(level*" " + op, end=' '); nl = 0 - if op == "in": + print(level*" " + op, end='') + if op == IN: # member sublanguage - print(); nl = 1 + print() for op, a in av: print((level+1)*" " + op, a) - elif op == "branch": - print(); nl = 1 - i = 0 - for a in av[1]: - if i > 0: + elif op == BRANCH: + print() + for i, a in enumerate(av[1]): + if i: print(level*" " + "or") - a.dump(level+1); nl = 1 - i = i + 1 + a.dump(level+1) + elif op == GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "else") + item_no.dump(level+1) elif isinstance(av, seqtypes): + nl = False for a in av: if isinstance(a, SubPattern): - if not nl: print() - a.dump(level+1); nl = 1 + if not nl: + print() + a.dump(level+1) + nl = True else: - print(a, end=' ') ; nl = 0 + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() else: - print(av, end=' ') ; nl = 0 - if not nl: print() + print('', av) def __repr__(self): return repr(self.data) def __len__(self): diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -1203,16 +1203,33 @@ self.assertEqual(m.group(2), "y") def test_debug_flag(self): + pat = r'(\.)(?:[ch]|py)(?(1)$|: )' with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + dump = '''\ +subpattern 1 + literal 46 +subpattern None + branch + in + literal 99 + literal 104 + or + literal 112 + literal 121 +subpattern None + groupref_exists 1 + at at_end + else + literal 58 + literal 32 +''' + self.assertEqual(out.getvalue(), dump) # Debug output is output again even a second time (bypassing # the cache -- issue #20426). with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + self.assertEqual(out.getvalue(), dump) def test_keyword_parameters(self): # Issue #20283: Accepting the string keyword parameter. 
diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. + - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 22:02:10 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 20:02:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Fixed_issue_?= =?utf-8?q?=2322415_number_in_Misc/NEWS_for_changeset_c925b436467a=2E?= Message-ID: <20140921200209.97868.50561@mail.hg.python.org> https://hg.python.org/cpython/rev/d4630b1c8792 changeset: 92512:d4630b1c8792 branch: 2.7 parent: 92509:c925b436467a user: Serhiy Storchaka date: Sun Sep 21 22:57:45 2014 +0300 summary: Fixed issue #22415 number in Misc/NEWS for changeset c925b436467a. files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,7 +22,7 @@ Library ------- -- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re +- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. - Issue #22423: Unhandled exception in thread no longer causes unhandled -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 22:02:10 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 20:02:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fixed_issue_?= =?utf-8?q?=2322415_number_in_Misc/NEWS_for_changeset_e99a1df8db36=2E?= Message-ID: <20140921200210.43654.54835@mail.hg.python.org> https://hg.python.org/cpython/rev/7b92518b2c21 changeset: 92513:7b92518b2c21 branch: 3.4 parent: 92510:e99a1df8db36 user: Serhiy Storchaka date: Sun Sep 21 22:58:31 2014 +0300 summary: Fixed issue #22415 number in Misc/NEWS for changeset e99a1df8db36. files: Misc/NEWS | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,8 +32,8 @@ Library ------- -- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re - module. +- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. Removed trailing spaces in debugging output. - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 21 22:02:10 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 21 Sep 2014 20:02:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fixed_issue_=2322415_number_in_Misc/NEWS_for_changeset_f?= =?utf-8?q?e287268e97b=2E?= Message-ID: <20140921200210.93791.33013@mail.hg.python.org> https://hg.python.org/cpython/rev/fb93a04832df changeset: 92514:fb93a04832df parent: 92511:fe287268e97b parent: 92513:7b92518b2c21 user: Serhiy Storchaka date: Sun Sep 21 22:59:06 2014 +0300 summary: Fixed issue #22415 number in Misc/NEWS for changeset fe287268e97b. files: Misc/NEWS | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,8 +137,8 @@ Library ------- -- Issue #22423: Fixed debugging output of the GROUPREF_EXISTS opcode in the re - module. 
+- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. Removed trailing spaces in debugging output. - Issue #22423: Unhandled exception in thread no longer causes unhandled AttributeError when sys.stderr is None. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 00:44:27 2014 From: python-checkins at python.org (jesus.cea) Date: Sun, 21 Sep 2014 22:44:27 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_MERGE=3A_Typo=3A_headeronly_-=3E_headersonly?= Message-ID: <20140921224426.102826.82348@mail.hg.python.org> https://hg.python.org/cpython/rev/850a62354402 changeset: 92516:850a62354402 parent: 92514:fb93a04832df parent: 92515:f674f16a70bc user: Jesus Cea date: Mon Sep 22 00:44:17 2014 +0200 summary: MERGE: Typo: headeronly -> headersonly files: Doc/library/email.parser.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst --- a/Doc/library/email.parser.rst +++ b/Doc/library/email.parser.rst @@ -181,7 +181,7 @@ .. versionchanged:: 3.3 Removed the *strict* argument. Added the *policy* keyword. - .. method:: parse(fp, headeronly=False) + .. method:: parse(fp, headersonly=False) Read all the data from the binary file-like object *fp*, parse the resulting bytes, and return the message object. *fp* must support -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 00:44:27 2014 From: python-checkins at python.org (jesus.cea) Date: Sun, 21 Sep 2014 22:44:27 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogVHlwbzogaGVhZGVy?= =?utf-8?q?only_-=3E_headersonly?= Message-ID: <20140921224426.41167.21297@mail.hg.python.org> https://hg.python.org/cpython/rev/f674f16a70bc changeset: 92515:f674f16a70bc branch: 3.4 parent: 92513:7b92518b2c21 user: Jesus Cea date: Mon Sep 22 00:43:39 2014 +0200 summary: Typo: headeronly -> headersonly files: Doc/library/email.parser.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst --- a/Doc/library/email.parser.rst +++ b/Doc/library/email.parser.rst @@ -181,7 +181,7 @@ .. versionchanged:: 3.3 Removed the *strict* argument. Added the *policy* keyword. - .. method:: parse(fp, headeronly=False) + .. method:: parse(fp, headersonly=False) Read all the data from the binary file-like object *fp*, parse the resulting bytes, and return the message object. 
*fp* must support -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Mon Sep 22 08:55:50 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 22 Sep 2014 08:55:50 +0200 Subject: [Python-checkins] Daily reference leaks (850a62354402): sum=1 Message-ID: results for 850a62354402 on branch "default" -------------------------------------------- test_collections leaked [0, 2, 0] references, sum=2 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [-2, 0, 0] references, sum=-2 test_site leaked [-2, 0, 0] memory blocks, sum=-2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog6VAGJ8', '-x'] From python-checkins at python.org Mon Sep 22 09:49:27 2014 From: python-checkins at python.org (senthil.kumaran) Date: Mon, 22 Sep 2014 07:49:27 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322278=3A_Fix_urlj?= =?utf-8?q?oin_problem_with_relative_urls=2C_a_regression_observed?= Message-ID: <20140922074927.100843.69157@mail.hg.python.org> https://hg.python.org/cpython/rev/901e4e52b20a changeset: 92517:901e4e52b20a user: Senthil Kumaran date: Mon Sep 22 15:49:16 2014 +0800 summary: Issue #22278: Fix urljoin problem with relative urls, a regression observed after changes to issue22118 were submitted. Patch contributed by Demian Brecht and reviewed by Antoine Pitrou. files: Lib/test/test_urlparse.py | 12 ++++++++++++ Lib/urllib/parse.py | 6 +++++- Misc/NEWS | 3 +++ 3 files changed, 20 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py --- a/Lib/test/test_urlparse.py +++ b/Lib/test/test_urlparse.py @@ -380,6 +380,18 @@ # self.checkJoin(SIMPLE_BASE, '../../../g','http://a/../g') # self.checkJoin(SIMPLE_BASE, '/./g','http://a/./g') + # test for issue22118 duplicate slashes + self.checkJoin(SIMPLE_BASE + '/', 'foo', SIMPLE_BASE + '/foo') + + # Non-RFC-defined tests, covering variations of base and trailing + # slashes + self.checkJoin('http://a/b/c/d/e/', '../../f/g/', 'http://a/b/c/f/g/') + self.checkJoin('http://a/b/c/d/e', '../../f/g/', 'http://a/b/f/g/') + self.checkJoin('http://a/b/c/d/e/', '/../../f/g/', 'http://a/f/g/') + self.checkJoin('http://a/b/c/d/e', '/../../f/g/', 'http://a/f/g/') + self.checkJoin('http://a/b/c/d/e/', '../../f/g', 'http://a/b/c/f/g') + self.checkJoin('http://a/b/', '../../f/g/', 'http://a/f/g/') + def test_RFC2732(self): str_cases = [ ('http://Test.python.org:5432/foo/', 'test.python.org', 5432), diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py --- a/Lib/urllib/parse.py +++ b/Lib/urllib/parse.py @@ -443,6 +443,10 @@ segments = path.split('/') else: segments = base_parts + path.split('/') + # filter out elements that would cause redundant slashes on re-joining + # the resolved_path + segments = segments[0:1] + [ + s for s in segments[1:-1] if len(s) > 0] + segments[-1:] resolved_path = [] @@ -465,7 +469,7 @@ resolved_path.append('') return _coerce_result(urlunparse((scheme, netloc, '/'.join( - resolved_path), params, query, fragment))) + resolved_path) or '/', params, query, fragment))) def urldefrag(url): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22278: Fix urljoin problem with relative urls, a regression observed + after changes to issue22118 were submitted. + - Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. Removed trailing spaces in debugging output. 
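
The checkJoin cases added above translate directly into plain urljoin calls. A short sketch of the behaviour the fix restores; expected values follow the new tests, and the first line assumes SIMPLE_BASE is the conventional 'http://a/b/c/d':

    from urllib.parse import urljoin

    # Duplicate-slash regression from issue #22118: a base ending in '/'
    # must not yield '//' when a relative name is joined onto it
    # (cf. the SIMPLE_BASE + '/' test above).
    assert urljoin('http://a/b/c/d/', 'foo') == 'http://a/b/c/d/foo'

    # Relative references with '..' segments, varying the trailing slashes
    # on both the base and the reference (issue #22278 test cases).
    assert urljoin('http://a/b/c/d/e/', '../../f/g/')  == 'http://a/b/c/f/g/'
    assert urljoin('http://a/b/c/d/e',  '../../f/g/')  == 'http://a/b/f/g/'
    assert urljoin('http://a/b/c/d/e/', '/../../f/g/') == 'http://a/f/g/'
    assert urljoin('http://a/b/c/d/e/', '../../f/g')   == 'http://a/b/c/f/g'
    assert urljoin('http://a/b/',       '../../f/g/')  == 'http://a/f/g/'
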
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 16:21:30 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 14:21:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Updated_pydoc_?= =?utf-8?q?topics_for_3=2E4=2E1rc1_release=2E?= Message-ID: <20140922142130.19539.15107@mail.hg.python.org> https://hg.python.org/cpython/rev/ba1dd18c3089 changeset: 92518:ba1dd18c3089 branch: 3.4 parent: 92497:7af0315bdfe0 user: Larry Hastings date: Sun Sep 21 00:05:05 2014 +0100 summary: Updated pydoc topics for 3.4.1rc1 release. files: Lib/pydoc_data/topics.py | 156 +++++++++++++------------- 1 files changed, 78 insertions(+), 78 deletions(-) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,79 +1,79 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sat May 17 21:42:09 2014 -topics = {'assert': '\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', - 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. 
The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an asterisk,\n called a "starred" target: The object must be a sequence with at\n least as many items as there are targets in the target list, minus\n one. The first items of the sequence are assigned, from left to\n right, to the targets before the starred target. The final items\n of the sequence are assigned to the targets after the starred\n target. A list of the remaining items in the sequence is then\n assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of items\n as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global namespace\n or the outer namespace determined by "nonlocal", respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. 
Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to integers. If either bound is\n negative, the sequence\'s length is added to it. The resulting\n bounds are clipped to lie between zero and the sequence\'s length,\n inclusive. Finally, the sequence object is asked to replace the\n slice with the items of the assigned sequence. The length of the\n slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints "[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print(x)\n\nSee also:\n\n **PEP 3132** - Extended Iterable Unpacking\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', - 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. 
If the class name consists only of underscores, no\ntransformation is done.\n', - 'atom-literals': "\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", - 'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. 
This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. 
If weak\n reference support is needed, then add "\'__weakref__\'" to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', - 'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier (which can\nbe customized by overriding the "__getattr__()" method). If this\nattribute is not available, the exception "AttributeError" is raised.\nOtherwise, the type and value of the object produced is determined by\nthe object. Multiple evaluations of the same attribute reference may\nyield different objects.\n', - 'augassign': '\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. 
Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', - 'binary': '\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)". Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. 
In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', - 'bitwise': '\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', - 'bltin-code-objects': '\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "__code__" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec()" or "eval()" built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', - 'bltin-ellipsis-object': '\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the\n"Ellipsis" singleton.\n\nIt is written as "Ellipsis" or "...".\n', - 'bltin-null-object': '\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name). "type(None)()" produces the\nsame singleton.\n\nIt is written as "None".\n', - 'bltin-type-objects': '\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<class \'int\'>".\n', - 'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. 
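    [Illustrative sketch, not part of the reference text above: the floor-division/modulo identities quoted in the 'binary' entry, checked for a remainder that takes the sign of the second operand.]

    x, y = 7, -3
    assert x // y == -3 and x % y == -2     # remainder has the sign of y
    assert x == (x // y) * y + (x % y)      # the identity quoted above
    assert divmod(x, y) == (x // y, x % y)  # divmod() bundles both results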
User-defined objects can customize their truth value by\nproviding a "__bool__()" method.\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n', - 'break': '\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', - 'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', - 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. 
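    [Illustrative sketch, not from the reference text above: the 'callable-types' entry's "__call__()" shorthand, using an invented "Adder" class.]

    class Adder:
        """Instances behave like functions via __call__()."""

        def __init__(self, n):
            self.n = n

        def __call__(self, x):
            # Adder(3)(4) is shorthand for Adder(3).__call__(4)
            return self.n + x

    add3 = Adder(3)
    assert add3(4) == 7
    assert callable(add3)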
Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. 
How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', - 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. 
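    [Illustrative sketch, not from the reference text above: the programmer's note in the 'class' entry about class attributes shared by instances and mutable defaults; "Widget" is an invented name.]

    class Widget:
        items = []                  # class attribute, shared by all instances

        def __init__(self, label):
            self.label = label      # instance attribute

    a, b = Widget('a'), Widget('b')
    a.items.append(1)               # mutates the shared class attribute
    assert b.items == [1]           # the change is visible through every instance

    a.items = [2]                   # assignment creates an instance attribute on "a"
    assert Widget.items == [1]      # the class attribute is now hidden on "a" only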
*Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a "finally" clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a "return", "continue", or "break"\n statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s "__doc__" attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. They are\n identical to themselves, "x is x" but are not equal to themselves,\n "x != x". Additionally, comparing any value to a not-a-number value\n will return "False". For example, both "3 < float(\'NaN\')" and\n "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function "ord()") of their characters.\n [3] String and bytes objects can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. 
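    [Illustrative sketch, not from the reference text above: chained comparisons and the not-a-number behavior described in the 'comparisons' entry; plain interpreter-level Python.]

    x, y, z = 1, 2, 3
    assert (x < y <= z) == (x < y and y <= z)   # chaining; "y" is evaluated once

    nan = float('nan')
    assert nan != nan                           # NaN is not equal to itself
    assert not (nan < 3) and not (3 < nan)      # ordering against NaN is False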
This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of differing types depends on whether either\nof the types provides explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise an "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', - 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. 
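    [Illustrative sketch, not from the reference text above: membership tests via a user-defined "__contains__()" and the built-in substring test, as described in the 'comparisons' entry; "EvenNumbers" is an invented name.]

    class EvenNumbers:
        """Membership defined by __contains__()."""

        def __contains__(self, item):
            return isinstance(item, int) and item % 2 == 0

    evens = EvenNumbers()
    assert 4 in evens
    assert 5 not in evens           # "not in" is the inverse of "in"
    assert 'py' in 'python'         # substring test for strings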
In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. 
A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. 
For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... 
return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item") is\n evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return value\n from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()" method\n returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n "with" statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. 
by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. 
Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a "finally" clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a "return", "continue", or "break"\n statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s "__doc__" attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. 
The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n "with" statement.\n', - 'continue': '\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', - 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', - 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. 
It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which "__del__()"\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to "sys.stderr" instead.\n Also, when "__del__()" is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the "__del__()" method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, "__del__()"\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. 
If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "xy" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
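A short, hypothetical sketch of the advice above: define "__eq__()" plus one ordering method and let "functools.total_ordering()" supply the rest; the Version class is invented for illustration.

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.major, self.minor = major, minor

        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented   # let the other operand try
            return (self.major, self.minor) == (other.major, other.minor)

        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) < (other.major, other.minor)

    print(Version(3, 4) <= Version(3, 5))   # True; __le__ comes from total_ordering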
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s custom\n "__hash__()" method to the size of a "Py_ssize_t". This is\n typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds.\n If an object\'s "__hash__()" must interoperate on builds of\n different bit sizes, be sure to check the width on all supported\n builds. An easy way to do this is with "python -c "import sys;\n print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable").\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n .__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. 
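To make the hashing rules above concrete, a minimal sketch (invented for this note) of a class that overrides "__eq__()" and defines a matching "__hash__()" so its instances remain usable as dictionary keys:

    class Color:
        def __init__(self, r, g, b):
            self.r, self.g, self.b = r, g, b

        def __eq__(self, other):
            if not isinstance(other, Color):
                return NotImplemented
            return (self.r, self.g, self.b) == (other.r, other.g, other.b)

        def __hash__(self):
            # Mix the components that take part in __eq__, as recommended.
            return hash((self.r, self.g, self.b))

    palette = {Color(255, 0, 0): 'red'}
    print(palette[Color(255, 0, 0)])    # 'red' -- equal objects hash equal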
A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n', - 'debugger': '\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > (0)?()\n (Pdb) continue\n > (1)?()\n (Pdb) continue\n NameError: \'spam\'\n > (1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
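Expanding slightly on the one-liner above, a sketch (function and data invented) of dropping into the debugger from inside a running function:

    def compute_total(prices):
        total = 0
        for price in prices:
            import pdb; pdb.set_trace()   # execution stops here at the (Pdb) prompt
            total += price
        return total

    compute_total([1.5, 2.25, 3.0])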
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. 
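A hedged usage sketch of "post_mortem()" as described above, entering the debugger in the frame where an exception was raised (names invented):

    import pdb
    import sys

    def risky():
        return 1 / 0

    try:
        risky()
    except ZeroDivisionError:
        # Inspect the frame that raised the exception.
        pdb.post_mortem(sys.exc_info()[2])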
Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. 
The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. 
If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". 
If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command ---\n this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the "__name__" in the frame globals.\n', - 'del': '\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', - 'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. 
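A small sketch (not part of the reference text) of supplying explicit namespaces to "exec()" and "eval()", as discussed in this paragraph and the footnote that follows:

    globals_ns = {'x': 10}
    locals_ns = {}

    exec("y = x + 1", globals_ns, locals_ns)   # 'x' is resolved in globals_ns
    print(locals_ns['y'])                      # 11

    # With a single namespace, it serves as both globals and locals.
    print(eval("x * 2", {'x': 21}))            # 42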
[1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', - 'else': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', - 'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', - 'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. 
A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. 
the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. 
The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their contents\n may change from one version of Python to the next without warning\n and should not be relied on by code which will run under multiple\n versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n', - 'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', - 'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', - 'for': '\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". 
The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', - 'formatstrings': '\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s" | "a"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". 
These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. 
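A brief illustration (values invented) of passing a spec directly to "format()" and of nesting replacement fields inside the *format_spec* so that width and precision are chosen at run time:

   >>> format(3.14159, '.2f')
   '3.14'
   >>> '{:{width}.{prec}f}'.format(3.14159, width=10, prec=3)
   '     3.142'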
Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. 
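For example (a quick check, not from the original text), the "'#'" option adds the base prefix for integers and keeps the decimal point for floats:

   >>> format(255, 'x'), format(255, '#x')
   ('ff', '0xff')
   >>> format(1.0, '.0f'), format(1.0, '#.0f')
   ('1', '1.')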
In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. Outputs the number in base 16, using lower- |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). 
When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'", but converts "nan" to "NAN" |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n +-----------+------------------------------------------------------------+\n | None | Similar to "\'g\'", except with at least one digit past the |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', - 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
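For illustration, a short session along these lines shows the binding (the
name "greet" is arbitrary):

   >>> def greet():
   ...     return 'hello'
   ...
   >>> type(greet)
   <class 'function'>
   >>> greet.__globals__ is globals()
   True
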
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. 
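For instance, a sketch like the following (the parameter names and
annotation values are arbitrary) runs exactly as it would without the
annotations, which are merely collected on the function object:

   >>> def scale(x: int, factor: 'multiplier' = 2) -> int:
   ...     return x * factor
   ...
   >>> scale(3)
   6
   >>> scale.__annotations__['x']
   <class 'int'>
   >>> scale.__annotations__['return']
   <class 'int'>
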
The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n', - 'global': '\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call. The same applies to the "eval()"\nand "compile()" functions.\n', - 'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). 
Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', - 'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= \n id_continue ::= \n xid_start ::= \n xid_continue ::= \n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', - 'if': '\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', - 'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', - 'import': '\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope where\n the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules is\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. 
Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following "as"\n is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is bound in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. The wild\ncard form of import --- "import *" --- is only allowed at the module\nlevel. Attempting to use it in class or function definitions will\nraise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. 
Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
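For instance, a future statement entered at the prompt is simply
accepted, even though in Python 3 the listed features are already
enabled:

   >>> from __future__ import division   # redundant in Python 3, but accepted
   >>> 8 / 5
   1.6
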
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n', - 'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The are\n identical to themselves, "x is x" but are not equal to themselves,\n "x != x". Additionally, comparing any value to a not-a-number value\n will return "False". For example, both "3 < float(\'NaN\')" and\n "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function "ord()") of their characters.\n [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. 
Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison. Most\nnumeric types can be compared with one another. When cross-type\ncomparison is not supported, the comparison method returns\n"NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether a the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. 
[4]\n', - 'integers': '\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n', - 'lambda': '\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def (arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n', - 'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', - 'naming': '\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). 
By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', - 'nonlocal': '\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope. This is\nimportant because the default behavior for binding is to search the\nlocal namespace first. The statement allows encapsulated code to\nrebind variables outside of the local scope besides the global\n(module) scope.\n\nNames listed in a "nonlocal" statement, unlike to those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also:\n\n **PEP 3104** - Access to Names in Outer Scopes\n The specification for the "nonlocal" statement.\n', - 'numbers': '\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. 
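For example, each kind of literal yields the corresponding built-in
numeric type (an imaginary literal yields a "complex" value whose real
part is zero):

   >>> type(3), type(3.14), type(3.14j)
   (<class 'int'>, <class 'float'>, <class 'complex'>)
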
There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', - 'numeric-types': '\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: When "__index__()" is defined, "__int__()" should also be\n defined, and both shuld return the same value, in order to have a\n coherent integer type class.\n', - 'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. 
(The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. 
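A quick check with the "is" operator makes the difference visible (the
names follow the text above):

   >>> c = []
   >>> d = []
   >>> c is d
   False
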
(Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', - 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats it\n may not be true numerically due to roundoff. 
For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] While comparisons between strings make sense at the byte level,\n they may be counter-intuitive to users. For example, the strings\n ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even though\n they both represent the same unicode character (LATIN CAPITAL\n LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the "is" operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', - 'pass': '\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', - 'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n', - 'raise': '\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. 
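For example, a bare "raise" inside an exception handler re-raises the
exception currently being handled (the traceback shown here is
illustrative):

   >>> try:
   ...     int('spam')
   ... except ValueError:
   ...     raise
   ...
   Traceback (most recent call last):
     File "<stdin>", line 2, in <module>
   ValueError: invalid literal for int() with base 10: 'spam'
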
If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', - 'return': '\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n', - 'sequence-types': '\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". 
This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n', - 'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is required to\n be at most "sys.maxsize". If the right-hand operand is larger than\n "sys.maxsize" an "OverflowError" exception is raised.\n', - 'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. 
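For example (an illustrative interactive session):

   >>> s = [0, 1, 2, 3, 4]
   >>> s[1:3]                # slicing used as an expression
   [1, 2]
   >>> s[1:3] = [9, 9]       # slicing used as an assignment target
   >>> del s[::2]            # slicing used as a "del" target
   >>> s
   [9, 3]
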
The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same "__getitem__()"\nmethod as normal subscription) with a key that is constructed from the\nslice list, as follows. If the slice list contains at least one\ncomma, the key is a tuple containing the conversion of the slice\nitems; otherwise, the conversion of the lone slice item is the key.\nThe conversion of a slice item that is an expression is that\nexpression. The conversion of a proper slice is a slice object (see\nsection *The standard type hierarchy*) whose "start", "stop" and\n"step" attributes are the values of the expressions given as lower\nbound, upper bound and stride, respectively, substituting "None" for\nmissing expressions.\n', - 'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. 
Example:\n\n >>> int.__subclasses__()\n []\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to "[1.0,\n 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property being\n one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n', - 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
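For example, a minimal sketch (the class itself is purely illustrative) in which "__new__()" builds an immutable "tuple" subclass and "__init__()" then receives the same arguments:

   class Point(tuple):
       def __new__(cls, x, y):
           # the immutable value must be chosen here, not in __init__
           return super().__new__(cls, (x, y))
       def __init__(self, x, y):
           # runs only because __new__ returned an instance of cls
           self.label = "({}, {})".format(x, y)
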
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which "__del__()"\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to "sys.stderr" instead.\n Also, when "__del__()" is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the "__del__()" method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, "__del__()"\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. 
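For instance, a minimal illustrative sketch:

   class Vector:
       def __init__(self, x, y):
           self.x, self.y = x, y
       def __repr__(self):
           # reads like an expression that could recreate the object
           return "Vector({!r}, {!r})".format(self.x, self.y)
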
The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "xy" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
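As an illustration, a minimal sketch that defines "__eq__()" and "__lt__()" (returning "NotImplemented" for unsupported operands) and lets "functools.total_ordering()", mentioned below, derive the remaining comparisons:

   import functools

   @functools.total_ordering
   class Version:
       def __init__(self, major, minor):
           self._key = (major, minor)
       def __eq__(self, other):
           if not isinstance(other, Version):
               return NotImplemented
           return self._key == other._key
       def __lt__(self, other):
           if not isinstance(other, Version):
               return NotImplemented
           return self._key < other._key
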
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s custom\n "__hash__()" method to the size of a "Py_ssize_t". This is\n typically 8 bytes on 64-bit builds and 4 bytes on 32-bit builds.\n If an object\'s "__hash__()" must interoperate on builds of\n different bit sizes, be sure to check the width on all supported\n builds. An easy way to do this is with "python -c "import sys;\n print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable").\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n .__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. 
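Two minimal, purely illustrative sketches: the first keeps instances hashable by hashing the same components that "__eq__()" compares, the second suppresses hashing entirely:

   class Color:
       def __init__(self, r, g, b):
           self.rgb = (r, g, b)
       def __eq__(self, other):
           return isinstance(other, Color) and self.rgb == other.rgb
       def __hash__(self):
           # mix together exactly the components used in comparison
           return hash(self.rgb)

   class MutableColor(Color):
       __hash__ = None    # instances cannot be used as dict keys or set items
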
A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and datetime\n objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. 
store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. 
If weak\n reference support is needed, then add "\'__weakref__\'" to the\n sequence of strings in the *__slots__* declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. 
If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3000\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also:\n\n **PEP 3135** - New super\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class members\nwere defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. 
That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. 
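For illustration only, a minimal sketch of a read-only sequence wrapper supporting "len()", indexing and slicing:

   class Deck:
       def __init__(self, cards):
           self._cards = list(cards)
       def __len__(self):
           return len(self._cards)
       def __getitem__(self, index):
           # both integer indexes and slice objects are accepted
           return self._cards[index]
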
It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods. A\n call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. 
[2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: When "__index__()" is defined, "__int__()" should also be\n defined, and both shuld return the same value, in order to have a\n coherent integer type class.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
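A minimal illustrative sketch of a context manager that times the body of a "with" block:

   import time

   class Timer:
       def __enter__(self):
           self.start = time.monotonic()
           return self            # bound to the "as" target, if any
       def __exit__(self, exc_type, exc_value, traceback):
           self.elapsed = time.monotonic() - self.start
           return False           # do not suppress exceptions

   # usage (illustrative):
   #
   #     with Timer() as t:
   #         ...
   #     print(t.elapsed)
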
Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n "with" statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', - 'string-methods': '\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Codec Base Classes*. 
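For example (an illustrative session):

   >>> 'abcé'.encode('utf-8')
   b'abc\xc3\xa9'
   >>> 'abcé'.encode('ascii', errors='replace')
   b'abc?'
   >>> 'abcé'.encode('ascii', errors='xmlcharrefreplace')
   b'abc&#233;'
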
For a\n list of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. 
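As a hedged illustration of "endswith()", "find()" and "format()" as just described (sample strings chosen only for demonstration):

   >>> 'archive.tar.gz'.endswith(('.gz', '.bz2'))
   True
   >>> 'Python'.find('th')
   2
   >>> 'Python'.find('xy')
   -1
   >>> '{0} + {1} = {total}'.format(2, 3, total=5)
   '2 + 3 = 5'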
Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. 
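A short illustrative sketch of "join()" and a few of the "is*()" predicates described above; the inputs are arbitrary examples:

   >>> '-'.join(['2014', '08', '31'])
   '2014-08-31'
   >>> '123'.isdecimal()
   True
   >>> 'hello world'.islower()
   True
   >>> 'Hello World'.istitle()
   True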
A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. 
Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, "\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()" returns "[\'ab\n c\', \'\', \'de fg\', \'kl\']", while the same call with\n "splitlines(True)" returns "[\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']".\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. 
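A brief hedged sketch of "startswith()", "strip()" and "splitlines()" as described above; the path and strings are arbitrary examples:

   >>> 'Lib/test/test_os.py'.startswith(('Lib/', 'Doc/'))
   True
   >>> '   spacious   '.strip()
   'spacious'
   >>> 'one\ntwo\r\nthree'.splitlines()
   ['one', 'two', 'three']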
Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or "None". Unmapped\n characters are left untouched. Characters mapped to "None" are\n deleted.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the "codecs" module (see "encodings.cp1251"\n for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n', - 'strings': '\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= \n longstringchar ::= \n stringescapeseq ::= "\\" \n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= \n longbyteschar ::= \n bytesescapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. 
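A small illustrative sketch of "zfill()", "upper()" and dictionary-based "translate()"; the values are invented for demonstration:

   >>> '-42'.zfill(6)
   '-00042'
   >>> 'straße'.upper()
   'STRASSE'
   >>> 'read this text'.translate({ord('t'): 'T'})
   'read This TexT'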
The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n been added as a synonym of "\'br\'".\n\n New in version 3.3: Support for the unicode legacy literal\n ("u\'value\'") was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
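As a hedged illustration of the prefixes discussed above (raw strings, bytes literals, and the reintroduced legacy "u" prefix), the following shows expected interactive results:

   >>> len('\n'), len(r'\n')
   (1, 2)
   >>> r'C:\new\dir'
   'C:\\new\\dir'
   >>> b'abc' == 'abc'.encode('ascii')
   True
   >>> u'abc' == 'abc'     # the legacy "u" prefix is accepted again in 3.3+
   True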
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the byte\n with the given value. In a string literal, these escapes denote a\n Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight hex\n digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). 
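A brief sketch of the escape-sequence rules described above, including the treatment of unrecognized escapes and of string-only escapes inside bytes literals (values chosen only for illustration):

   >>> '\x41\u0042\N{LATIN CAPITAL LETTER C}'
   'ABC'
   >>> '\d'                # unrecognized escape: the backslash is kept
   '\\d'
   >>> b'\u0041'           # '\u' is not an escape in bytes literals
   b'\\u0041'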
Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n', - 'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription,\ne.g. a list or dictionary. User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', - 'truth': '\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', - 'try': '\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. 
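The following is a minimal sketch of user-defined subscription via "__getitem__()" and of truth value testing; the class name "Squares" is invented for illustration:

   >>> class Squares:
   ...     def __getitem__(self, index):
   ...         return index * index
   ...
   >>> Squares()[4]
   16
   >>> bool([]), bool({}), bool(0.0), bool('x')
   (False, False, False, True)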
An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be access via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... 
return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n', - 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. 
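As a hedged sketch of the "NotImplemented" convention described above, a numeric method can return it to let the interpreter try a fallback; the class name "Metres" is invented for illustration:

   >>> class Metres:
   ...     def __init__(self, value):
   ...         self.value = value
   ...     def __add__(self, other):
   ...         if not isinstance(other, Metres):
   ...             return NotImplemented   # let the interpreter try a fallback
   ...         return Metres(self.value + other.value)
   ...
   >>> (Metres(1) + Metres(2)).value
   3
   >>> Metres(1) + 'two'
   Traceback (most recent call last):
     ...
   TypeError: unsupported operand type(s) for +: 'Metres' and 'str'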
For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode\n codepoints. All the codepoints in range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "chr"\n type, and every character in the string is represented as a\n string object with length "1". The built-in function "ord()"\n converts a character to its codepoint (as an integer);\n "chr()" converts an integer in range "0 - 10FFFF" to the\n corresponding character. 
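A short illustrative sketch of the numeric and sequence behaviour described above (two's-complement-style shifting, "bool" as an integer subtype, complex attributes, and extended slicing):

   >>> -5 >> 1             # negative integers shift as if two's complement
   -3
   >>> True + True         # bool is a subtype of int
   2
   >>> z = 3 + 4j
   >>> z.real, z.imag
   (3.0, 4.0)
   >>> 'abcdef'[1:5:2]     # extended slicing with a step
   'bd'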
"str.encode()" can be used to\n convert a "str" to "bytes" using the given encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. 
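As a small sketch of the special attributes of user-defined functions listed above, including attaching an arbitrary attribute; the function name "scale" and the attribute "units" are invented for illustration:

   >>> def scale(x, factor=2):
   ...     "Multiply x by factor."
   ...     return x * factor
   ...
   >>> scale.__name__, scale.__defaults__
   ('scale', (2,))
   >>> scale.__doc__
   'Multiply x by factor.'
   >>> scale.units = 'arbitrary'      # arbitrary attributes may be attached
   >>> scale.units
   'arbitrary'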
Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. 
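A minimal sketch of bound instance methods as described above, showing "__self__", "__func__" and the equivalence of the bound and unbound call forms; the class "C" and method "f" are invented for illustration:

   >>> class C:
   ...     def f(self, arg):
   ...         return arg
   ...
   >>> x = C()
   >>> bound = x.f
   >>> bound.__self__ is x
   True
   >>> bound.__func__ is C.f
   True
   >>> x.f(1) == C.f(x, 1)
   True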
It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. 
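The following is a brief sketch of a generator function as described above; the name "countdown" is invented for illustration:

   >>> def countdown(n):
   ...     while n > 0:
   ...         yield n
   ...         n -= 1
   ...
   >>> it = countdown(3)
   >>> next(it), next(it), next(it)
   (3, 2, 1)
   >>> next(it)
   Traceback (most recent call last):
     ...
   StopIteration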
To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". 
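As a hedged illustration of the C3 method resolution order mentioned above, a classic diamond hierarchy (class names invented for illustration) linearizes as follows:

   >>> class A: pass
   ...
   >>> class B(A): pass
   ...
   >>> class C(A): pass
   ...
   >>> class D(B, C): pass
   ...
   >>> [cls.__name__ for cls in D.__mro__]
   ['D', 'B', 'C', 'A', 'object']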
See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
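A small sketch of "__getattr__()" and "__setattr__()" as described above; "Proxy" and "Plain" are invented names, and the "__dict__" assignment in "__init__()" sidesteps the instance's own "__setattr__()":

   >>> class Proxy:
   ...     def __init__(self, target):
   ...         self.__dict__['target'] = target   # avoid __setattr__ recursion
   ...     def __getattr__(self, name):           # called only when normal lookup fails
   ...         return getattr(self.target, name)
   ...     def __setattr__(self, name, value):    # called for every assignment
   ...         setattr(self.target, name, value)
   ...
   >>> class Plain: pass
   ...
   >>> p = Proxy(Plain())
   >>> p.x = 5
   >>> p.x
   5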
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
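A brief sketch of inspecting a code object's read-only attributes as listed above; the function "adder" is invented for illustration:

   >>> def adder(a, b=1):
   ...     total = a + b
   ...     return total
   ...
   >>> adder.__code__.co_name, adder.__code__.co_argcount
   ('adder', 2)
   >>> adder.__code__.co_varnames
   ('a', 'b', 'total')
   >>> bool(adder.__code__.co_flags & 0x20)    # the generator bit is not set
   False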
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n', - 'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. 
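 The "indices()" method of slice objects described above can be sketched briefly (the length 10 is just an arbitrary example value):

    >>> s = slice(None, None, 2)
    >>> s.indices(10)
    (0, 10, 2)
    >>> slice(-3, None).indices(10)    # negative start resolved against the length
    (7, 10, 1)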
The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', - 'typesmapping': '\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterator*\n object. Each item in the iterable must itself be an iterator with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()", if the\n key *key* is not present, the "d[key]" operation calls that\n method with the key *key* as argument. The "d[key]" operation\n then returns or raises whatever is returned or raised by the\n "__missing__(key)" call if the key is not present. No other\n operations or methods invoke "__missing__()". If "__missing__()"\n is not defined, "KeyError" is raised. "__missing__()" must be a\n method; it cannot be an instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See "collections.Counter" for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also:\n\n "types.MappingProxyType" can be used to create a read-only view of\n a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. 
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', - 'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. 
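 The call equivalence for bound methods stated above can be checked directly (the class "C" and its method are invented for illustration):

    >>> class C:
    ...     def double(self, x):
    ...         return 2 * x
    ...
    >>> c = C()
    >>> m = c.double
    >>> m.__self__ is c
    True
    >>> m(3) == m.__func__(m.__self__, 3)
    True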
Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "", line 1, in \n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', - 'typesmodules': '\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "". If loaded from a file, they are written as\n"".\n', - 'typesseq': '\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. 
The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | *n* shallow copies of *s* | (2)(7) |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n+----------------------------+----------------------------------+------------+\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: "len(s) + i" or "len(s) + j" is substituted. But note that\n "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". 
If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new object.\n This means that building up a sequence by repeated concatenation\n will have a quadratic runtime cost in the total sequence length.\n To get a linear runtime cost, you must switch to one of the\n alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to a "io.StringIO" instance\n and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item sequences\n that follow specific patterns, and hence don\'t support sequence\n concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default the\n last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for economy\n of space when reversing a large sequence. To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]", "[a,\n b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. 
*iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". 
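 The *key* and *reverse* arguments to "sort()" described above might be exercised like this (the sample words are arbitrary):

    >>> words = ['banana', 'Apple', 'cherry']
    >>> words.sort(key=str.lower)
    >>> words
    ['Apple', 'banana', 'cherry']
    >>> words.sort(key=len, reverse=True)    # stable: equal lengths keep their order
    >>> words
    ['banana', 'cherry', 'Apple']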
If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. 
(Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', - 'typesseq-mutable': '\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default the\n last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for economy\n of space when reversing a large sequence. To remind users that it\n operates by side effect, it does not return the reversed sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n', - 'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', - 'while': '\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', - 'with': '\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item") is\n evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return value\n from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()" method\n returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n "with" statement.\n', - 'yield': '\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n yield \n yield from \n\nare equivalent to the yield expression statements\n\n (yield )\n (yield from )\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction. Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'} +# Autogenerated by Sphinx on Sun Sep 21 00:02:20 2014 +topics = {'assert': b'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. 
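 Under the equivalence given above (and assuming "__debug__" is true, i.e. the interpreter was not started with -O), an assert behaves like this:

    >>> x = 3
    >>> assert x > 0, 'x must be positive'
    >>> assert x > 5, 'x must be greater than 5'
    Traceback (most recent call last):
      ...
    AssertionError: x must be greater than 5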
The value for the built-in\nvariable is determined when the interpreter starts.\n', + 'assignment': b'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. 
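 The starred-target rule described above can be sketched briefly:

    >>> first, *rest = [1, 2, 3, 4]
    >>> first, rest
    (1, [2, 3, 4])
    >>> a, *middle, b = 'spam'
    >>> middle
    ['p', 'a']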
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
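 For example, with a list target (a minimal sketch):

    >>> s = [0, 1, 2, 3, 4]
    >>> s[1:3] = ['a', 'b', 'c']    # two items replaced by three
    >>> s
    [0, 'a', 'b', 'c', 3, 4]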
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', + 'atom-identifiers': b'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. 
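 The possible in-place behaviour of augmented assignment noted above is observable with a mutable object (a rough sketch):

    >>> x = [1, 2]
    >>> y = x
    >>> x += [3]        # in-place for lists; both names still refer to one object
    >>> y
    [1, 2, 3]
    >>> s = (1, 2)
    >>> t = s
    >>> s += (3,)       # tuples are immutable, so a new object is created
    >>> t
    (1, 2)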
When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', + 'atom-literals': b"\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", + 'attribute-access': b'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. 
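 A rough sketch of the "__getattr__()" fallback described above (the "Proxy" class is an invented example, not drawn from the text):

    >>> class Proxy:
    ...     def __init__(self, wrapped):
    ...         self._wrapped = wrapped
    ...     def __getattr__(self, name):        # only called when normal lookup fails
    ...         return getattr(self._wrapped, name)
    ...
    >>> p = Proxy([1, 2, 3])
    >>> p._wrapped        # found in the instance dictionary, __getattr__ not used
    [1, 2, 3]
    >>> p.append(4)       # not found, so __getattr__ delegates to the list
    >>> p._wrapped
    [1, 2, 3, 4]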
In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
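 A small data descriptor along the lines described above (the names "Positive" and "Account" are invented for illustration):

    >>> class Positive:
    ...     def __get__(self, instance, owner):
    ...         if instance is None:
    ...             return self
    ...         return instance.__dict__['balance']
    ...     def __set__(self, instance, value):
    ...         if value < 0:
    ...             raise ValueError('must not be negative')
    ...         instance.__dict__['balance'] = value
    ...
    >>> class Account:
    ...     balance = Positive()    # data descriptor: defines __get__ and __set__
    ...
    >>> a = Account()
    >>> a.balance = 10
    >>> a.balance
    10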
For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', + 'attribute-references': b'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier. This\nproduction can be customized by overriding the "__getattr__()" method.\nIf this attribute is not available, the exception "AttributeError" is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. 
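The *__slots__* notes above can be illustrated with a short, hypothetical example; "Point" is not a documented class, just a sketch:

    class Point:
        __slots__ = ('x', 'y')          # no per-instance __dict__ is created

        def __init__(self, x, y):
            self.x = x
            self.y = y

    p = Point(1, 2)
    p.x = 10                            # 'x' is a declared slot
    try:
        p.z = 3                         # unlisted name, and no __dict__
    except AttributeError:
        pass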
Multiple evaluations of the same attribute\nreference may yield different objects.\n', + 'augassign': b'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', + 'binary': b'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. 
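The in-place behaviour of augmented assignment described above can be observed with a mutable sequence; a small sketch:

    xs = [1, 2]
    alias = xs
    xs += [3]              # performed in place: the same list object grows
    assert alias == [1, 2, 3] and alias is xs

    t = (1, 2)
    old = t
    t += (3,)              # tuples are immutable, so a new object is bound
    assert t == (1, 2, 3) and t is not old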
The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)". Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both be sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', + 'bitwise': b'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', + 'bltin-code-objects': b'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "__code__" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec()" or "eval()" built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', + 'bltin-ellipsis-object': b'\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the\n"Ellipsis" singleton.\n\nIt is written as "Ellipsis" or "...".\n', + 'bltin-null-object': b'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name). 
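The floor division / modulo identity and the "divmod()" relationship quoted above can be checked interactively, for example:

    >>> x, y = 7, -3
    >>> x // y, x % y
    (-3, -2)
    >>> (x // y) * y + (x % y) == x
    True
    >>> divmod(x, y) == (x // y, x % y)
    True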
"type(None)()" produces the\nsame singleton.\n\nIt is written as "None".\n', + 'bltin-type-objects': b'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "".\n', + 'booleans': b'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a "__bool__()" method.\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to create a\nnew value, it returns a boolean value regardless of the type of its\nargument (for example, "not \'foo\'" produces "False" rather than "\'\'".)\n', + 'break': b'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', + 'callable-types': b'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', + 'calls': b'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does 
not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... 
print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', + 'class': b'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. 
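The "*expression" and "**expression" call forms described above can be sketched briefly; "report()" is a hypothetical function used only for illustration:

    >>> def report(a, b, *rest, **extra):
    ...     return a, b, rest, extra
    ...
    >>> report(1, *(2, 3), **{'flag': True})
    (1, 2, (3,), {'flag': True})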
The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', + 'comparisons': b'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". 
Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. 
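A user-defined "__contains__()" method, as described above for membership tests, might look like the following hypothetical sketch:

    class Deck:
        def __init__(self, cards):
            self._cards = list(cards)

        def __contains__(self, card):
            # "card in deck" is answered here, before any fallback to
            # __iter__() or __getitem__() is attempted.
            return card in self._cards

    deck = Deck(['ace', 'king'])
    assert 'ace' in deck
    assert 'joker' not in deck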
(If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', + 'compound': b'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nA compound statement consists of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of a suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. 
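The interaction of "break" with a loop's "else" clause described above can be seen in a short session:

    >>> for n in range(2, 8):
    ...     if n % 5 == 0:
    ...         print('found', n)
    ...         break
    ... else:
    ...     print('no multiple of 5 found')
    ...
    found 5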
Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
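Keeping the exception under a different name, as required above, might look like this:

    >>> try:
    ...     1 / 0
    ... except ZeroDivisionError as exc:
    ...     saved = exc            # rebind before the except clause ends
    ...
    >>> saved.args
    ('division by zero',)
    >>> exc
    Traceback (most recent call last):
      ...
    NameError: name 'exc' is not defined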
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. 
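The keyword-only parameters and annotations described above can be sketched as follows; "scale()" is a hypothetical function:

    >>> def scale(values, *, factor: float = 2.0) -> list:
    ...     return [v * factor for v in values]
    ...
    >>> scale([1, 2], factor=10)
    [10, 20]
    >>> sorted(scale.__annotations__)
    ['factor', 'return']
    >>> scale([1, 2], 10)
    Traceback (most recent call last):
      ...
    TypeError: scale() takes 1 positional argument but 2 were given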
Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', + 'context-managers': b'\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. 
The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', + 'continue': b'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', + 'conversions': b'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works as follows:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string as a\nleft argument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', + 'customization': b'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
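A hand-written context manager following the "__enter__()"/"__exit__()" protocol above might look like this hypothetical sketch:

    class suppress_zero_division:
        def __enter__(self):
            return self                  # bound to the "as" target, if any

        def __exit__(self, exc_type, exc_value, traceback):
            # Returning a true value suppresses the exception.
            return exc_type is ZeroDivisionError

    with suppress_zero_division():
        1 / 0
    print('execution continues here')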
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. 
Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. 
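A minimal sketch of that convention (the class is illustrative only): a rich comparison method can decline to handle a foreign operand by returning "NotImplemented", which lets Python try the other operand's reflected method instead:

   class Version:
       """Illustrative value type that only compares against other Versions."""

       def __init__(self, major, minor):
           self.major, self.minor = major, minor

       def __eq__(self, other):
           if not isinstance(other, Version):
               return NotImplemented  # let the other operand have a try
           return (self.major, self.minor) == (other.major, other.minor)

       def __lt__(self, other):
           if not isinstance(other, Version):
               return NotImplemented
           return (self.major, self.minor) < (other.major, other.minor)

With "__eq__()" and one ordering method defined, "functools.total_ordering()" (mentioned below) can supply the remaining comparison methods.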
By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". 
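A minimal sketch of the advice above, hashing exactly the components that take part in comparison so that equal objects hash equally (the class is illustrative only):

   class Coord:
       """Illustrative value object with consistent __eq__ and __hash__."""

       def __init__(self, x, y):
           self.x, self.y = x, y

       def __eq__(self, other):
           if not isinstance(other, Coord):
               return NotImplemented
           return (self.x, self.y) == (other.x, other.y)

       def __hash__(self):
           # Mix the same components __eq__ compares, e.g. via a tuple hash.
           return hash((self.x, self.y))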
When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)".\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n', + 'debugger': b'\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. 
Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. 
[1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. 
An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). 
This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". 
If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n', + 'del': b'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', + 'dict': b'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', + 'dynamic-features': b'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. 
[1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', + 'else': b'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', + 'exceptions': b'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', + 'execmodel': b'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. 
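A minimal sketch of the cleanup pattern described in the exceptions entry above; the file name is illustrative only:

   f = None
   try:
       f = open("data.txt")  # hypothetical input file
       data = f.read()
   except OSError as exc:
       print("could not read input:", exc)
   finally:
       # The "finally" clause runs whether or not an exception occurred.
       if f is not None:
           f.close()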
A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. 
the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. 
The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', + 'exprlists': b'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', + 'floating': b'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', + 'for': b'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". 
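A minimal interactive sketch of the trailing-comma rule from the expression list entry above:

   >>> 1, 2, 3        # a comma-separated expression list yields a tuple
   (1, 2, 3)
   >>> (1)            # parentheses alone do not make a tuple
   1
   >>> (1,)           # the trailing comma creates a singleton tuple
   (1,)
   >>> ()             # an empty pair of parentheses is the empty tuple
   ()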
The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', + 'formatstrings': b'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s" | "a"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. 
The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. 
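A minimal interactive sketch of such nested replacement fields, with the width and precision supplied at run time (the keyword argument names are illustrative only):

   >>> '{0:{width}.{prec}f}'.format(3.14159, width=10, prec=3)
   '     3.142'
   >>> '{:>{w}}'.format('pi', w=6)
   '    pi'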
This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). 
|\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. 
Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'", but converts "nan" to "NAN" |\n | | and "inf" to "INF". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. 
This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to "\'g\'", except with at least one digit past the |\n | | decimal point and a default precision of 12. This is |\n | | intended to match "str()", except you can add the other |\n | | format modifiers. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', + 'function': b'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
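For instance (a minimal sketch; the name "greet" is invented for the
example), a "def" statement simply binds a name to a new function
object in the current namespace:

   >>> def greet():
   ...     return 'hello'
   ...
   >>> type(greet)
   <class 'function'>
   >>> greet.__globals__ is globals()   # the defining global namespace is remembered
   True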
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. 
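As a brief illustrative sketch (the function "scale" and its
parameters are invented here), annotations are recorded on the
function object without affecting the call:

   >>> def scale(value: float, factor: float = 2.0) -> float:
   ...     return value * factor
   ...
   >>> scale(3.0)
   6.0
   >>> sorted(scale.__annotations__)
   ['factor', 'return', 'value']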
The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n', + 'global': b'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the two restrictions, but programs should not abuse this\nfreedom, as future implementations may enforce them or silently change\nthe meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call. The same applies to the "eval()"\nand "compile()" functions.\n', + 'id-classes': b'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). 
Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'identifiers': b'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= \n id_continue ::= \n xid_start ::= \n xid_continue ::= \n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', + 'if': b'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', + 'imaginary': b'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', + 'import': b'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. 
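As a small sketch (the module name below is deliberately chosen not to
exist), a module that cannot be located makes this first step fail
with an "ImportError":

   >>> import a_module_that_should_not_exist
   Traceback (most recent call last):
     ...
   ImportError: No module named 'a_module_that_should_not_exist'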
Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause, loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is stored in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. The wild\ncard form of import --- "from module import *" --- is only allowed at\nthe module level. Attempting to use it in class or function\ndefinitions will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. 
Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine dynamically the modules to be loaded.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python where the feature\nbecomes standard.\n\nThe future statement is intended to ease migration to future versions\nof Python that introduce incompatible changes to the language. It\nallows use of the new features on a per-module basis before the\nrelease in which the feature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
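A short sketch using only the standard "__future__" module (no
invented names): in Python 3 the recognized features are already
mandatory, which the feature objects themselves report:

   >>> from __future__ import division      # accepted, but redundant in Python 3
   >>> import __future__
   >>> __future__.division.getMandatoryRelease()
   (3, 0, 0, 'alpha', 0)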
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n', + 'in': b'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. 
Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. 
[4]\n', + 'integers': b'\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n', + 'lambda': b'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) are used to create\nanonymous functions. The expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def (arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n', + 'lists': b'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', + 'naming': b'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
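For instance (a minimal sketch; "outer" and "inner" are invented
names), a name bound in a function block is visible in the blocks that
block contains:

   >>> def outer():
   ...     message = 'from the enclosing block'
   ...     def inner():
   ...         return message        # resolved in the enclosing function block
   ...     return inner()
   ...
   >>> outer()
   'from the enclosing block'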
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). 
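A short sketch of what this looks like at the interactive prompt,
using only the standard "builtins" module:

   >>> import builtins
   >>> __builtins__ is builtins       # in the "__main__" module
   True
   >>> abs is builtins.abs
   True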
By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', + 'nonlocal': b'\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope excluding\nglobals. This is important because the default behavior for binding is\nto search the local namespace first. The statement allows\nencapsulated code to rebind variables outside of the local scope\nbesides the global (module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n The specification for the "nonlocal" statement.\n', + 'numbers': b'\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. 
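For example (the values are arbitrary), one literal of each kind, and
a complex value written as a sum:

   >>> 7, 2.5, 4j          # integer, floating point and imaginary literals
   (7, 2.5, 4j)
   >>> 1.5 + 4j            # complex values are written as a sum
   (1.5+4j)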
There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', + 'numeric-types': b'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n', + 'objects': b'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. 
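A compact sketch of the distinction (the names are arbitrary): a list
changes its value in place, while an immutable tuple can only appear
to change through a mutable object it contains:

   >>> items = [1, 2]
   >>> items.append(3)            # lists are mutable
   >>> items
   [1, 2, 3]
   >>> box = ([1, 2],)            # an immutable tuple holding a mutable list
   >>> box[0].append(3)
   >>> box
   ([1, 2, 3],)
   >>> box[0] = []
   Traceback (most recent call last):
     ...
   TypeError: 'tuple' object does not support item assignment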
(The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (so\nyou should always close files explicitly).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. 
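For example, at the interactive prompt:

   >>> c = []
   >>> d = []
   >>> c is d          # always two distinct, newly created lists
   False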
(Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', + 'operator-summary': b'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. 
For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] While comparisons between strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', + 'pass': b'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', + 'power': b'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n', + 'raise': b'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. 
If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "<stdin>", line 4, in <module>\n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', + 'return': b'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n', + 'sequence-types': b'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". 
This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n', + 'shifting': b'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', + 'slicings': b'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. 
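For example (an illustrative interactive sketch; the formal syntax follows below):

   >>> a = list(range(6))
   >>> a[1:4]                # slicing as an expression
   [1, 2, 3]
   >>> a[1:4] = ['x']        # slicing as an assignment target
   >>> a
   [0, 'x', 4, 5]
   >>> del a[::2]            # slicing as a "del" target
   >>> a
   ['x', 5]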
The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same "__getitem__()"\nmethod as normal subscription) with a key that is constructed from the\nslice list, as follows. If the slice list contains at least one\ncomma, the key is a tuple containing the conversion of the slice\nitems; otherwise, the conversion of the lone slice item is the key.\nThe conversion of a slice item that is an expression is that\nexpression. The conversion of a proper slice is a slice object (see\nsection *The standard type hierarchy*) whose "start", "stop" and\n"step" attributes are the values of the expressions given as lower\nbound, upper bound and stride, respectively, substituting "None" for\nmissing expressions.\n', + 'specialattrs': b'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. 
Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n', + 'specialnames': b'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. 
The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
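A minimal sketch of these conventions (the class name is made up for illustration): each method handles only operands it understands and returns "NotImplemented" otherwise:

   class Money:
       def __init__(self, cents):
           self.cents = cents

       def __eq__(self, other):
           if not isinstance(other, Money):
               return NotImplemented      # let the other operand try, or fall back
           return self.cents == other.cents

       def __ne__(self, other):
           result = self.__eq__(other)
           return result if result is NotImplemented else not result

       def __lt__(self, other):
           if not isinstance(other, Money):
               return NotImplemented
           return self.cents < other.cents

Note that because this class overrides "__eq__()" without defining "__hash__()", its instances become unhashable.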
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)".\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. 
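In code, the two idioms just described might look like this (the class names are illustrative only):

   class Identity:
       # overrides __eq__() but keeps object's identity-based hashing
       def __eq__(self, other):
           return self is other
       __hash__ = object.__hash__

   class NoHash:
       # does not override __eq__(), but opts out of hashing entirely
       __hash__ = None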
A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. 
store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. 
If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. 
If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. 
That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. 
It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). 
Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. 
[2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
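The "__enter__()" and "__exit__()" methods that implement this protocol are
specified just below; as a minimal sketch (the class "managed_flag" and the
module-level name "VERBOSE" are invented for illustration):

   >>> class managed_flag:
   ...     def __enter__(self):
   ...         global VERBOSE
   ...         self._saved = VERBOSE
   ...         VERBOSE = True
   ...         return self             # bound to the "as" target, if any
   ...     def __exit__(self, exc_type, exc_value, traceback):
   ...         global VERBOSE
   ...         VERBOSE = self._saved   # restore the saved state on exit
   ...         return False            # do not suppress exceptions
   ...
   >>> VERBOSE = False
   >>> with managed_flag():
   ...     print(VERBOSE)
   ...
   True
   >>> VERBOSE
   False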
Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', + 'string-methods': b'\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Codec Base Classes*. 
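An illustrative sketch of the error handlers named above (the sample string
is invented; the outputs are what these handlers produce for it):

   >>> s = 'Stra\xdfe'                            # 'Straße', written with an escape
   >>> s.encode('ascii', errors='replace')
   b'Stra?e'
   >>> s.encode('ascii', errors='xmlcharrefreplace')
   b'Stra&#223;e'
   >>> s.encode('ascii', errors='backslashreplace')
   b'Stra\\xdfe'
   >>> s.encode()                                 # default: UTF-8, errors='strict'
   b'Stra\xc3\x9fe'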
For a\n list of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. 
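"str.endswith()" above carries no inline example; a small illustration of
the tuple-of-suffixes form (the filenames are invented):

   >>> 'photo.png'.endswith(('.png', '.jpg', '.gif'))
   True
   >>> 'notes.txt'.endswith(('.png', '.jpg', '.gif'))
   False
   >>> 'photo.png'.endswith('o', 0, 5)      # only consider 'photo.png'[0:5], i.e. 'photo'
   True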
Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. 
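The distinctions among "isdecimal()", "isdigit()" and "isnumeric()" described
above are easiest to see on concrete code points; an illustrative sketch
('\u00b2' is SUPERSCRIPT TWO, '\u00bd' is VULGAR FRACTION ONE HALF):

   >>> '42'.isdecimal(), '42'.isdigit(), '42'.isnumeric()
   (True, True, True)
   >>> '\u00b2'.isdecimal(), '\u00b2'.isdigit(), '\u00b2'.isnumeric()
   (False, True, True)
   >>> '\u00bd'.isdecimal(), '\u00bd'.isdigit(), '\u00bd'.isnumeric()
   (False, False, True)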
A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. 
Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n For example:\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n >>> \'1,2,3\'.split(\',\', maxsplit=1)\n [\'1\', \'2 3\']\n >>> \'1,2,,3,\'.split(\',\')\n [\'1\', \'2\', \'\', \'3\', \'\']\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example:\n\n >>> \'1 2 3\'.split()\n [\'1\', \'2\', \'3\']\n >>> \'1 2 3\'.split(maxsplit=1)\n [\'1\', \'2 3\']\n >>> \' 1 2 3 \'.split()\n [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']``\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. 
The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n For example:\n\n >>> \'Hello world\'.title()\n \'Hello World\'\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or "None". Unmapped\n characters are left untouched. Characters mapped to "None" are\n deleted.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom\n character mapping codec using the "codecs" module (see\n "encodings.cp1251" for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return a copy of the string left filled with ASCII "\'0\'" digits to\n make a string of length *width*. A leading sign prefix ("\'+\'"/"\'-\'"\n is handled by inserting the padding *after* the sign character\n rather than before. 
The original string is returned if *width* is\n less than or equal to "len(s)".\n\n For example:\n\n >>> "42".zfill(5)\n \'00042\'\n >>> "-42".zfill(5)\n \'-0042\'\n', + 'strings': b'\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= \n longstringchar ::= \n stringescapeseq ::= "\\" \n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= \n longbyteschar ::= \n bytesescapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n been added as a synonym of "\'br\'".\n\n New in version 3.3: Support for the unicode legacy literal\n ("u\'value\'") was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n byte with the given value. In a string literal, these escapes\n denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight\n hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
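A short illustration of the escape-sequence and raw-string rules described
above (the sample literals are invented):

   >>> len('\n'), len(r'\n')       # one escaped character vs. two raw characters
   (1, 2)
   >>> '\x41\u0042\N{LATIN SMALL LETTER C}'
   'ABc'
   >>> print('\d')                 # unrecognized escape: the backslash is kept
   \d
   >>> b'\u0041'                   # '\u' is not recognized in bytes literals
   b'\\u0041'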
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n', + 'subscriptions': b'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription\n(lists or dictionaries for example). User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', + 'truth': b'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. 
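A brief sketch of these rules, including a user-defined class whose truth
value comes from "__len__()" (the class "Basket" is invented for
illustration):

   >>> any(map(bool, (None, False, 0, 0.0, 0j, '', (), [], {})))
   False
   >>> class Basket:
   ...     def __init__(self, items):
   ...         self.items = list(items)
   ...     def __len__(self):
   ...         return len(self.items)    # zero length => considered false
   ...
   >>> bool(Basket([])), bool(Basket(['apple']))
   (False, True)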
(Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', + 'try': b'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n', + 'types': b'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode code\n points. 
All the code points in the range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "char"\n type; instead, every code point in the string is represented\n as a string object with length "1". The built-in function\n "ord()" converts a code point from its string form to an\n integer in the range "0 - 10FFFF"; "chr()" converts an\n integer in the range "0 - 10FFFF" to the corresponding length\n "1" string object. "str.encode()" can be used to convert a\n "str" to "bytes" using the given text encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. 
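An illustrative aside on the set types described above, showing the
numeric-equality rule and a "frozenset" used where a mutable "set" could
not be (the values are invented):

   >>> len({1, 1.0, True})                      # 1, 1.0 and True compare equal
   1
   >>> d = {frozenset({'read'}): 'granted'}     # a frozenset works as a dict key
   >>> d[frozenset({'read'})]
   'granted'
   >>> d[{'read'}]                              # a mutable set is not hashable
   Traceback (most recent call last):
     ...
   TypeError: unhashable type: 'set'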
The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value | |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | | contain bindings for the | |\n | | function\'s free variables. 
| |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | and "\'return\'" for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. 
For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). 
Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. 
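The two transformations just described can be sketched as follows; the method names "cm" and "sm" are illustrative:

   >>> class C:
   ...     @classmethod
   ...     def cm(cls):
   ...         return cls
   ...     @staticmethod
   ...     def sm():
   ...         return 'plain'
   ...
   >>> C.cm.__self__ is C     # a class method binds the class itself
   True
   >>> type(C.sm)             # a static method is handed back unwrapped
   <class 'function'>
   >>> C.sm()
   'plain'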
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
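A brief sketch of the split just described between a function object and its code object; the function "f" is illustrative:

   >>> def f(a, b=10):
   ...     return a + b
   ...
   >>> f.__defaults__            # default values live on the function object
   (10,)
   >>> f.__code__.co_argcount    # the code object records only names and counts
   2
   >>> f.__code__.co_varnames
   ('a', 'b')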
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
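A minimal sketch of "frame.clear()"; it relies on the CPython-specific helper "sys._getframe()" purely as a convenient way to get hold of a frame object:

   >>> import sys
   >>> def g():
   ...     data = [1, 2, 3]
   ...     return sys._getframe()   # hand the finished frame back to the caller
   ...
   >>> frame = g()
   >>> 'data' in frame.f_locals
   True
   >>> frame.clear()                # allowed: the frame is no longer executing
   >>> frame.f_locals
   {}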
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n', + 'typesfunctions': b'\nFunctions\n*********\n\nFunction objects are created by function definitions. 
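A quick sketch contrasting the two flavours of function object discussed below; the attribute name "attached" is illustrative:

   >>> def f():
   ...     pass
   ...
   >>> type(f)
   <class 'function'>
   >>> type(len)                 # built-in functions are a distinct type
   <class 'builtin_function_or_method'>
   >>> f.attached = 'metadata'   # arbitrary attributes: user-defined functions only
   >>> len.attached = 'metadata'
   Traceback (most recent call last):
     ...
   AttributeError: 'builtin_function_or_method' object has no attribute 'attached'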
The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', + 'typesmapping': b'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()", if the\n key *key* is not present, the "d[key]" operation calls that\n method with the key *key* as argument. The "d[key]" operation\n then returns or raises whatever is returned or raised by the\n "__missing__(key)" call if the key is not present. No other\n operations or methods invoke "__missing__()". If "__missing__()"\n is not defined, "KeyError" is raised. "__missing__()" must be a\n method; it cannot be an instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See "collections.Counter" for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. 
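Before turning to view objects, a short sketch of the lookup-with-default operations listed above; the sample dictionary is illustrative:

   >>> d = {'one': 1}
   >>> d.get('two')             # get() returns None for a missing key
   >>> d.get('two', 0)
   0
   >>> d.setdefault('two', 2)   # inserts the default and returns it
   2
   >>> d.pop('one')
   1
   >>> d
   {'two': 2}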
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', + 'typesmethods': b'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. 
Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "", line 1, in \n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', + 'typesmodules': b'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "". If loaded from a file, they are written as\n"".\n', + 'typesseq': b'\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. 
The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. 
If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to a "io.StringIO" instance\n and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
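A short sketch of the bounded "index()" search described in note 8 above; the sample list is illustrative:

   >>> s = [10, 20, 30, 20, 10]
   >>> s.index(20)        # first occurrence
   1
   >>> s.index(20, 2)     # search at or after index 2; the result stays absolute
   3
   >>> s.count(10)
   2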
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). 
Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. 
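For instance, both of the following ranges are empty because "r[0]" would fall outside the permitted values:

   >>> list(range(5, 2))        # r[0] would be 5, which is not < stop
   []
   >>> list(range(2, 5, -1))    # negative step: r[0] = 2 is not > stop
   []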
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', + 'typesseq-mutable': b'\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n', + 'unary': b'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', + 'while': b'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', + 'with': b'\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', + 'yield': b'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n yield \n yield from \n\nare equivalent to the yield expression statements\n\n (yield )\n (yield from )\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction. Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 16:21:39 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 14:21:39 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge=2E?= Message-ID: <20140922142133.65240.65062@mail.hg.python.org> https://hg.python.org/cpython/rev/97f9dcabfb58 changeset: 92522:97f9dcabfb58 branch: 3.4 parent: 92521:20264c0be22a parent: 92515:f674f16a70bc user: Larry Hastings date: Mon Sep 22 15:21:08 2014 +0100 summary: Merge. files: Doc/library/email.parser.rst | 2 +- Doc/library/subprocess.rst | 18 +++- Lib/sre_parse.py | 44 +++++++---- Lib/subprocess.py | 3 +- Lib/test/test_re.py | 29 ++++++- Lib/test/test_subprocess.py | 33 +++++++++ Lib/test/test_threading.py | 85 +++++++++++++++++++++++- Lib/threading.py | 16 ++-- Misc/NEWS | 9 ++ 9 files changed, 199 insertions(+), 40 deletions(-) diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst --- a/Doc/library/email.parser.rst +++ b/Doc/library/email.parser.rst @@ -181,7 +181,7 @@ .. versionchanged:: 3.3 Removed the *strict* argument. Added the *policy* keyword. - .. method:: parse(fp, headeronly=False) + .. method:: parse(fp, headersonly=False) Read all the data from the binary file-like object *fp*, parse the resulting bytes, and return the message object. *fp* must support diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -406,12 +406,18 @@ Read the `Security Considerations`_ section before using ``shell=True``. 
- *bufsize* will be supplied as the corresponding argument to the :func:`open` - function when creating the stdin/stdout/stderr pipe file objects: :const:`0` - means unbuffered (read and write are one system call and can return short), - :const:`1` means line buffered, any other positive value means use a buffer - of approximately that size. A negative bufsize (the default) means the - system default of io.DEFAULT_BUFFER_SIZE will be used. + *bufsize* will be supplied as the corresponding argument to the + :func:`open` function when creating the stdin/stdout/stderr pipe + file objects: + + - :const:`0` means unbuffered (read and write are one + system call and can return short) + - :const:`1` means line buffered + (only usable if ``universal_newlines=True`` i.e., in a text mode) + - any other positive value means use a buffer of approximately that + size + - negative bufsize (the default) means the system default of + io.DEFAULT_BUFFER_SIZE will be used. .. versionchanged:: 3.3.1 *bufsize* now defaults to -1 to enable buffering by default to match the diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -94,33 +94,45 @@ self.data = data self.width = None def dump(self, level=0): - nl = 1 + nl = True seqtypes = (tuple, list) for op, av in self.data: - print(level*" " + op, end=' '); nl = 0 - if op == "in": + print(level*" " + op, end='') + if op == IN: # member sublanguage - print(); nl = 1 + print() for op, a in av: print((level+1)*" " + op, a) - elif op == "branch": - print(); nl = 1 - i = 0 - for a in av[1]: - if i > 0: + elif op == BRANCH: + print() + for i, a in enumerate(av[1]): + if i: print(level*" " + "or") - a.dump(level+1); nl = 1 - i = i + 1 + a.dump(level+1) + elif op == GROUPREF_EXISTS: + condgroup, item_yes, item_no = av + print('', condgroup) + item_yes.dump(level+1) + if item_no: + print(level*" " + "else") + item_no.dump(level+1) elif isinstance(av, seqtypes): + nl = False for a in av: if isinstance(a, SubPattern): - if not nl: print() - a.dump(level+1); nl = 1 + if not nl: + print() + a.dump(level+1) + nl = True else: - print(a, end=' ') ; nl = 0 + if not nl: + print(' ', end='') + print(a, end='') + nl = False + if not nl: + print() else: - print(av, end=' ') ; nl = 0 - if not nl: print() + print('', av) def __repr__(self): return repr(self.data) def __len__(self): diff --git a/Lib/subprocess.py b/Lib/subprocess.py --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -837,7 +837,8 @@ if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: - self.stdin = io.TextIOWrapper(self.stdin, write_through=True) + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=(bufsize == 1)) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -1203,16 +1203,33 @@ self.assertEqual(m.group(2), "y") def test_debug_flag(self): + pat = r'(\.)(?:[ch]|py)(?(1)$|: )' with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + dump = '''\ +subpattern 1 + literal 46 +subpattern None + branch + in + literal 99 + literal 104 + or + literal 112 + literal 121 +subpattern None + groupref_exists 1 + at at_end + else + literal 58 + literal 32 +''' + self.assertEqual(out.getvalue(), dump) # Debug output is output again even a second time 
(bypassing # the cache -- issue #20426). with captured_stdout() as out: - re.compile('foo', re.DEBUG) - self.assertEqual(out.getvalue().splitlines(), - ['literal 102 ', 'literal 111 ', 'literal 111 ']) + re.compile(pat, re.DEBUG) + self.assertEqual(out.getvalue(), dump) def test_keyword_parameters(self): # Issue #20283: Accepting the string keyword parameter. diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1008,6 +1008,39 @@ p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) + def _test_bufsize_equal_one(self, line, expected, universal_newlines): + # subprocess may deadlock with bufsize=1, see issue #21332 + with subprocess.Popen([sys.executable, "-c", "import sys;" + "sys.stdout.write(sys.stdin.readline());" + "sys.stdout.flush()"], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + bufsize=1, + universal_newlines=universal_newlines) as p: + p.stdin.write(line) # expect that it flushes the line in text mode + os.close(p.stdin.fileno()) # close it without flushing the buffer + read_line = p.stdout.readline() + try: + p.stdin.close() + except OSError: + pass + p.stdin = None + self.assertEqual(p.returncode, 0) + self.assertEqual(read_line, expected) + + def test_bufsize_equal_one_text_mode(self): + # line is flushed in text mode with bufsize=1. + # we should get the full line in return + line = "line\n" + self._test_bufsize_equal_one(line, line, universal_newlines=True) + + def test_bufsize_equal_one_binary_mode(self): + # line is not flushed in binary mode with bufsize=1. + # we should get empty response + line = b'line' + os.linesep.encode() # assume ascii-based locale + self._test_bufsize_equal_one(line, b'', universal_newlines=False) + def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py --- a/Lib/test/test_threading.py +++ b/Lib/test/test_threading.py @@ -4,7 +4,7 @@ import test.support from test.support import verbose, strip_python_stderr, import_module, cpython_only -from test.script_helper import assert_python_ok +from test.script_helper import assert_python_ok, assert_python_failure import random import re @@ -15,7 +15,6 @@ import unittest import weakref import os -from test.script_helper import assert_python_ok, assert_python_failure import subprocess from test import lock_tests @@ -962,6 +961,88 @@ self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode()) self.assertEqual(data, expected_output) + def test_print_exception(self): + script = r"""if True: + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_1(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + t = 
threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + sys.stderr = None + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + err = err.decode() + self.assertIn("Exception in thread", err) + self.assertIn("Traceback (most recent call last):", err) + self.assertIn("ZeroDivisionError", err) + self.assertNotIn("Unhandled exception", err) + + def test_print_exception_stderr_is_none_2(self): + script = r"""if True: + import sys + import threading + import time + + running = False + def run(): + global running + running = True + while running: + time.sleep(0.01) + 1/0 + sys.stderr = None + t = threading.Thread(target=run) + t.start() + while not running: + time.sleep(0.01) + running = False + t.join() + """ + rc, out, err = assert_python_ok("-c", script) + self.assertEqual(out, b'') + self.assertNotIn("Unhandled exception", err.decode()) + + class TimerTests(BaseTestCase): def setUp(self): diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -248,7 +248,7 @@ def _is_owned(self): # Return True if lock is owned by current_thread. - # This method is called only if __lock doesn't have _is_owned(). + # This method is called only if _lock doesn't have _is_owned(). if self._lock.acquire(0): self._lock.release() return False @@ -749,12 +749,12 @@ """ - __initialized = False + _initialized = False # Need to store a reference to sys.exc_info for printing # out exceptions when a thread tries to use a global var. during interp. # shutdown and thus raises an exception about trying to perform some # operation on/with a NoneType - __exc_info = _sys.exc_info + _exc_info = _sys.exc_info # Keep sys.exc_clear too to clear the exception just before # allowing .join() to return. #XXX __exc_clear = _sys.exc_clear @@ -926,10 +926,10 @@ # shutdown) use self._stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined since the creation of # self. - if _sys: - _sys.stderr.write("Exception in thread %s:\n%s\n" % - (self.name, _format_exc())) - else: + if _sys and _sys.stderr is not None: + print("Exception in thread %s:\n%s" % + (self.name, _format_exc()), file=self._stderr) + elif self._stderr is not None: # Do the best job possible w/o a huge amt. of code to # approximate a traceback (code ideas from # Lib/traceback.py) @@ -957,7 +957,7 @@ # test_threading.test_no_refcycle_through_target when # the exception keeps the target alive past when we # assert that it's dead. - #XXX self.__exc_clear() + #XXX self._exc_clear() pass finally: with _active_limbo_lock: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,15 @@ Library ------- +- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. Removed trailing spaces in debugging output. + +- Issue #22423: Unhandled exception in thread no longer causes unhandled + AttributeError when sys.stderr is None. + +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + What's New in Python 3.4.2rc1? 
============================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 16:21:39 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 14:21:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Bump_version_n?= =?utf-8?q?umber_for_3=2E4=2E2rc1_release=2E?= Message-ID: <20140922142131.6936.89297@mail.hg.python.org> https://hg.python.org/cpython/rev/8711a0951384 changeset: 92519:8711a0951384 branch: 3.4 tag: v3.4.2rc1 user: Larry Hastings date: Sun Sep 21 00:09:56 2014 +0100 summary: Bump version number for 3.4.2rc1 release. files: Include/patchlevel.h | 8 ++++---- Lib/distutils/__init__.py | 2 +- Lib/idlelib/idlever.py | 2 +- Misc/NEWS | 2 +- Misc/RPM/python-3.4.spec | 2 +- README | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -18,12 +18,12 @@ /*--start constants--*/ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 4 -#define PY_MICRO_VERSION 1 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL -#define PY_RELEASE_SERIAL 0 +#define PY_MICRO_VERSION 2 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA +#define PY_RELEASE_SERIAL 1 /* Version as a string */ -#define PY_VERSION "3.4.1+" +#define PY_VERSION "3.4.2rc1" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py --- a/Lib/distutils/__init__.py +++ b/Lib/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. # #--start constants-- -__version__ = "3.4.1" +__version__ = "3.4.2rc1" #--end constants-- diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py --- a/Lib/idlelib/idlever.py +++ b/Lib/idlelib/idlever.py @@ -1,1 +1,1 @@ -IDLE_VERSION = "3.4.1" +IDLE_VERSION = "3.4.2rc1" diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5,7 +5,7 @@ What's New in Python 3.4.2? =========================== -Release date: XXXX-XX-XX +Release date: 2014-10-06 Core and Builtins ----------------- diff --git a/Misc/RPM/python-3.4.spec b/Misc/RPM/python-3.4.spec --- a/Misc/RPM/python-3.4.spec +++ b/Misc/RPM/python-3.4.spec @@ -39,7 +39,7 @@ %define name python #--start constants-- -%define version 3.4.1 +%define version 3.4.2rc1 %define libvers 3.4 #--end constants-- %define release 1pydotorg diff --git a/README b/README --- a/README +++ b/README @@ -1,5 +1,5 @@ -This is Python version 3.4.1 -============================ +This is Python version 3.4.2rc1 +=============================== Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Python Software Foundation. All rights reserved. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 16:21:39 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 14:21:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Post-release_e?= =?utf-8?q?ngineering_work_for_3=2E4=2E2rc1=2E?= Message-ID: <20140922142131.74909.5508@mail.hg.python.org> https://hg.python.org/cpython/rev/20264c0be22a changeset: 92521:20264c0be22a branch: 3.4 user: Larry Hastings date: Mon Sep 22 15:19:30 2014 +0100 summary: Post-release engineering work for 3.4.2rc1. 
files: Include/patchlevel.h | 2 +- Misc/NEWS | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,7 +23,7 @@ #define PY_RELEASE_SERIAL 1 /* Version as a string */ -#define PY_VERSION "3.4.2rc1" +#define PY_VERSION "3.4.2rc1+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,18 @@ Core and Builtins ----------------- +Library +------- + + +What's New in Python 3.4.2rc1? +============================== + +Release date: 2014-09-22 + +Core and Builtins +----------------- + - Issue #22258: Fix the the internal function set_inheritable() on Illumos. This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 16:21:39 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 14:21:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Added_tag_v3?= =?utf-8?q?=2E4=2E2rc1_for_changeset_8711a0951384?= Message-ID: <20140922142131.97868.86556@mail.hg.python.org> https://hg.python.org/cpython/rev/0349f93c3162 changeset: 92520:0349f93c3162 branch: 3.4 user: Larry Hastings date: Sun Sep 21 00:10:20 2014 +0100 summary: Added tag v3.4.2rc1 for changeset 8711a0951384 files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -136,3 +136,4 @@ 04f714765c13824c3bc2835d7b008908862e083a v3.4.0 c67a19e11a7191baf30f313bf55e2e0b6c6f574e v3.4.1rc1 c0e311e010fcb5bae8d87ca22051cd0845ea0ca0 v3.4.1 +8711a09513848cfc48c689d983495ee64f4668ca v3.4.2rc1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 21:23:31 2014 From: python-checkins at python.org (georg.brandl) Date: Mon, 22 Sep 2014 19:23:31 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogQ2xvc2VzICMyMTQz?= =?utf-8?q?1=3A_make_docs_depend_on_Sphinx_1=2E2_and_fix_pydoc-topics_buil?= =?utf-8?q?der_to?= Message-ID: <20140922192234.10228.20863@mail.hg.python.org> https://hg.python.org/cpython/rev/d71c351a6a0f changeset: 92523:d71c351a6a0f branch: 3.4 user: Georg Brandl date: Mon Sep 22 21:18:24 2014 +0200 summary: Closes #21431: make docs depend on Sphinx 1.2 and fix pydoc-topics builder to return Unicode strings on Python 3. files: Doc/conf.py | 2 +- Doc/tools/sphinxext/pyspecific.py | 32 +++--------------- 2 files changed, 7 insertions(+), 27 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -61,7 +61,7 @@ # By default, highlight as Python 3. 
highlight_language = 'python3' -needs_sphinx = '1.1' +needs_sphinx = '1.2' # Options for HTML output diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -14,12 +14,10 @@ from docutils import nodes, utils -import sphinx from sphinx.util.nodes import split_explicit_title from sphinx.util.compat import Directive from sphinx.writers.html import HTMLTranslator from sphinx.writers.latex import LaTeXTranslator -from sphinx.locale import versionlabels # monkey-patch reST parser to disable alphabetic and roman enumerated lists from docutils.parsers.rst.states import Body @@ -28,20 +26,6 @@ Body.enum.converters['lowerroman'] = \ Body.enum.converters['upperroman'] = lambda x: None -SPHINX11 = sphinx.__version__[:3] < '1.2' - -if SPHINX11: - # monkey-patch HTML translator to give versionmodified paragraphs a class - def new_visit_versionmodified(self, node): - self.body.append(self.starttag(node, 'p', CLASS=node['type'])) - text = versionlabels[node['type']] % node['version'] - if len(node): - text += ':' - else: - text += '.' - self.body.append('%s ' % text) - HTMLTranslator.visit_versionmodified = new_visit_versionmodified - # monkey-patch HTML and LaTeX translators to keep doctest blocks in the # doctest docs themselves orig_visit_literal_block = HTMLTranslator.visit_literal_block @@ -174,10 +158,9 @@ content.line = node[0].line content += node[0].children node[0].replace_self(nodes.paragraph('', '', content)) - if not SPHINX11: node[0].insert(0, nodes.inline('', '%s: ' % text, classes=['versionmodified'])) - elif not SPHINX11: + else: para = nodes.paragraph('', '', nodes.inline('', '%s.' % text, classes=['versionmodified'])) if len(node): @@ -188,9 +171,6 @@ env.note_versionchange('deprecated', version[0], node, self.lineno) return [node] + messages -# for Sphinx < 1.2 -versionlabels['deprecated-removed'] = DeprecatedRemoved._label - # Support for including Misc/NEWS @@ -289,14 +269,14 @@ document.append(doctree.ids[labelid]) destination = StringOutput(encoding='utf-8') writer.write(document, destination) - self.topics[label] = writer.output.encode('utf-8') + self.topics[label] = writer.output def finish(self): - f = open(path.join(self.outdir, 'topics.py'), 'w') + f = open(path.join(self.outdir, 'topics.py'), 'wb') try: - f.write('# -*- coding: utf-8 -*-\n') - f.write('# Autogenerated by Sphinx on %s\n' % asctime()) - f.write('topics = ' + pformat(self.topics) + '\n') + f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8')) + f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8')) + f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8')) finally: f.close() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 23:40:32 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 22 Sep 2014 21:40:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_v3=2E4=2E2rc1_release_stuff_to_default?= Message-ID: <20140922214029.112519.63652@mail.hg.python.org> https://hg.python.org/cpython/rev/00a11276ae6d changeset: 92524:00a11276ae6d parent: 92517:901e4e52b20a parent: 92523:d71c351a6a0f user: Ned Deily date: Mon Sep 22 14:38:58 2014 -0700 summary: merge v3.4.2rc1 release stuff to default files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -136,3 +136,4 @@ 
04f714765c13824c3bc2835d7b008908862e083a v3.4.0 c67a19e11a7191baf30f313bf55e2e0b6c6f574e v3.4.1rc1 c0e311e010fcb5bae8d87ca22051cd0845ea0ca0 v3.4.1 +8711a09513848cfc48c689d983495ee64f4668ca v3.4.2rc1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 22 23:45:29 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 22 Sep 2014 21:45:29 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2321431=3A_merge_fr?= =?utf-8?q?om_3=2E4?= Message-ID: <20140922214511.44082.16170@mail.hg.python.org> https://hg.python.org/cpython/rev/1248796b7945 changeset: 92525:1248796b7945 user: Ned Deily date: Mon Sep 22 14:44:22 2014 -0700 summary: Issue #21431: merge from 3.4 files: Doc/conf.py | 2 +- Doc/tools/sphinxext/pyspecific.py | 32 +++--------------- 2 files changed, 7 insertions(+), 27 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -61,7 +61,7 @@ # By default, highlight as Python 3. highlight_language = 'python3' -needs_sphinx = '1.1' +needs_sphinx = '1.2' # Options for HTML output diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -14,12 +14,10 @@ from docutils import nodes, utils -import sphinx from sphinx.util.nodes import split_explicit_title from sphinx.util.compat import Directive from sphinx.writers.html import HTMLTranslator from sphinx.writers.latex import LaTeXTranslator -from sphinx.locale import versionlabels # monkey-patch reST parser to disable alphabetic and roman enumerated lists from docutils.parsers.rst.states import Body @@ -28,20 +26,6 @@ Body.enum.converters['lowerroman'] = \ Body.enum.converters['upperroman'] = lambda x: None -SPHINX11 = sphinx.__version__[:3] < '1.2' - -if SPHINX11: - # monkey-patch HTML translator to give versionmodified paragraphs a class - def new_visit_versionmodified(self, node): - self.body.append(self.starttag(node, 'p', CLASS=node['type'])) - text = versionlabels[node['type']] % node['version'] - if len(node): - text += ':' - else: - text += '.' - self.body.append('%s ' % text) - HTMLTranslator.visit_versionmodified = new_visit_versionmodified - # monkey-patch HTML and LaTeX translators to keep doctest blocks in the # doctest docs themselves orig_visit_literal_block = HTMLTranslator.visit_literal_block @@ -174,10 +158,9 @@ content.line = node[0].line content += node[0].children node[0].replace_self(nodes.paragraph('', '', content)) - if not SPHINX11: node[0].insert(0, nodes.inline('', '%s: ' % text, classes=['versionmodified'])) - elif not SPHINX11: + else: para = nodes.paragraph('', '', nodes.inline('', '%s.' 
% text, classes=['versionmodified'])) if len(node): @@ -188,9 +171,6 @@ env.note_versionchange('deprecated', version[0], node, self.lineno) return [node] + messages -# for Sphinx < 1.2 -versionlabels['deprecated-removed'] = DeprecatedRemoved._label - # Support for including Misc/NEWS @@ -289,14 +269,14 @@ document.append(doctree.ids[labelid]) destination = StringOutput(encoding='utf-8') writer.write(document, destination) - self.topics[label] = writer.output.encode('utf-8') + self.topics[label] = writer.output def finish(self): - f = open(path.join(self.outdir, 'topics.py'), 'w') + f = open(path.join(self.outdir, 'topics.py'), 'wb') try: - f.write('# -*- coding: utf-8 -*-\n') - f.write('# Autogenerated by Sphinx on %s\n' % asctime()) - f.write('topics = ' + pformat(self.topics) + '\n') + f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8')) + f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8')) + f.write(('topics = ' + pformat(self.topics) + '\n').encode('utf-8')) finally: f.close() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 00:36:44 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 22 Sep 2014 22:36:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fix_formatting?= =?utf-8?q?_typo_in_Misc/NEWS=2E?= Message-ID: <20140922223641.50582.93650@mail.hg.python.org> https://hg.python.org/cpython/rev/fc06a261cd2e changeset: 92526:fc06a261cd2e branch: 3.4 parent: 92523:d71c351a6a0f user: Ned Deily date: Mon Sep 22 15:34:19 2014 -0700 summary: Fix formatting typo in Misc/NEWS. files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -104,7 +104,7 @@ selection or control(command) '-' or '+' or control-mousewheel. Original patch by Lita Cho. -_ Issue #21597: The separator between the turtledemo text pane and the drawing +- Issue #21597: The separator between the turtledemo text pane and the drawing canvas can now be grabbed and dragged with a mouse. The code text pane can be widened to easily view or copy the full width of the text. The canvas can be widened on small screens. Original patches by Jan Kanis and Lita Cho. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 00:36:46 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 22 Sep 2014 22:36:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fix_formatting_typo_in_Misc/NEWS=2E?= Message-ID: <20140922223642.50566.24116@mail.hg.python.org> https://hg.python.org/cpython/rev/f64e90680acd changeset: 92527:f64e90680acd parent: 92525:1248796b7945 parent: 92526:fc06a261cd2e user: Ned Deily date: Mon Sep 22 15:36:12 2014 -0700 summary: Fix formatting typo in Misc/NEWS. files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -227,7 +227,7 @@ selection or control(command) '-' or '+' or control-mousewheel. Original patch by Lita Cho. -_ Issue #21597: The separator between the turtledemo text pane and the drawing +- Issue #21597: The separator between the turtledemo text pane and the drawing canvas can now be grabbed and dragged with a mouse. The code text pane can be widened to easily view or copy the full width of the text. The canvas can be widened on small screens. Original patches by Jan Kanis and Lita Cho. 
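Looping back to the pyspecific.py change merged above (the pydoc-topics builder): the rewritten finish() opens the generated topics file in binary mode and encodes each chunk to UTF-8 explicitly, rather than relying on the platform text layer. A rough standalone sketch of that write pattern follows; the file name topics_demo.py and the one-entry topics dict are invented for illustration.

    from pprint import pformat
    from time import asctime

    # A stand-in for the builder's accumulated {label: reST text} mapping.
    topics = {'demo': '\nA tiny demo topic.\n'}

    # Binary mode plus explicit UTF-8 encodes, mirroring the new finish().
    with open('topics_demo.py', 'wb') as f:
        f.write('# -*- coding: utf-8 -*-\n'.encode('utf-8'))
        f.write(('# Autogenerated by Sphinx on %s\n' % asctime()).encode('utf-8'))
        f.write(('topics = ' + pformat(topics) + '\n').encode('utf-8'))

Written this way, the encoding of the output file no longer depends on the platform's default text encoding.
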
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 00:40:32 2014 From: python-checkins at python.org (ned.deily) Date: Mon, 22 Sep 2014 22:40:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Fix_formatting?= =?utf-8?q?_typo_in_Misc/NEWS=2E?= Message-ID: <20140922224030.112656.59478@mail.hg.python.org> https://hg.python.org/cpython/rev/66e6807442c9 changeset: 92528:66e6807442c9 branch: 2.7 parent: 92512:d4630b1c8792 user: Ned Deily date: Mon Sep 22 15:39:43 2014 -0700 summary: Fix formatting typo in Misc/NEWS. files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -52,7 +52,7 @@ Initialization of variables and gui setup should be done in main(), which is called each time a demo is run, but not on import. -_ Issue #21597: The separator between the turtledemo text pane and the drawing +- Issue #21597: The separator between the turtledemo text pane and the drawing canvas can now be grabbed and dragged with a mouse. The code text pane can be widened to easily view or copy the full width of the text. The canvas can be widened on small screens. Original patches by Jan Kanis and Lita Cho. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 00:51:58 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 22:51:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fix_pydoc_topi?= =?utf-8?q?cs=2E__I=27ll_regenerate_this_again_for_3=2E4=2E2_final=2C?= Message-ID: <20140922225158.50578.80710@mail.hg.python.org> https://hg.python.org/cpython/rev/81f2d5071da3 changeset: 92529:81f2d5071da3 branch: 3.4 parent: 92526:fc06a261cd2e user: Larry Hastings date: Mon Sep 22 23:51:20 2014 +0100 summary: Fix pydoc topics. I'll regenerate this again for 3.4.2 final, but fixing it now means the test suite can have zero errors during the next two weeks. files: Lib/pydoc_data/topics.py | 12676 ++++++++++++++++++++++++- 1 files changed, 12598 insertions(+), 78 deletions(-) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,79 +1,12599 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sun Sep 21 00:02:20 2014 -topics = {'assert': b'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. 
The value for the built-in\nvariable is determined when the interpreter starts.\n', - 'assignment': b'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', - 'atom-identifiers': b'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. 
When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', - 'atom-literals': b"\nLiterals\n********\n\nPython supports string and bytes literals and various numeric\nliterals:\n\n literal ::= stringliteral | bytesliteral\n | integer | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\nbytes, integer, floating point number, complex number) with the given\nvalue. The value may be approximated in the case of floating point\nand imaginary (complex) literals. See section *Literals* for details.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n", - 'attribute-access': b'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. 
In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. 
For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n--------------------------\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n', - 'attribute-references': b'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, which most objects do. This object is then\nasked to produce the attribute whose name is the identifier. This\nproduction can be customized by overriding the "__getattr__()" method.\nIf this attribute is not available, the exception "AttributeError" is\nraised. Otherwise, the type and value of the object produced is\ndetermined by the object. 
Multiple evaluations of the same attribute\nreference may yield different objects.\n', - 'augassign': b'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', - 'binary': b'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger and the other must be a sequence. In the former case, the\nnumbers are converted to a common type and then multiplied together.\nIn the latter case, sequence repetition is performed; a negative\nrepetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Division of integers yields a float, while\nfloor division of integers results in an integer; the result is that\nof mathematical division with the \'floor\' function applied to the\nresult. Division by zero raises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. 
The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [1].\n\nThe floor division and modulo operators are connected by the following\nidentity: "x == (x//y)*y + (x%y)". Floor division and modulo are also\nconnected with the built-in function "divmod()": "divmod(x, y) ==\n(x//y, x%y)". [2].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string objects to perform old-style\nstring formatting (also known as interpolation). The syntax for\nstring formatting is described in the Python Library Reference,\nsection *printf-style String Formatting*.\n\nThe floor division operator, the modulo operator, and the "divmod()"\nfunction are not defined for complex numbers. Instead, convert to a\nfloating point number using the "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both be sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n', - 'bitwise': b'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe integers.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be integers.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be integers.\n', - 'bltin-code-objects': b'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "__code__" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec()" or "eval()" built-in functions.\n\nSee *The standard type hierarchy* for more information.\n', - 'bltin-ellipsis-object': b'\nThe Ellipsis Object\n*******************\n\nThis object is commonly used by slicing (see *Slicings*). It supports\nno special operations. There is exactly one ellipsis object, named\n"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the\n"Ellipsis" singleton.\n\nIt is written as "Ellipsis" or "...".\n', - 'bltin-null-object': b'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name). 
"type(None)()" produces the\nsame singleton.\n\nIt is written as "None".\n', - 'bltin-type-objects': b'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "".\n', - 'booleans': b'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. User-defined objects can customize their truth value by\nproviding a "__bool__()" method.\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to create a\nnew value, it returns a boolean value regardless of the type of its\nargument (for example, "not \'foo\'" produces "False" rather than "\'\'".)\n', - 'break': b'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n', - 'callable-types': b'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', - 'calls': b'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does 
not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... 
print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', - 'class': b'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. 
The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'comparisons': b'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". 
Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. 
(If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', - 'compound': b'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nA compound statement consists of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of a suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. 
Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. 
Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'context-managers': b'\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. 
The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', - 'continue': b'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', - 'conversions': b'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works as follows:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string as a\nleft argument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', - 'customization': b'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. 
Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. 
By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". 
When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)".\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n', - 'debugger': b'\n"pdb" --- The Python Debugger\n*****************************\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. 
Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. 
[1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. 
An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). 
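For instance, a command list that prints a value and then resumes needs no explicit "end", because the resuming command closes the list itself (the breakpoint number and the variable name here are invented for illustration):

   (Pdb) commands 2
   (com) p retries
   (com) continue
   (Pdb)
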
This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". 
If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n', - 'del': b'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', - 'dict': b'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': b'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. 
[1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', - 'else': b'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', - 'exceptions': b'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', - 'execmodel': b'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. 
A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. 
the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. 
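As a brief illustrative sketch (the values are invented), a handler names the exception instance with "as" and can inspect it:

   try:
       port = int("80a")
   except ValueError as exc:
       # the handler receives the exception instance with its details
       print("invalid port:", exc)
       port = 8080
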
The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', - 'exprlists': b'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', - 'floating': b'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', - 'for': b'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". 
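As a rough sketch (ignoring the "else" clause and the assignment rules described next), the loop machinery corresponds to something like the following, with invented values:

   numbers = [10, 20, 30]          # the result of the expression list
   it = iter(numbers)              # the iterator created for that result
   while True:
       try:
           item = next(it)         # one item per pass, in iterator order
       except StopIteration:
           break
       print(item)                 # stands in for the loop's suite
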
The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', - 'formatstrings': b'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s" | "a"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. 
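For instance, a single replacement field can combine all three parts, which the following paragraphs describe in turn (the value is invented; doctest-style):

   >>> '{0!r:>12}'.format('spam')
   "      'spam'"
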
The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nThree conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, "\'!r\'" which calls "repr()" and "\'!a\'" which\ncalls "ascii()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. 
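For example (names invented; doctest-style):

   >>> width = 10
   >>> '{0:{1}}'.format('spam', width)
   'spam      '
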
This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use "{" and "}" as *fill*\nchar while using the "str.format()" method; this limitation however\ndoesn\'t affect the "format()" function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). 
|\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective "\'0b\'", "\'0o\'", or\n"\'0x\'" to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for "\'g\'" and "\'G\'"\nconversions, trailing zeros are not removed from the result.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 3.1: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero ("\'0\'") character enables sign-\naware zero-padding for numeric types. This is equivalent to a *fill*\ncharacter of "\'0\'" with an *alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. 
Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand None). When doing so, "float()" is used to convert the integer to\na floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'", but converts "nan" to "NAN" |\n | | and "inf" to "INF". |\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. 
This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to "\'g\'", except with at least one digit past the |\n | | decimal point and a default precision of 12. This is |\n | | intended to match "str()", except you can add the other |\n | | format modifiers. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12): #doctest: +NORMALIZE_WHITESPACE\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', - 'function': b'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
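For instance (names invented):

   def greet():
       return "hello"

   alias = greet                    # the bound object is an ordinary value
   print(alias() == greet())        # True
   print(type(greet).__name__)      # function
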
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. 
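A small sketch combining keyword-only parameters and annotations (the names and annotation values are invented); the "__annotations__" attribute described next collects them:

   def clamp(value: float, *, lo: float = 0.0, hi: float = 1.0) -> float:
       # lo and hi follow the bare "*", so they may only be passed by keyword
       return max(lo, min(hi, value))

   clamp(2.5, hi=2.0)               # fine
   # clamp(2.5, 0.0, 2.0)           # TypeError: lo and hi are keyword-only
   print(sorted(clamp.__annotations__))
   # ['hi', 'lo', 'return', 'value']
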
The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n', - 'global': b'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the two restrictions, but programs should not abuse this\nfreedom, as future implementations may enforce them or silently change\nthe meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in a string\nor code object supplied to the built-in "exec()" function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by "global" statements in the\ncode containing the function call. The same applies to the "eval()"\nand "compile()" functions.\n', - 'id-classes': b'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). 
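For instance, a class opts into documented interpreter behaviour by defining such a name (the class is invented for illustration):

   class Token:
       def __init__(self, text):
           self.text = text
       def __repr__(self):          # "__repr__" is a documented system-defined name
           return 'Token({!r})'.format(self.text)

   print(Token('if'))               # Token('if')
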
Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', - 'identifiers': b'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters "A" through "Z", the underscore "_" and, except for the first\ncharacter, the digits "0" through "9".\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n"unicodedata" module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= \n id_continue ::= \n xid_start ::= \n xid_continue ::= \n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "builtins" module. When not\n in interactive mode, "_" has no special meaning and is not defined.\n See section *The import statement*.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', - 'if': b'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', - 'imaginary': b'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', - 'import': b'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. 
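As a rough, illustrative sketch (the module name below is made up), a program can observe a failure in this first step by catching "ImportError":

   try:
       import some_optional_backend      # hypothetical module name
   except ImportError:
       # not found, or its initialization raised
       some_optional_backend = None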
Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause, loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is stored in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. The wild\ncard form of import --- "from module import *" --- is only allowed at\nthe module level. Attempting to use it in class or function\ndefinitions will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. 
Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine dynamically the modules to be loaded.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python where the feature\nbecomes standard.\n\nThe future statement is intended to ease migration to future versions\nof Python that introduce incompatible changes to the language. It\nallows use of the new features on a per-module basis before the\nrelease in which the feature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. 
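For instance, the following is accepted at the prompt (and, since true division is always enabled in Python 3, the statement is redundant and changes nothing):

   >>> from __future__ import division
   >>> 1 / 2
   0.5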
If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n', - 'in': b'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. 
Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. 
[4]\n', - 'integers': b'\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n', - 'lambda': b'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) are used to create\nanonymous functions. The expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def (arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n', - 'lists': b'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', - 'naming': b'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. 
If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\nglobal statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). 
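The "used before it is bound" rule described above is a common source of "UnboundLocalError"; a short illustrative session (not part of the reference text):

   >>> x = 10
   >>> def f():
   ...     print(x)    # 'x' is local to f() because of the assignment below
   ...     x = 20
   ...
   >>> f()
   Traceback (most recent call last):
     ...
   UnboundLocalError: local variable 'x' referenced before assignment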
By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', - 'nonlocal': b'\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope excluding\nglobals. This is important because the default behavior for binding is\nto search the local namespace first. The statement allows\nencapsulated code to rebind variables outside of the local scope\nbesides the global (module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n The specification for the "nonlocal" statement.\n', - 'numbers': b'\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. 
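For example, one literal of each kind:

   42        # integer literal
   3.14      # floating point literal
   2j        # imaginary literal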
There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', - 'numeric-types': b'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
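A tiny, made-up illustration of that rule (class names invented):

   class Base:
       def __add__(self, other):
           return 'Base.__add__'

   class Derived(Base):
       def __radd__(self, other):
           return 'Derived.__radd__'

   print(Base() + Derived())    # prints 'Derived.__radd__', not 'Base.__add__'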
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n', - 'objects': b'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. 
(The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (so\nyou should always close files explicitly).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. 
(Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', - 'operator-summary': b'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. 
For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] While comparisons between strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', - 'pass': b'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', - 'power': b'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n', - 'raise': b'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. 
If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler: the previous exception is then attached as the\nnew exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', - 'return': b'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement indicates that the\ngenerator is done and will cause "StopIteration" to be raised. The\nreturned value (if any) is used as an argument to construct\n"StopIteration" and becomes the "StopIteration.value" attribute.\n', - 'sequence-types': b'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. 
The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". 
This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n', - 'shifting': b'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', - 'slicings': b'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. 
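For example (an informal illustration):

   >>> s = [0, 1, 2, 3, 4]
   >>> s[1:3]              # a slicing used as an expression
   [1, 2]
   >>> del s[1:3]          # and as a target of a "del" statement
   >>> s
   [0, 3, 4]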
The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. The primary must evaluate\nto a mapping object, and it is indexed (using the same "__getitem__()"\nmethod as normal subscription) with a key that is constructed from the\nslice list, as follows. If the slice list contains at least one\ncomma, the key is a tuple containing the conversion of the slice\nitems; otherwise, the conversion of the lone slice item is the key.\nThe conversion of a slice item that is an expression is that\nexpression. The conversion of a proper slice is a slice object (see\nsection *The standard type hierarchy*) whose "start", "stop" and\n"step" attributes are the values of the expressions given as lower\nbound, upper bound and stride, respectively, substituting "None" for\nmissing expressions.\n', - 'specialattrs': b'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. 
Example:\n\n >>> int.__subclasses__()\n [<class \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n', - 'specialnames': b'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. 
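(Editorial aside, not part of the reference text above: a minimal, hedged sketch of the __new__()/__init__() split just described, using a hypothetical Point subclass of the immutable type tuple.)

   class Point(tuple):
       "Hypothetical example: an immutable 2-D point built on tuple."
       def __new__(cls, x, y):
           # The instance is created here; because tuple is immutable,
           # the values must be supplied to __new__(), not __init__().
           return super().__new__(cls, (x, y))
       def __init__(self, x, y):
           # Called with the same arguments once __new__() has returned
           # an instance of cls; nothing further to set up here.
           super().__init__()

   p = Point(1, 2)   # p == (1, 2)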
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])". As a special constraint on\n constructors, no value may be returned; doing so will cause a\n "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.last_traceback". Circular references which are garbage are\n detected and cleaned up when the cyclic garbage collector is\n enabled (it\'s on by default). Refer to the documentation for the\n "gc" module for more information about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. 
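(Editorial aside, not part of the reference text: a hedged sketch of a __repr__() that reads like the expression recreating the object; the Pair class is hypothetical.)

   class Pair:
       def __init__(self, left, right):
           self.left, self.right = left, right
       def __repr__(self):
           # Looks like a valid Python expression recreating the object.
           return 'Pair(%r, %r)' % (self.left, self.right)

   # repr(Pair(1, 'a')) == "Pair(1, 'a')"; evaluating that string in an
   # environment where Pair is defined builds an equivalent object.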
The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
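(Editorial aside, not part of the reference text: a hedged sketch of __eq__() defined together with __ne__(); the Version class is hypothetical, and NotImplemented is returned for operands the methods do not understand.)

   class Version:
       def __init__(self, major, minor):
           self.major, self.minor = major, minor
       def __eq__(self, other):
           if not isinstance(other, Version):
               # Let the other operand, or the default identity
               # comparison, decide.
               return NotImplemented
           return (self.major, self.minor) == (other.major, other.minor)
       def __ne__(self, other):
           result = self.__eq__(other)
           return result if result is NotImplemented else not result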
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)".\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. 
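(Editorial aside, not part of the reference text: continuing the hypothetical Version sketch, a __hash__() kept consistent with __eq__() by mixing exactly the components that take part in comparison.)

   class Version:
       def __init__(self, major, minor):
           self.major, self.minor = major, minor
       def __eq__(self, other):
           return (isinstance(other, Version) and
                   (self.major, self.minor) == (other.major, other.minor))
       def __hash__(self):
           # Objects that compare equal hash the same tuple, so they
           # receive the same hash value.
           return hash((self.major, self.minor))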
A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. 
store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". 
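(Editorial aside, not part of the reference text: a hedged sketch of a small data descriptor as defined above; the PositiveNumber and Account names are hypothetical.)

   class PositiveNumber:
       "Data descriptor validating assignment via __set__()."
       def __get__(self, instance, owner):
           if instance is None:
               return self                    # accessed on the class
           return instance.__dict__.get('_value', 0)
       def __set__(self, instance, value):
           if value <= 0:
               raise ValueError('expected a positive number')
           instance.__dict__['_value'] = value

   class Account:
       balance = PositiveNumber()   # the descriptor lives in the class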
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n class, *__slots__* reserves space for the declared variables and\n prevents the automatic creation of *__dict__* and *__weakref__* for\n each instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. 
If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. 
If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. 
That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. 
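(Editorial aside, not part of the reference text: a hedged sketch of a mapping built on the abstract base class mentioned above; once the five methods below are supplied, the ABC fills in keys(), items(), get(), pop(), setdefault(), update() and the "in" operator. The LowerDict class is hypothetical.)

   from collections.abc import MutableMapping

   class LowerDict(MutableMapping):
       "Hypothetical mapping that lowercases its string keys."
       def __init__(self):
           self._data = {}
       def __getitem__(self, key):
           return self._data[key.lower()]
       def __setitem__(self, key, value):
           self._data[key.lower()] = value
       def __delitem__(self, key):
           del self._data[key.lower()]
       def __iter__(self):
           return iter(self._data)
       def __len__(self):
           return len(self._data)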
It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "keys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). 
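(Editorial aside, not part of the reference text: a hedged sketch of a hypothetical Squares sequence that defines only __len__() and __getitem__(); iteration, the "in" operator, and reversed() then all work through the fallbacks described above.)

   class Squares:
       def __init__(self, n):
           self._n = n
       def __len__(self):
           return self._n
       def __getitem__(self, index):
           if not 0 <= index < self._n:
               # IndexError also ends "for" loops over the object.
               raise IndexError(index)
           return index * index

   # list(Squares(4)) == [0, 1, 4, 9]; 9 in Squares(4) is True;
   # list(reversed(Squares(4))) == [9, 4, 1, 0]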
Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. 
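(Editorial aside, not part of the reference text: a hedged sketch of the reflected-method pairing with a hypothetical Money class; each method returns NotImplemented for operands it cannot handle, so the other operand gets a chance.)

   class Money:
       def __init__(self, cents):
           self.cents = cents
       def __add__(self, other):
           if isinstance(other, Money):
               return Money(self.cents + other.cents)
           if isinstance(other, int):
               return Money(self.cents + other)
           return NotImplemented
       def __radd__(self, other):
           # Reached for "int + Money": int.__add__() returns
           # NotImplemented, so Python tries the right operand.
           return self.__add__(other)

   # Money(50) + 25 and 25 + Money(50) both yield Money(75); __radd__()
   # also lets sum() over Money objects work, since sum() starts from 0.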
[2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
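(Editorial aside, not part of the reference text: a hedged sketch of the protocol described below, using a hypothetical Timer context manager.)

   import time

   class Timer:
       "Hypothetical context manager timing the body of a with block."
       def __enter__(self):
           self.start = time.time()
           return self                 # bound to the "as" target, if any
       def __exit__(self, exc_type, exc_value, traceback):
           self.elapsed = time.time() - self.start
           return False                # do not suppress exceptions

   with Timer() as t:
       sum(range(1000))
   # t.elapsed now holds the duration of the block.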
Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... 
return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', - 'string-methods': b'\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xc3\x9f\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xc3\x9f\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Codec Base Classes*. 
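(Editorial aside, not part of the reference text: how the *errors* argument changes the result for a string containing a non-ASCII character.)

   >>> 'résumé'.encode('utf-8')
   b'r\xc3\xa9sum\xc3\xa9'
   >>> 'résumé'.encode('ascii', errors='replace')
   b'r?sum?'
   >>> 'résumé'.encode('ascii', errors='ignore')
   b'rsum'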
For a\n list of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01      012     0123    01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01  012 0123    01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. 
Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. 
A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. 
Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n For example:\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n >>> \'1,2,3\'.split(\',\', maxsplit=1)\n [\'1\', \'2,3\']\n >>> \'1,2,,3,\'.split(\',\')\n [\'1\', \'2\', \'\', \'3\', \'\']\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example:\n\n >>> \'1 2 3\'.split()\n [\'1\', \'2\', \'3\']\n >>> \'1 2 3\'.split(maxsplit=1)\n [\'1\', \'2 3\']\n >>> \' 1 2 3 \'.split()\n [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. 
The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n For example:\n\n >>> \'Hello world\'.title()\n \'Hello World\'\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or "None". Unmapped\n characters are left untouched. Characters mapped to "None" are\n deleted.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom\n character mapping codec using the "codecs" module (see\n "encodings.cp1251" for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return a copy of the string left filled with ASCII "\'0\'" digits to\n make a string of length *width*. A leading sign prefix ("\'+\'"/"\'-\'"\n is handled by inserting the padding *after* the sign character\n rather than before. 
The original string is returned if *width* is\n less than or equal to "len(s)".\n\n For example:\n\n >>> "42".zfill(5)\n \'00042\'\n >>> "-42".zfill(5)\n \'-0042\'\n', - 'strings': b'\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n stringescapeseq ::= "\\" <any source character>\n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= <any ASCII character except "\\" or newline or the quote>\n longbyteschar ::= <any ASCII character except "\\">\n bytesescapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix unicode strings with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\n New in version 3.3: The "\'rb\'" prefix of raw bytes literals has\n been added as a synonym of "\'br\'".\n\n New in version 3.3: Support for the unicode legacy literal\n ("u\'value\'") was reintroduced to simplify the maintenance of dual\n Python 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n byte with the given value. In a string literal, these escapes\n denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight\n hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
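For example, a short illustrative session ("\d" is simply an escape sequence that Python does not recognize):

   >>> '\d'                # unrecognized escape: the backslash is kept
   '\\d'
   >>> len('\d')
   2
   >>> '\u0041'            # recognized in a str literal
   'A'
   >>> b'\u0041'           # "\u" is not recognized in a bytes literal
   b'\\u0041'
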
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw string, string quotes can be escaped with a backslash,\nbut the backslash remains in the string; for example, "r"\\""" is a\nvalid string literal consisting of two characters: a backslash and a\ndouble quote; "r"\\"" is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n', - 'subscriptions': b'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription\n(lists or dictionaries for example). User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', - 'truth': b'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. 
(Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', - 'try': b'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n', - 'types': b'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode code\n points. 
All the code points in the range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "char"\n type; instead, every code point in the string is represented\n as a string object with length "1". The built-in function\n "ord()" converts a code point from its string form to an\n integer in the range "0 - 10FFFF"; "chr()" converts an\n integer in the range "0 - 10FFFF" to the corresponding length\n "1" string object. "str.encode()" can be used to convert a\n "str" to "bytes" using the given text encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. 
The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value | |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | | contain bindings for the | |\n | | function\'s free variables. 
| |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | and "\'return\'" for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. 
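A minimal interactive sketch of this rule, using the same "C", "f" and "x" names as the prose example that follows:

   >>> class C:
   ...     def f(self, arg):
   ...         return (self, arg)
   ...
   >>> x = C()
   >>> x.f(1) == C.f(x, 1)
   True
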
For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). 
Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. 
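A short sketch of these two transformations (the class and method names here are invented for illustration):

   >>> class C:
   ...     @classmethod
   ...     def cm(cls):
   ...         return cls
   ...     @staticmethod
   ...     def sm():
   ...         return 'called directly'
   ...
   >>> C.cm.__self__ is C          # class method: bound to the class itself
   True
   >>> type(C.__dict__['sm'])      # stored as a staticmethod wrapper ...
   <class 'staticmethod'>
   >>> C.sm()                      # ... retrieved as the plain wrapped function
   'called directly'
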
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
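A small illustration of this split between function object and code object (the function "g" is invented for the example):

   >>> def g(a, b=10):
   ...     return a + b
   ...
   >>> g.__defaults__          # default values live on the function object
   (10,)
   >>> g.__code__.co_name      # the code object describes only the compiled body
   'g'
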
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n', - 'typesfunctions': b'\nFunctions\n*********\n\nFunction objects are created by function definitions. 
The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', - 'typesmapping': b'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()", if the\n key *key* is not present, the "d[key]" operation calls that\n method with the key *key* as argument. The "d[key]" operation\n then returns or raises whatever is returned or raised by the\n "__missing__(key)" call if the key is not present. No other\n operations or methods invoke "__missing__()". If "__missing__()"\n is not defined, "KeyError" is raised. "__missing__()" must be a\n method; it cannot be an instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See "collections.Counter" for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. 
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', - 'typesmethods': b'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. 
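First, a small hypothetical sketch of the "__self__"/"__func__" equivalence described above (the class and method names are invented for illustration):

    class Greeter:
        def hello(self, name):
            return "hi " + name

    g = Greeter()
    m = g.hello                         # a bound method object
    assert m.__self__ is g              # the instance the method operates on
    assert m.__func__ is Greeter.hello  # the underlying function object
    # Calling m(...) is equivalent to m.__func__(m.__self__, ...):
    assert m("ada") == m.__func__(m.__self__, "ada")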
Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "", line 1, in \n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', - 'typesmodules': b'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "". If loaded from a file, they are written as\n"".\n', - 'typesseq': b'\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type, *n*, *i*, *j* and *k* are\nintegers and *x* is an arbitrary object that meets any type and value\nrestrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. 
The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. 
If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to a "io.StringIO" instance\n and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
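Before that table, a brief illustrative sketch of the linear-time concatenation advice from note 6 above (the data is hypothetical):

    import io

    parts = ["alpha", "beta", "gamma"]

    # Repeated "+=" on a str builds a new string each time (quadratic).
    s = ""
    for p in parts:
        s += p

    # Linear alternatives suggested above:
    s1 = "".join(parts)      # build a list, then str.join()

    buf = io.StringIO()      # or write to io.StringIO and read it back
    for p in parts:
        buf.write(p)
    s2 = buf.getvalue()

    assert s == s1 == s2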
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). 
Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. 
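For instance (an illustrative sketch of the formulas above):

    # Positive step: r[i] = start + step*i while r[i] < stop
    list(range(1, 10, 3))     # [1, 4, 7]
    # Negative step: r[i] = start + step*i while r[i] > stop
    list(range(10, 0, -3))    # [10, 7, 4, 1]
    # Empty: r[0] (= 5) does not satisfy r[0] < stop (= 5)
    list(range(5, 5))         # []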
Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', - 'typesseq-mutable': b'\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n', - 'unary': b'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', - 'while': b'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', - 'with': b'\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', - 'yield': b'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nA "yield" statement is semantically equivalent to a *yield\nexpression*. The yield statement can be used to omit the parentheses\nthat would otherwise be required in the equivalent yield expression\nstatement. For example, the yield statements\n\n yield \n yield from \n\nare equivalent to the yield expression statements\n\n (yield )\n (yield from )\n\nYield expressions and statements are only used when defining a\n*generator* function, and are only used in the body of the generator\nfunction. Using yield in a function definition is sufficient to cause\nthat definition to create a generator function instead of a normal\nfunction.\n\nFor full details of "yield" semantics, refer to the *Yield\nexpressions* section.\n'} +# Autogenerated by Sphinx on Mon Sep 22 23:49:46 2014 +topics = {'assert': '\n' + 'The "assert" statement\n' + '**********************\n' + '\n' + 'Assert statements are a convenient way to insert debugging ' + 'assertions\n' + 'into a program:\n' + '\n' + ' assert_stmt ::= "assert" expression ["," expression]\n' + '\n' + 'The simple form, "assert expression", is equivalent to\n' + '\n' + ' if __debug__:\n' + ' if not expression: raise AssertionError\n' + '\n' + 'The extended form, "assert expression1, expression2", is ' + 'equivalent to\n' + '\n' + ' if __debug__:\n' + ' if not expression1: raise AssertionError(expression2)\n' + '\n' + 'These equivalences assume that "__debug__" and "AssertionError" ' + 'refer\n' + 'to the built-in variables with those names. In the current\n' + 'implementation, the built-in variable "__debug__" is "True" ' + 'under\n' + 'normal circumstances, "False" when optimization is requested ' + '(command\n' + 'line option -O). The current code generator emits no code for ' + 'an\n' + 'assert statement when optimization is requested at compile ' + 'time. Note\n' + 'that it is unnecessary to include the source code for the ' + 'expression\n' + 'that failed in the error message; it will be displayed as part ' + 'of the\n' + 'stack trace.\n' + '\n' + 'Assignments to "__debug__" are illegal. 
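As a small illustrative sketch of the equivalence given above (the function and message are hypothetical):

    def check(x):
        # "assert x >= 0, 'x must be non-negative'" behaves roughly like:
        if __debug__:
            if not x >= 0:
                raise AssertionError("x must be non-negative")

    check(3)       # passes silently
    # check(-1)    # would raise AssertionError (unless run with -O)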
The value for the ' + 'built-in\n' + 'variable is determined when the interpreter starts.\n', + 'assignment': '\n' + 'Assignment statements\n' + '*********************\n' + '\n' + 'Assignment statements are used to (re)bind names to values ' + 'and to\n' + 'modify attributes or items of mutable objects:\n' + '\n' + ' assignment_stmt ::= (target_list "=")+ (expression_list | ' + 'yield_expression)\n' + ' target_list ::= target ("," target)* [","]\n' + ' target ::= identifier\n' + ' | "(" target_list ")"\n' + ' | "[" target_list "]"\n' + ' | attributeref\n' + ' | subscription\n' + ' | slicing\n' + ' | "*" target\n' + '\n' + '(See section *Primaries* for the syntax definitions for\n' + '*attributeref*, *subscription*, and *slicing*.)\n' + '\n' + 'An assignment statement evaluates the expression list ' + '(remember that\n' + 'this can be a single expression or a comma-separated list, ' + 'the latter\n' + 'yielding a tuple) and assigns the single resulting object to ' + 'each of\n' + 'the target lists, from left to right.\n' + '\n' + 'Assignment is defined recursively depending on the form of ' + 'the target\n' + '(list). When a target is part of a mutable object (an ' + 'attribute\n' + 'reference, subscription or slicing), the mutable object ' + 'must\n' + 'ultimately perform the assignment and decide about its ' + 'validity, and\n' + 'may raise an exception if the assignment is unacceptable. ' + 'The rules\n' + 'observed by various types and the exceptions raised are ' + 'given with the\n' + 'definition of the object types (see section *The standard ' + 'type\n' + 'hierarchy*).\n' + '\n' + 'Assignment of an object to a target list, optionally ' + 'enclosed in\n' + 'parentheses or square brackets, is recursively defined as ' + 'follows.\n' + '\n' + '* If the target list is a single target: The object is ' + 'assigned to\n' + ' that target.\n' + '\n' + '* If the target list is a comma-separated list of targets: ' + 'The\n' + ' object must be an iterable with the same number of items ' + 'as there\n' + ' are targets in the target list, and the items are ' + 'assigned, from\n' + ' left to right, to the corresponding targets.\n' + '\n' + ' * If the target list contains one target prefixed with an\n' + ' asterisk, called a "starred" target: The object must be ' + 'a sequence\n' + ' with at least as many items as there are targets in the ' + 'target\n' + ' list, minus one. The first items of the sequence are ' + 'assigned,\n' + ' from left to right, to the targets before the starred ' + 'target. The\n' + ' final items of the sequence are assigned to the targets ' + 'after the\n' + ' starred target. 
A list of the remaining items in the ' + 'sequence is\n' + ' then assigned to the starred target (the list can be ' + 'empty).\n' + '\n' + ' * Else: The object must be a sequence with the same number ' + 'of\n' + ' items as there are targets in the target list, and the ' + 'items are\n' + ' assigned, from left to right, to the corresponding ' + 'targets.\n' + '\n' + 'Assignment of an object to a single target is recursively ' + 'defined as\n' + 'follows.\n' + '\n' + '* If the target is an identifier (name):\n' + '\n' + ' * If the name does not occur in a "global" or "nonlocal" ' + 'statement\n' + ' in the current code block: the name is bound to the ' + 'object in the\n' + ' current local namespace.\n' + '\n' + ' * Otherwise: the name is bound to the object in the ' + 'global\n' + ' namespace or the outer namespace determined by ' + '"nonlocal",\n' + ' respectively.\n' + '\n' + ' The name is rebound if it was already bound. This may ' + 'cause the\n' + ' reference count for the object previously bound to the ' + 'name to reach\n' + ' zero, causing the object to be deallocated and its ' + 'destructor (if it\n' + ' has one) to be called.\n' + '\n' + '* If the target is a target list enclosed in parentheses or ' + 'in\n' + ' square brackets: The object must be an iterable with the ' + 'same number\n' + ' of items as there are targets in the target list, and its ' + 'items are\n' + ' assigned, from left to right, to the corresponding ' + 'targets.\n' + '\n' + '* If the target is an attribute reference: The primary ' + 'expression in\n' + ' the reference is evaluated. It should yield an object ' + 'with\n' + ' assignable attributes; if this is not the case, ' + '"TypeError" is\n' + ' raised. That object is then asked to assign the assigned ' + 'object to\n' + ' the given attribute; if it cannot perform the assignment, ' + 'it raises\n' + ' an exception (usually but not necessarily ' + '"AttributeError").\n' + '\n' + ' Note: If the object is a class instance and the attribute ' + 'reference\n' + ' occurs on both sides of the assignment operator, the RHS ' + 'expression,\n' + ' "a.x" can access either an instance attribute or (if no ' + 'instance\n' + ' attribute exists) a class attribute. The LHS target "a.x" ' + 'is always\n' + ' set as an instance attribute, creating it if necessary. ' + 'Thus, the\n' + ' two occurrences of "a.x" do not necessarily refer to the ' + 'same\n' + ' attribute: if the RHS expression refers to a class ' + 'attribute, the\n' + ' LHS creates a new instance attribute as the target of the\n' + ' assignment:\n' + '\n' + ' class Cls:\n' + ' x = 3 # class variable\n' + ' inst = Cls()\n' + ' inst.x = inst.x + 1 # writes inst.x as 4 leaving ' + 'Cls.x as 3\n' + '\n' + ' This description does not necessarily apply to descriptor\n' + ' attributes, such as properties created with "property()".\n' + '\n' + '* If the target is a subscription: The primary expression in ' + 'the\n' + ' reference is evaluated. It should yield either a mutable ' + 'sequence\n' + ' object (such as a list) or a mapping object (such as a ' + 'dictionary).\n' + ' Next, the subscript expression is evaluated.\n' + '\n' + ' If the primary is a mutable sequence object (such as a ' + 'list), the\n' + ' subscript must yield an integer. If it is negative, the ' + "sequence's\n" + ' length is added to it. The resulting value must be a ' + 'nonnegative\n' + " integer less than the sequence's length, and the sequence " + 'is asked\n' + ' to assign the assigned object to its item with that ' + 'index. 
If the\n' + ' index is out of range, "IndexError" is raised (assignment ' + 'to a\n' + ' subscripted sequence cannot add new items to a list).\n' + '\n' + ' If the primary is a mapping object (such as a dictionary), ' + 'the\n' + " subscript must have a type compatible with the mapping's " + 'key type,\n' + ' and the mapping is then asked to create a key/datum pair ' + 'which maps\n' + ' the subscript to the assigned object. This can either ' + 'replace an\n' + ' existing key/value pair with the same key value, or insert ' + 'a new\n' + ' key/value pair (if no key with the same value existed).\n' + '\n' + ' For user-defined objects, the "__setitem__()" method is ' + 'called with\n' + ' appropriate arguments.\n' + '\n' + '* If the target is a slicing: The primary expression in the\n' + ' reference is evaluated. It should yield a mutable ' + 'sequence object\n' + ' (such as a list). The assigned object should be a ' + 'sequence object\n' + ' of the same type. Next, the lower and upper bound ' + 'expressions are\n' + ' evaluated, insofar they are present; defaults are zero and ' + 'the\n' + " sequence's length. The bounds should evaluate to " + 'integers. If\n' + " either bound is negative, the sequence's length is added " + 'to it. The\n' + ' resulting bounds are clipped to lie between zero and the ' + "sequence's\n" + ' length, inclusive. Finally, the sequence object is asked ' + 'to replace\n' + ' the slice with the items of the assigned sequence. The ' + 'length of\n' + ' the slice may be different from the length of the assigned ' + 'sequence,\n' + ' thus changing the length of the target sequence, if the ' + 'target\n' + ' sequence allows it.\n' + '\n' + '**CPython implementation detail:** In the current ' + 'implementation, the\n' + 'syntax for targets is taken to be the same as for ' + 'expressions, and\n' + 'invalid syntax is rejected during the code generation phase, ' + 'causing\n' + 'less detailed error messages.\n' + '\n' + 'Although the definition of assignment implies that overlaps ' + 'between\n' + 'the left-hand side and the right-hand side are ' + "'simultanenous' (for\n" + 'example "a, b = b, a" swaps two variables), overlaps ' + '*within* the\n' + 'collection of assigned-to variables occur left-to-right, ' + 'sometimes\n' + 'resulting in confusion. 
For instance, the following program ' + 'prints\n' + '"[0, 2]":\n' + '\n' + ' x = [0, 1]\n' + ' i = 0\n' + ' i, x[i] = 1, 2 # i is updated, then x[i] is ' + 'updated\n' + ' print(x)\n' + '\n' + 'See also: **PEP 3132** - Extended Iterable Unpacking\n' + '\n' + ' The specification for the "*target" feature.\n' + '\n' + '\n' + 'Augmented assignment statements\n' + '===============================\n' + '\n' + 'Augmented assignment is the combination, in a single ' + 'statement, of a\n' + 'binary operation and an assignment statement:\n' + '\n' + ' augmented_assignment_stmt ::= augtarget augop ' + '(expression_list | yield_expression)\n' + ' augtarget ::= identifier | attributeref | ' + 'subscription | slicing\n' + ' augop ::= "+=" | "-=" | "*=" | "/=" | ' + '"//=" | "%=" | "**="\n' + ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' + '\n' + '(See section *Primaries* for the syntax definitions of the ' + 'last three\n' + 'symbols.)\n' + '\n' + 'An augmented assignment evaluates the target (which, unlike ' + 'normal\n' + 'assignment statements, cannot be an unpacking) and the ' + 'expression\n' + 'list, performs the binary operation specific to the type of ' + 'assignment\n' + 'on the two operands, and assigns the result to the original ' + 'target.\n' + 'The target is only evaluated once.\n' + '\n' + 'An augmented assignment expression like "x += 1" can be ' + 'rewritten as\n' + '"x = x + 1" to achieve a similar, but not exactly equal ' + 'effect. In the\n' + 'augmented version, "x" is only evaluated once. Also, when ' + 'possible,\n' + 'the actual operation is performed *in-place*, meaning that ' + 'rather than\n' + 'creating a new object and assigning that to the target, the ' + 'old object\n' + 'is modified instead.\n' + '\n' + 'Unlike normal assignments, augmented assignments evaluate ' + 'the left-\n' + 'hand side *before* evaluating the right-hand side. For ' + 'example, "a[i]\n' + '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' + 'performs\n' + 'the addition, and lastly, it writes the result back to ' + '"a[i]".\n' + '\n' + 'With the exception of assigning to tuples and multiple ' + 'targets in a\n' + 'single statement, the assignment done by augmented ' + 'assignment\n' + 'statements is handled the same way as normal assignments. ' + 'Similarly,\n' + 'with the exception of the possible *in-place* behavior, the ' + 'binary\n' + 'operation performed by augmented assignment is the same as ' + 'the normal\n' + 'binary operations.\n' + '\n' + 'For targets which are attribute references, the same *caveat ' + 'about\n' + 'class and instance attributes* applies as for regular ' + 'assignments.\n', + 'atom-identifiers': '\n' + 'Identifiers (Names)\n' + '*******************\n' + '\n' + 'An identifier occurring as an atom is a name. See ' + 'section\n' + '*Identifiers and keywords* for lexical definition and ' + 'section *Naming\n' + 'and binding* for documentation of naming and binding.\n' + '\n' + 'When the name is bound to an object, evaluation of the ' + 'atom yields\n' + 'that object. When a name is not bound, an attempt to ' + 'evaluate it\n' + 'raises a "NameError" exception.\n' + '\n' + '**Private name mangling:** When an identifier that ' + 'textually occurs in\n' + 'a class definition begins with two or more underscore ' + 'characters and\n' + 'does not end in two or more underscores, it is ' + 'considered a *private\n' + 'name* of that class. Private names are transformed to ' + 'a longer form\n' + 'before code is generated for them. 
The transformation ' + 'inserts the\n' + 'class name, with leading underscores removed and a ' + 'single underscore\n' + 'inserted, in front of the name. For example, the ' + 'identifier "__spam"\n' + 'occurring in a class named "Ham" will be transformed ' + 'to "_Ham__spam".\n' + 'This transformation is independent of the syntactical ' + 'context in which\n' + 'the identifier is used. If the transformed name is ' + 'extremely long\n' + '(longer than 255 characters), implementation defined ' + 'truncation may\n' + 'happen. If the class name consists only of ' + 'underscores, no\n' + 'transformation is done.\n', + 'atom-literals': '\n' + 'Literals\n' + '********\n' + '\n' + 'Python supports string and bytes literals and various ' + 'numeric\n' + 'literals:\n' + '\n' + ' literal ::= stringliteral | bytesliteral\n' + ' | integer | floatnumber | imagnumber\n' + '\n' + 'Evaluation of a literal yields an object of the given ' + 'type (string,\n' + 'bytes, integer, floating point number, complex number) ' + 'with the given\n' + 'value. The value may be approximated in the case of ' + 'floating point\n' + 'and imaginary (complex) literals. See section *Literals* ' + 'for details.\n' + '\n' + 'All literals correspond to immutable data types, and ' + 'hence the\n' + "object's identity is less important than its value. " + 'Multiple\n' + 'evaluations of literals with the same value (either the ' + 'same\n' + 'occurrence in the program text or a different occurrence) ' + 'may obtain\n' + 'the same object or a different object with the same ' + 'value.\n', + 'attribute-access': '\n' + 'Customizing attribute access\n' + '****************************\n' + '\n' + 'The following methods can be defined to customize the ' + 'meaning of\n' + 'attribute access (use of, assignment to, or deletion ' + 'of "x.name") for\n' + 'class instances.\n' + '\n' + 'object.__getattr__(self, name)\n' + '\n' + ' Called when an attribute lookup has not found the ' + 'attribute in the\n' + ' usual places (i.e. it is not an instance attribute ' + 'nor is it found\n' + ' in the class tree for "self"). "name" is the ' + 'attribute name. This\n' + ' method should return the (computed) attribute value ' + 'or raise an\n' + ' "AttributeError" exception.\n' + '\n' + ' Note that if the attribute is found through the ' + 'normal mechanism,\n' + ' "__getattr__()" is not called. (This is an ' + 'intentional asymmetry\n' + ' between "__getattr__()" and "__setattr__()".) This ' + 'is done both for\n' + ' efficiency reasons and because otherwise ' + '"__getattr__()" would have\n' + ' no way to access other attributes of the instance. ' + 'Note that at\n' + ' least for instance variables, you can fake total ' + 'control by not\n' + ' inserting any values in the instance attribute ' + 'dictionary (but\n' + ' instead inserting them in another object). See ' + 'the\n' + ' "__getattribute__()" method below for a way to ' + 'actually get total\n' + ' control over attribute access.\n' + '\n' + 'object.__getattribute__(self, name)\n' + '\n' + ' Called unconditionally to implement attribute ' + 'accesses for\n' + ' instances of the class. If the class also defines ' + '"__getattr__()",\n' + ' the latter will not be called unless ' + '"__getattribute__()" either\n' + ' calls it explicitly or raises an "AttributeError". ' + 'This method\n' + ' should return the (computed) attribute value or ' + 'raise an\n' + ' "AttributeError" exception. 
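A minimal hypothetical sketch of the "__getattr__()" fallback described above (the recursion caveat for "__getattribute__()" is discussed next):

    class Config:
        def __init__(self):
            self.known = 1
        def __getattr__(self, name):
            # Called only when normal attribute lookup fails.
            return "<missing: %s>" % name

    c = Config()
    assert c.known == 1                    # found normally; no __getattr__ call
    assert c.other == "<missing: other>"   # falls back to __getattr__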
In order to avoid ' + 'infinite recursion in\n' + ' this method, its implementation should always call ' + 'the base class\n' + ' method with the same name to access any attributes ' + 'it needs, for\n' + ' example, "object.__getattribute__(self, name)".\n' + '\n' + ' Note: This method may still be bypassed when ' + 'looking up special\n' + ' methods as the result of implicit invocation via ' + 'language syntax\n' + ' or built-in functions. See *Special method ' + 'lookup*.\n' + '\n' + 'object.__setattr__(self, name, value)\n' + '\n' + ' Called when an attribute assignment is attempted. ' + 'This is called\n' + ' instead of the normal mechanism (i.e. store the ' + 'value in the\n' + ' instance dictionary). *name* is the attribute name, ' + '*value* is the\n' + ' value to be assigned to it.\n' + '\n' + ' If "__setattr__()" wants to assign to an instance ' + 'attribute, it\n' + ' should call the base class method with the same ' + 'name, for example,\n' + ' "object.__setattr__(self, name, value)".\n' + '\n' + 'object.__delattr__(self, name)\n' + '\n' + ' Like "__setattr__()" but for attribute deletion ' + 'instead of\n' + ' assignment. This should only be implemented if ' + '"del obj.name" is\n' + ' meaningful for the object.\n' + '\n' + 'object.__dir__(self)\n' + '\n' + ' Called when "dir()" is called on the object. A ' + 'sequence must be\n' + ' returned. "dir()" converts the returned sequence to ' + 'a list and\n' + ' sorts it.\n' + '\n' + '\n' + 'Implementing Descriptors\n' + '========================\n' + '\n' + 'The following methods only apply when an instance of ' + 'the class\n' + 'containing the method (a so-called *descriptor* class) ' + 'appears in an\n' + '*owner* class (the descriptor must be in either the ' + "owner's class\n" + 'dictionary or in the class dictionary for one of its ' + 'parents). In the\n' + 'examples below, "the attribute" refers to the ' + 'attribute whose name is\n' + "the key of the property in the owner class' " + '"__dict__".\n' + '\n' + 'object.__get__(self, instance, owner)\n' + '\n' + ' Called to get the attribute of the owner class ' + '(class attribute\n' + ' access) or of an instance of that class (instance ' + 'attribute\n' + ' access). *owner* is always the owner class, while ' + '*instance* is the\n' + ' instance that the attribute was accessed through, ' + 'or "None" when\n' + ' the attribute is accessed through the *owner*. ' + 'This method should\n' + ' return the (computed) attribute value or raise an ' + '"AttributeError"\n' + ' exception.\n' + '\n' + 'object.__set__(self, instance, value)\n' + '\n' + ' Called to set the attribute on an instance ' + '*instance* of the owner\n' + ' class to a new value, *value*.\n' + '\n' + 'object.__delete__(self, instance)\n' + '\n' + ' Called to delete the attribute on an instance ' + '*instance* of the\n' + ' owner class.\n' + '\n' + 'The attribute "__objclass__" is interpreted by the ' + '"inspect" module as\n' + 'specifying the class where this object was defined ' + '(setting this\n' + 'appropriately can assist in runtime introspection of ' + 'dynamic class\n' + 'attributes). 
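Before continuing with "__objclass__", a minimal illustrative sketch of the descriptor protocol methods listed above (the class names are hypothetical):

    class Stored:
        # A data descriptor that keeps its value in the instance __dict__.
        def __init__(self, name):
            self.name = name
        def __get__(self, instance, owner):
            if instance is None:
                return self                     # accessed on the class itself
            return instance.__dict__[self.name]
        def __set__(self, instance, value):
            instance.__dict__[self.name] = value
        def __delete__(self, instance):
            del instance.__dict__[self.name]

    class Point:
        x = Stored("x")

    p = Point()
    p.x = 3              # invokes Stored.__set__
    assert p.x == 3      # invokes Stored.__get__
    del p.x              # invokes Stored.__delete__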
For callables, it may indicate that an ' + 'instance of the\n' + 'given type (or a subclass) is expected or required as ' + 'the first\n' + 'positional argument (for example, CPython sets this ' + 'attribute for\n' + 'unbound methods that are implemented in C).\n' + '\n' + '\n' + 'Invoking Descriptors\n' + '====================\n' + '\n' + 'In general, a descriptor is an object attribute with ' + '"binding\n' + 'behavior", one whose attribute access has been ' + 'overridden by methods\n' + 'in the descriptor protocol: "__get__()", "__set__()", ' + 'and\n' + '"__delete__()". If any of those methods are defined ' + 'for an object, it\n' + 'is said to be a descriptor.\n' + '\n' + 'The default behavior for attribute access is to get, ' + 'set, or delete\n' + "the attribute from an object's dictionary. For " + 'instance, "a.x" has a\n' + 'lookup chain starting with "a.__dict__[\'x\']", then\n' + '"type(a).__dict__[\'x\']", and continuing through the ' + 'base classes of\n' + '"type(a)" excluding metaclasses.\n' + '\n' + 'However, if the looked-up value is an object defining ' + 'one of the\n' + 'descriptor methods, then Python may override the ' + 'default behavior and\n' + 'invoke the descriptor method instead. Where this ' + 'occurs in the\n' + 'precedence chain depends on which descriptor methods ' + 'were defined and\n' + 'how they were called.\n' + '\n' + 'The starting point for descriptor invocation is a ' + 'binding, "a.x". How\n' + 'the arguments are assembled depends on "a":\n' + '\n' + 'Direct Call\n' + ' The simplest and least common call is when user ' + 'code directly\n' + ' invokes a descriptor method: "x.__get__(a)".\n' + '\n' + 'Instance Binding\n' + ' If binding to an object instance, "a.x" is ' + 'transformed into the\n' + ' call: "type(a).__dict__[\'x\'].__get__(a, ' + 'type(a))".\n' + '\n' + 'Class Binding\n' + ' If binding to a class, "A.x" is transformed into ' + 'the call:\n' + ' "A.__dict__[\'x\'].__get__(None, A)".\n' + '\n' + 'Super Binding\n' + ' If "a" is an instance of "super", then the binding ' + '"super(B,\n' + ' obj).m()" searches "obj.__class__.__mro__" for the ' + 'base class "A"\n' + ' immediately preceding "B" and then invokes the ' + 'descriptor with the\n' + ' call: "A.__dict__[\'m\'].__get__(obj, ' + 'obj.__class__)".\n' + '\n' + 'For instance bindings, the precedence of descriptor ' + 'invocation depends\n' + 'on the which descriptor methods are defined. A ' + 'descriptor can define\n' + 'any combination of "__get__()", "__set__()" and ' + '"__delete__()". If it\n' + 'does not define "__get__()", then accessing the ' + 'attribute will return\n' + 'the descriptor object itself unless there is a value ' + "in the object's\n" + 'instance dictionary. If the descriptor defines ' + '"__set__()" and/or\n' + '"__delete__()", it is a data descriptor; if it defines ' + 'neither, it is\n' + 'a non-data descriptor. Normally, data descriptors ' + 'define both\n' + '"__get__()" and "__set__()", while non-data ' + 'descriptors have just the\n' + '"__get__()" method. Data descriptors with "__set__()" ' + 'and "__get__()"\n' + 'defined always override a redefinition in an instance ' + 'dictionary. In\n' + 'contrast, non-data descriptors can be overridden by ' + 'instances.\n' + '\n' + 'Python methods (including "staticmethod()" and ' + '"classmethod()") are\n' + 'implemented as non-data descriptors. Accordingly, ' + 'instances can\n' + 'redefine and override methods. 
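A tiny illustrative sketch of that precedence (the class is hypothetical):

    class Widget:
        def describe(self):
            return "generic widget"

    w1 = Widget()
    w2 = Widget()
    # Functions are non-data descriptors, so an entry in the instance
    # dictionary takes precedence over the method found on the class:
    w2.describe = lambda: "customized widget"
    assert w1.describe() == "generic widget"
    assert w2.describe() == "customized widget"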
This allows individual ' + 'instances to\n' + 'acquire behaviors that differ from other instances of ' + 'the same class.\n' + '\n' + 'The "property()" function is implemented as a data ' + 'descriptor.\n' + 'Accordingly, instances cannot override the behavior of ' + 'a property.\n' + '\n' + '\n' + '__slots__\n' + '=========\n' + '\n' + 'By default, instances of classes have a dictionary for ' + 'attribute\n' + 'storage. This wastes space for objects having very ' + 'few instance\n' + 'variables. The space consumption can become acute ' + 'when creating large\n' + 'numbers of instances.\n' + '\n' + 'The default can be overridden by defining *__slots__* ' + 'in a class\n' + 'definition. The *__slots__* declaration takes a ' + 'sequence of instance\n' + 'variables and reserves just enough space in each ' + 'instance to hold a\n' + 'value for each variable. Space is saved because ' + '*__dict__* is not\n' + 'created for each instance.\n' + '\n' + 'object.__slots__\n' + '\n' + ' This class variable can be assigned a string, ' + 'iterable, or sequence\n' + ' of strings with variable names used by instances. ' + 'If defined in a\n' + ' class, *__slots__* reserves space for the declared ' + 'variables and\n' + ' prevents the automatic creation of *__dict__* and ' + '*__weakref__* for\n' + ' each instance.\n' + '\n' + '\n' + 'Notes on using *__slots__*\n' + '--------------------------\n' + '\n' + '* When inheriting from a class without *__slots__*, ' + 'the *__dict__*\n' + ' attribute of that class will always be accessible, ' + 'so a *__slots__*\n' + ' definition in the subclass is meaningless.\n' + '\n' + '* Without a *__dict__* variable, instances cannot be ' + 'assigned new\n' + ' variables not listed in the *__slots__* definition. ' + 'Attempts to\n' + ' assign to an unlisted variable name raises ' + '"AttributeError". If\n' + ' dynamic assignment of new variables is desired, then ' + 'add\n' + ' "\'__dict__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* Without a *__weakref__* variable for each instance, ' + 'classes\n' + ' defining *__slots__* do not support weak references ' + 'to its\n' + ' instances. If weak reference support is needed, then ' + 'add\n' + ' "\'__weakref__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* *__slots__* are implemented at the class level by ' + 'creating\n' + ' descriptors (*Implementing Descriptors*) for each ' + 'variable name. As\n' + ' a result, class attributes cannot be used to set ' + 'default values for\n' + ' instance variables defined by *__slots__*; ' + 'otherwise, the class\n' + ' attribute would overwrite the descriptor ' + 'assignment.\n' + '\n' + '* The action of a *__slots__* declaration is limited ' + 'to the class\n' + ' where it is defined. As a result, subclasses will ' + 'have a *__dict__*\n' + ' unless they also define *__slots__* (which must only ' + 'contain names\n' + ' of any *additional* slots).\n' + '\n' + '* If a class defines a slot also defined in a base ' + 'class, the\n' + ' instance variable defined by the base class slot is ' + 'inaccessible\n' + ' (except by retrieving its descriptor directly from ' + 'the base class).\n' + ' This renders the meaning of the program undefined. 
' + 'In the future, a\n' + ' check may be added to prevent this.\n' + '\n' + '* Nonempty *__slots__* does not work for classes ' + 'derived from\n' + ' "variable-length" built-in types such as "int", ' + '"bytes" and "tuple".\n' + '\n' + '* Any non-string iterable may be assigned to ' + '*__slots__*. Mappings\n' + ' may also be used; however, in the future, special ' + 'meaning may be\n' + ' assigned to the values corresponding to each key.\n' + '\n' + '* *__class__* assignment works only if both classes ' + 'have the same\n' + ' *__slots__*.\n', + 'attribute-references': '\n' + 'Attribute references\n' + '********************\n' + '\n' + 'An attribute reference is a primary followed by a ' + 'period and a name:\n' + '\n' + ' attributeref ::= primary "." identifier\n' + '\n' + 'The primary must evaluate to an object of a type ' + 'that supports\n' + 'attribute references, which most objects do. This ' + 'object is then\n' + 'asked to produce the attribute whose name is the ' + 'identifier. This\n' + 'production can be customized by overriding the ' + '"__getattr__()" method.\n' + 'If this attribute is not available, the exception ' + '"AttributeError" is\n' + 'raised. Otherwise, the type and value of the ' + 'object produced is\n' + 'determined by the object. Multiple evaluations of ' + 'the same attribute\n' + 'reference may yield different objects.\n', + 'augassign': '\n' + 'Augmented assignment statements\n' + '*******************************\n' + '\n' + 'Augmented assignment is the combination, in a single ' + 'statement, of a\n' + 'binary operation and an assignment statement:\n' + '\n' + ' augmented_assignment_stmt ::= augtarget augop ' + '(expression_list | yield_expression)\n' + ' augtarget ::= identifier | attributeref | ' + 'subscription | slicing\n' + ' augop ::= "+=" | "-=" | "*=" | "/=" | ' + '"//=" | "%=" | "**="\n' + ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' + '\n' + '(See section *Primaries* for the syntax definitions of the ' + 'last three\n' + 'symbols.)\n' + '\n' + 'An augmented assignment evaluates the target (which, unlike ' + 'normal\n' + 'assignment statements, cannot be an unpacking) and the ' + 'expression\n' + 'list, performs the binary operation specific to the type of ' + 'assignment\n' + 'on the two operands, and assigns the result to the original ' + 'target.\n' + 'The target is only evaluated once.\n' + '\n' + 'An augmented assignment expression like "x += 1" can be ' + 'rewritten as\n' + '"x = x + 1" to achieve a similar, but not exactly equal ' + 'effect. In the\n' + 'augmented version, "x" is only evaluated once. Also, when ' + 'possible,\n' + 'the actual operation is performed *in-place*, meaning that ' + 'rather than\n' + 'creating a new object and assigning that to the target, the ' + 'old object\n' + 'is modified instead.\n' + '\n' + 'Unlike normal assignments, augmented assignments evaluate the ' + 'left-\n' + 'hand side *before* evaluating the right-hand side. For ' + 'example, "a[i]\n' + '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' + 'performs\n' + 'the addition, and lastly, it writes the result back to ' + '"a[i]".\n' + '\n' + 'With the exception of assigning to tuples and multiple ' + 'targets in a\n' + 'single statement, the assignment done by augmented ' + 'assignment\n' + 'statements is handled the same way as normal assignments. 
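The in-place behaviour of augmented assignment mentioned above is easy to see with a mutable target; this short interactive sketch is illustrative only:

    >>> xs = ys = [1, 2]
    >>> xs += [3]            # performed in place: the existing list is extended
    >>> ys
    [1, 2, 3]
    >>> s = t = (1, 2)
    >>> s += (3,)            # tuples are immutable, so a new object is bound to s
    >>> t
    (1, 2)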
' + 'Similarly,\n' + 'with the exception of the possible *in-place* behavior, the ' + 'binary\n' + 'operation performed by augmented assignment is the same as ' + 'the normal\n' + 'binary operations.\n' + '\n' + 'For targets which are attribute references, the same *caveat ' + 'about\n' + 'class and instance attributes* applies as for regular ' + 'assignments.\n', + 'binary': '\n' + 'Binary arithmetic operations\n' + '****************************\n' + '\n' + 'The binary arithmetic operations have the conventional priority\n' + 'levels. Note that some of these operations also apply to ' + 'certain non-\n' + 'numeric types. Apart from the power operator, there are only ' + 'two\n' + 'levels, one for multiplicative operators and one for additive\n' + 'operators:\n' + '\n' + ' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | ' + 'm_expr "/" u_expr\n' + ' | m_expr "%" u_expr\n' + ' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n' + '\n' + 'The "*" (multiplication) operator yields the product of its ' + 'arguments.\n' + 'The arguments must either both be numbers, or one argument must ' + 'be an\n' + 'integer and the other must be a sequence. In the former case, ' + 'the\n' + 'numbers are converted to a common type and then multiplied ' + 'together.\n' + 'In the latter case, sequence repetition is performed; a ' + 'negative\n' + 'repetition factor yields an empty sequence.\n' + '\n' + 'The "/" (division) and "//" (floor division) operators yield ' + 'the\n' + 'quotient of their arguments. The numeric arguments are first\n' + 'converted to a common type. Division of integers yields a float, ' + 'while\n' + 'floor division of integers results in an integer; the result is ' + 'that\n' + "of mathematical division with the 'floor' function applied to " + 'the\n' + 'result. Division by zero raises the "ZeroDivisionError" ' + 'exception.\n' + '\n' + 'The "%" (modulo) operator yields the remainder from the division ' + 'of\n' + 'the first argument by the second. The numeric arguments are ' + 'first\n' + 'converted to a common type. A zero right argument raises the\n' + '"ZeroDivisionError" exception. The arguments may be floating ' + 'point\n' + 'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals ' + '"4*0.7 +\n' + '0.34".) The modulo operator always yields a result with the ' + 'same sign\n' + 'as its second operand (or zero); the absolute value of the ' + 'result is\n' + 'strictly smaller than the absolute value of the second operand ' + '[1].\n' + '\n' + 'The floor division and modulo operators are connected by the ' + 'following\n' + 'identity: "x == (x//y)*y + (x%y)". Floor division and modulo ' + 'are also\n' + 'connected with the built-in function "divmod()": "divmod(x, y) ' + '==\n' + '(x//y, x%y)". [2].\n' + '\n' + 'In addition to performing the modulo operation on numbers, the ' + '"%"\n' + 'operator is also overloaded by string objects to perform ' + 'old-style\n' + 'string formatting (also known as interpolation). The syntax ' + 'for\n' + 'string formatting is described in the Python Library Reference,\n' + 'section *printf-style String Formatting*.\n' + '\n' + 'The floor division operator, the modulo operator, and the ' + '"divmod()"\n' + 'function are not defined for complex numbers. Instead, convert ' + 'to a\n' + 'floating point number using the "abs()" function if ' + 'appropriate.\n' + '\n' + 'The "+" (addition) operator yields the sum of its arguments. 
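A small illustration of the floor division and modulo rules above (the sign of the result, and the "divmod()" identity):

    >>> 7 // 2, 7 % 2
    (3, 1)
    >>> -7 // 2, -7 % 2      # the modulo result takes the sign of the second operand
    (-4, 1)
    >>> divmod(-7, 2)
    (-4, 1)
    >>> (-7 // 2) * 2 + (-7 % 2) == -7
    True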
' + 'The\n' + 'arguments must either both be numbers or both be sequences of ' + 'the same\n' + 'type. In the former case, the numbers are converted to a common ' + 'type\n' + 'and then added together. In the latter case, the sequences are\n' + 'concatenated.\n' + '\n' + 'The "-" (subtraction) operator yields the difference of its ' + 'arguments.\n' + 'The numeric arguments are first converted to a common type.\n', + 'bitwise': '\n' + 'Binary bitwise operations\n' + '*************************\n' + '\n' + 'Each of the three bitwise operations has a different priority ' + 'level:\n' + '\n' + ' and_expr ::= shift_expr | and_expr "&" shift_expr\n' + ' xor_expr ::= and_expr | xor_expr "^" and_expr\n' + ' or_expr ::= xor_expr | or_expr "|" xor_expr\n' + '\n' + 'The "&" operator yields the bitwise AND of its arguments, which ' + 'must\n' + 'be integers.\n' + '\n' + 'The "^" operator yields the bitwise XOR (exclusive OR) of its\n' + 'arguments, which must be integers.\n' + '\n' + 'The "|" operator yields the bitwise (inclusive) OR of its ' + 'arguments,\n' + 'which must be integers.\n', + 'bltin-code-objects': '\n' + 'Code Objects\n' + '************\n' + '\n' + 'Code objects are used by the implementation to ' + 'represent "pseudo-\n' + 'compiled" executable Python code such as a function ' + 'body. They differ\n' + "from function objects because they don't contain a " + 'reference to their\n' + 'global execution environment. Code objects are ' + 'returned by the built-\n' + 'in "compile()" function and can be extracted from ' + 'function objects\n' + 'through their "__code__" attribute. See also the ' + '"code" module.\n' + '\n' + 'A code object can be executed or evaluated by ' + 'passing it (instead of a\n' + 'source string) to the "exec()" or "eval()" built-in ' + 'functions.\n' + '\n' + 'See *The standard type hierarchy* for more ' + 'information.\n', + 'bltin-ellipsis-object': '\n' + 'The Ellipsis Object\n' + '*******************\n' + '\n' + 'This object is commonly used by slicing (see ' + '*Slicings*). It supports\n' + 'no special operations. There is exactly one ' + 'ellipsis object, named\n' + '"Ellipsis" (a built-in name). "type(Ellipsis)()" ' + 'produces the\n' + '"Ellipsis" singleton.\n' + '\n' + 'It is written as "Ellipsis" or "...".\n', + 'bltin-null-object': '\n' + 'The Null Object\n' + '***************\n' + '\n' + "This object is returned by functions that don't " + 'explicitly return a\n' + 'value. It supports no special operations. There is ' + 'exactly one null\n' + 'object, named "None" (a built-in name). ' + '"type(None)()" produces the\n' + 'same singleton.\n' + '\n' + 'It is written as "None".\n', + 'bltin-type-objects': '\n' + 'Type Objects\n' + '************\n' + '\n' + 'Type objects represent the various object types. An ' + "object's type is\n" + 'accessed by the built-in function "type()". There ' + 'are no special\n' + 'operations on types. 
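To make the code-object description above slightly more concrete, a short interactive sketch (the names "code", "namespace" and "square" are illustrative):

    >>> code = compile("result = 6 * 7", "<string>", "exec")
    >>> namespace = {}
    >>> exec(code, namespace)
    >>> namespace['result']
    42
    >>> def square(x):
    ...     return x * x
    ...
    >>> square.__code__.co_varnames
    ('x',)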
The standard module "types" ' + 'defines names for\n' + 'all standard built-in types.\n' + '\n' + 'Types are written like this: "".\n', + 'booleans': '\n' + 'Boolean operations\n' + '******************\n' + '\n' + ' or_test ::= and_test | or_test "or" and_test\n' + ' and_test ::= not_test | and_test "and" not_test\n' + ' not_test ::= comparison | "not" not_test\n' + '\n' + 'In the context of Boolean operations, and also when ' + 'expressions are\n' + 'used by control flow statements, the following values are ' + 'interpreted\n' + 'as false: "False", "None", numeric zero of all types, and ' + 'empty\n' + 'strings and containers (including strings, tuples, lists,\n' + 'dictionaries, sets and frozensets). All other values are ' + 'interpreted\n' + 'as true. User-defined objects can customize their truth value ' + 'by\n' + 'providing a "__bool__()" method.\n' + '\n' + 'The operator "not" yields "True" if its argument is false, ' + '"False"\n' + 'otherwise.\n' + '\n' + 'The expression "x and y" first evaluates *x*; if *x* is false, ' + 'its\n' + 'value is returned; otherwise, *y* is evaluated and the ' + 'resulting value\n' + 'is returned.\n' + '\n' + 'The expression "x or y" first evaluates *x*; if *x* is true, ' + 'its value\n' + 'is returned; otherwise, *y* is evaluated and the resulting ' + 'value is\n' + 'returned.\n' + '\n' + '(Note that neither "and" nor "or" restrict the value and type ' + 'they\n' + 'return to "False" and "True", but rather return the last ' + 'evaluated\n' + 'argument. This is sometimes useful, e.g., if "s" is a string ' + 'that\n' + 'should be replaced by a default value if it is empty, the ' + 'expression\n' + '"s or \'foo\'" yields the desired value. Because "not" has to ' + 'create a\n' + 'new value, it returns a boolean value regardless of the type ' + 'of its\n' + 'argument (for example, "not \'foo\'" produces "False" rather ' + 'than "\'\'".)\n', + 'break': '\n' + 'The "break" statement\n' + '*********************\n' + '\n' + ' break_stmt ::= "break"\n' + '\n' + '"break" may only occur syntactically nested in a "for" or ' + '"while"\n' + 'loop, but not nested in a function or class definition within ' + 'that\n' + 'loop.\n' + '\n' + 'It terminates the nearest enclosing loop, skipping the optional ' + '"else"\n' + 'clause if the loop has one.\n' + '\n' + 'If a "for" loop is terminated by "break", the loop control ' + 'target\n' + 'keeps its current value.\n' + '\n' + 'When "break" passes control out of a "try" statement with a ' + '"finally"\n' + 'clause, that "finally" clause is executed before really leaving ' + 'the\n' + 'loop.\n', + 'callable-types': '\n' + 'Emulating callable objects\n' + '**************************\n' + '\n' + 'object.__call__(self[, args...])\n' + '\n' + ' Called when the instance is "called" as a function; ' + 'if this method\n' + ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' + ' "x.__call__(arg1, arg2, ...)".\n', + 'calls': '\n' + 'Calls\n' + '*****\n' + '\n' + 'A call calls a callable object (e.g., a *function*) with a ' + 'possibly\n' + 'empty series of *arguments*:\n' + '\n' + ' call ::= primary "(" [argument_list [","] | ' + 'comprehension] ")"\n' + ' argument_list ::= positional_arguments ["," ' + 'keyword_arguments]\n' + ' ["," "*" expression] ["," ' + 'keyword_arguments]\n' + ' ["," "**" expression]\n' + ' | keyword_arguments ["," "*" expression]\n' + ' ["," keyword_arguments] ["," "**" ' + 'expression]\n' + ' | "*" expression ["," keyword_arguments] ' + '["," "**" expression]\n' + ' | "**" expression\n' + ' 
positional_arguments ::= expression ("," expression)*\n' + ' keyword_arguments ::= keyword_item ("," keyword_item)*\n' + ' keyword_item ::= identifier "=" expression\n' + '\n' + 'An optional trailing comma may be present after the positional ' + 'and\n' + 'keyword arguments but does not affect the semantics.\n' + '\n' + 'The primary must evaluate to a callable object (user-defined\n' + 'functions, built-in functions, methods of built-in objects, ' + 'class\n' + 'objects, methods of class instances, and all objects having a\n' + '"__call__()" method are callable). All argument expressions are\n' + 'evaluated before the call is attempted. Please refer to section\n' + '*Function definitions* for the syntax of formal *parameter* ' + 'lists.\n' + '\n' + 'If keyword arguments are present, they are first converted to\n' + 'positional arguments, as follows. First, a list of unfilled ' + 'slots is\n' + 'created for the formal parameters. If there are N positional\n' + 'arguments, they are placed in the first N slots. Next, for each\n' + 'keyword argument, the identifier is used to determine the\n' + 'corresponding slot (if the identifier is the same as the first ' + 'formal\n' + 'parameter name, the first slot is used, and so on). If the slot ' + 'is\n' + 'already filled, a "TypeError" exception is raised. Otherwise, ' + 'the\n' + 'value of the argument is placed in the slot, filling it (even if ' + 'the\n' + 'expression is "None", it fills the slot). When all arguments ' + 'have\n' + 'been processed, the slots that are still unfilled are filled with ' + 'the\n' + 'corresponding default value from the function definition. ' + '(Default\n' + 'values are calculated, once, when the function is defined; thus, ' + 'a\n' + 'mutable object such as a list or dictionary used as default value ' + 'will\n' + "be shared by all calls that don't specify an argument value for " + 'the\n' + 'corresponding slot; this should usually be avoided.) If there ' + 'are any\n' + 'unfilled slots for which no default value is specified, a ' + '"TypeError"\n' + 'exception is raised. Otherwise, the list of filled slots is used ' + 'as\n' + 'the argument list for the call.\n' + '\n' + '**CPython implementation detail:** An implementation may provide\n' + 'built-in functions whose positional parameters do not have names, ' + 'even\n' + "if they are 'named' for the purpose of documentation, and which\n" + 'therefore cannot be supplied by keyword. 
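The slot-filling rules described above can be seen in a tiny illustrative session (the function "f" is made up):

    >>> def f(a, b=10):
    ...     return a, b
    ...
    >>> f(1)                 # the unfilled slot 'b' receives its default value
    (1, 10)
    >>> f(1, b=2)            # 'b' fills the second slot by keyword
    (1, 2)
    >>> f(b=2, a=1)          # keyword arguments may appear in any order
    (1, 2)

Calls such as "f(1, a=2)" (the slot for 'a' is already filled) or "f(c=3)" (no matching slot) raise "TypeError", as described above.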
In CPython, this is the ' + 'case\n' + 'for functions implemented in C that use "PyArg_ParseTuple()" to ' + 'parse\n' + 'their arguments.\n' + '\n' + 'If there are more positional arguments than there are formal ' + 'parameter\n' + 'slots, a "TypeError" exception is raised, unless a formal ' + 'parameter\n' + 'using the syntax "*identifier" is present; in this case, that ' + 'formal\n' + 'parameter receives a tuple containing the excess positional ' + 'arguments\n' + '(or an empty tuple if there were no excess positional ' + 'arguments).\n' + '\n' + 'If any keyword argument does not correspond to a formal ' + 'parameter\n' + 'name, a "TypeError" exception is raised, unless a formal ' + 'parameter\n' + 'using the syntax "**identifier" is present; in this case, that ' + 'formal\n' + 'parameter receives a dictionary containing the excess keyword\n' + 'arguments (using the keywords as keys and the argument values as\n' + 'corresponding values), or a (new) empty dictionary if there were ' + 'no\n' + 'excess keyword arguments.\n' + '\n' + 'If the syntax "*expression" appears in the function call, ' + '"expression"\n' + 'must evaluate to an iterable. Elements from this iterable are ' + 'treated\n' + 'as if they were additional positional arguments; if there are\n' + 'positional arguments *x1*, ..., *xN*, and "expression" evaluates ' + 'to a\n' + 'sequence *y1*, ..., *yM*, this is equivalent to a call with M+N\n' + 'positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n' + '\n' + 'A consequence of this is that although the "*expression" syntax ' + 'may\n' + 'appear *after* some keyword arguments, it is processed *before* ' + 'the\n' + 'keyword arguments (and the "**expression" argument, if any -- ' + 'see\n' + 'below). So:\n' + '\n' + ' >>> def f(a, b):\n' + ' ... print(a, b)\n' + ' ...\n' + ' >>> f(b=1, *(2,))\n' + ' 2 1\n' + ' >>> f(a=1, *(2,))\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in ?\n' + " TypeError: f() got multiple values for keyword argument 'a'\n" + ' >>> f(1, *(2,))\n' + ' 1 2\n' + '\n' + 'It is unusual for both keyword arguments and the "*expression" ' + 'syntax\n' + 'to be used in the same call, so in practice this confusion does ' + 'not\n' + 'arise.\n' + '\n' + 'If the syntax "**expression" appears in the function call,\n' + '"expression" must evaluate to a mapping, the contents of which ' + 'are\n' + 'treated as additional keyword arguments. In the case of a ' + 'keyword\n' + 'appearing in both "expression" and as an explicit keyword ' + 'argument, a\n' + '"TypeError" exception is raised.\n' + '\n' + 'Formal parameters using the syntax "*identifier" or ' + '"**identifier"\n' + 'cannot be used as positional argument slots or as keyword ' + 'argument\n' + 'names.\n' + '\n' + 'A call always returns some value, possibly "None", unless it ' + 'raises an\n' + 'exception. How this value is computed depends on the type of ' + 'the\n' + 'callable object.\n' + '\n' + 'If it is---\n' + '\n' + 'a user-defined function:\n' + ' The code block for the function is executed, passing it the\n' + ' argument list. The first thing the code block will do is bind ' + 'the\n' + ' formal parameters to the arguments; this is described in ' + 'section\n' + ' *Function definitions*. 
When the code block executes a ' + '"return"\n' + ' statement, this specifies the return value of the function ' + 'call.\n' + '\n' + 'a built-in function or method:\n' + ' The result is up to the interpreter; see *Built-in Functions* ' + 'for\n' + ' the descriptions of built-in functions and methods.\n' + '\n' + 'a class object:\n' + ' A new instance of that class is returned.\n' + '\n' + 'a class instance method:\n' + ' The corresponding user-defined function is called, with an ' + 'argument\n' + ' list that is one longer than the argument list of the call: ' + 'the\n' + ' instance becomes the first argument.\n' + '\n' + 'a class instance:\n' + ' The class must define a "__call__()" method; the effect is ' + 'then the\n' + ' same as if that method was called.\n', + 'class': '\n' + 'Class definitions\n' + '*****************\n' + '\n' + 'A class definition defines a class object (see section *The ' + 'standard\n' + 'type hierarchy*):\n' + '\n' + ' classdef ::= [decorators] "class" classname [inheritance] ' + '":" suite\n' + ' inheritance ::= "(" [parameter_list] ")"\n' + ' classname ::= identifier\n' + '\n' + 'A class definition is an executable statement. The inheritance ' + 'list\n' + 'usually gives a list of base classes (see *Customizing class ' + 'creation*\n' + 'for more advanced uses), so each item in the list should evaluate ' + 'to a\n' + 'class object which allows subclassing. Classes without an ' + 'inheritance\n' + 'list inherit, by default, from the base class "object"; hence,\n' + '\n' + ' class Foo:\n' + ' pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo(object):\n' + ' pass\n' + '\n' + "The class's suite is then executed in a new execution frame (see\n" + '*Naming and binding*), using a newly created local namespace and ' + 'the\n' + 'original global namespace. (Usually, the suite contains mostly\n' + "function definitions.) When the class's suite finishes " + 'execution, its\n' + 'execution frame is discarded but its local namespace is saved. ' + '[4] A\n' + 'class object is then created using the inheritance list for the ' + 'base\n' + 'classes and the saved local namespace for the attribute ' + 'dictionary.\n' + 'The class name is bound to this class object in the original ' + 'local\n' + 'namespace.\n' + '\n' + 'Class creation can be customized heavily using *metaclasses*.\n' + '\n' + 'Classes can also be decorated: just like when decorating ' + 'functions,\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' class Foo: pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo: pass\n' + ' Foo = f1(arg)(f2(Foo))\n' + '\n' + 'The evaluation rules for the decorator expressions are the same ' + 'as for\n' + 'function decorators. The result must be a class object, which is ' + 'then\n' + 'bound to the class name.\n' + '\n' + "**Programmer's note:** Variables defined in the class definition " + 'are\n' + 'class attributes; they are shared by instances. Instance ' + 'attributes\n' + 'can be set in a method with "self.name = value". Both class and\n' + 'instance attributes are accessible through the notation ' + '""self.name"",\n' + 'and an instance attribute hides a class attribute with the same ' + 'name\n' + 'when accessed in this way. Class attributes can be used as ' + 'defaults\n' + 'for instance attributes, but using mutable values there can lead ' + 'to\n' + 'unexpected results. 
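The note above about class attributes, instance attributes and mutable defaults can be illustrated with a made-up class "Bag":

    >>> class Bag:
    ...     items = []                    # class attribute, shared by all instances
    ...     def add(self, thing):
    ...         self.items.append(thing)  # mutates the shared list
    ...
    >>> a, b = Bag(), Bag()
    >>> a.add('pen')
    >>> b.items                           # b sees the item added through a
    ['pen']
    >>> a.count = 1                       # creates an instance attribute on a only
    >>> 'count' in Bag.__dict__
    False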
*Descriptors* can be used to create ' + 'instance\n' + 'variables with different implementation details.\n' + '\n' + 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n' + ' Class Decorators\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] The exception is propagated to the invocation stack unless\n' + ' there is a "finally" clause which happens to raise another\n' + ' exception. That new exception causes the old one to be lost.\n' + '\n' + '[2] Currently, control "flows off the end" except in the case of\n' + ' an exception or the execution of a "return", "continue", or\n' + ' "break" statement.\n' + '\n' + '[3] A string literal appearing as the first statement in the\n' + ' function body is transformed into the function\'s "__doc__"\n' + " attribute and therefore the function's *docstring*.\n" + '\n' + '[4] A string literal appearing as the first statement in the ' + 'class\n' + ' body is transformed into the namespace\'s "__doc__" item and\n' + " therefore the class's *docstring*.\n", + 'comparisons': '\n' + 'Comparisons\n' + '***********\n' + '\n' + 'Unlike C, all comparison operations in Python have the same ' + 'priority,\n' + 'which is lower than that of any arithmetic, shifting or ' + 'bitwise\n' + 'operation. Also unlike C, expressions like "a < b < c" ' + 'have the\n' + 'interpretation that is conventional in mathematics:\n' + '\n' + ' comparison ::= or_expr ( comp_operator or_expr )*\n' + ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n' + ' | "is" ["not"] | ["not"] "in"\n' + '\n' + 'Comparisons yield boolean values: "True" or "False".\n' + '\n' + 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" ' + 'is\n' + 'equivalent to "x < y and y <= z", except that "y" is ' + 'evaluated only\n' + 'once (but in both cases "z" is not evaluated at all when "x ' + '< y" is\n' + 'found to be false).\n' + '\n' + 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions ' + 'and *op1*,\n' + '*op2*, ..., *opN* are comparison operators, then "a op1 b ' + 'op2 c ... y\n' + 'opN z" is equivalent to "a op1 b and b op2 c and ... y opN ' + 'z", except\n' + 'that each expression is evaluated at most once.\n' + '\n' + 'Note that "a op1 b op2 c" doesn\'t imply any kind of ' + 'comparison between\n' + '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal ' + '(though\n' + 'perhaps not pretty).\n' + '\n' + 'The operators "<", ">", "==", ">=", "<=", and "!=" compare ' + 'the values\n' + 'of two objects. The objects need not have the same type. ' + 'If both are\n' + 'numbers, they are converted to a common type. Otherwise, ' + 'the "==" and\n' + '"!=" operators *always* consider objects of different types ' + 'to be\n' + 'unequal, while the "<", ">", ">=" and "<=" operators raise ' + 'a\n' + '"TypeError" when comparing objects of different types that ' + 'do not\n' + 'implement these operators for the given pair of types. You ' + 'can\n' + 'control comparison behavior of objects of non-built-in ' + 'types by\n' + 'defining rich comparison methods like "__gt__()", described ' + 'in section\n' + '*Basic customization*.\n' + '\n' + 'Comparison of objects of the same type depends on the ' + 'type:\n' + '\n' + '* Numbers are compared arithmetically.\n' + '\n' + '* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are ' + 'special. The\n' + ' are identical to themselves, "x is x" but are not equal ' + 'to\n' + ' themselves, "x != x". Additionally, comparing any value ' + 'to a\n' + ' not-a-number value will return "False". 
For example, ' + 'both "3 <\n' + ' float(\'NaN\')" and "float(\'NaN\') < 3" will return ' + '"False".\n' + '\n' + '* Bytes objects are compared lexicographically using the ' + 'numeric\n' + ' values of their elements.\n' + '\n' + '* Strings are compared lexicographically using the numeric\n' + ' equivalents (the result of the built-in function "ord()") ' + 'of their\n' + " characters. [3] String and bytes object can't be " + 'compared!\n' + '\n' + '* Tuples and lists are compared lexicographically using ' + 'comparison\n' + ' of corresponding elements. This means that to compare ' + 'equal, each\n' + ' element must compare equal and the two sequences must be ' + 'of the same\n' + ' type and have the same length.\n' + '\n' + ' If not equal, the sequences are ordered the same as their ' + 'first\n' + ' differing elements. For example, "[1,2,x] <= [1,2,y]" ' + 'has the same\n' + ' value as "x <= y". If the corresponding element does not ' + 'exist, the\n' + ' shorter sequence is ordered first (for example, "[1,2] < ' + '[1,2,3]").\n' + '\n' + '* Mappings (dictionaries) compare equal if and only if they ' + 'have the\n' + ' same "(key, value)" pairs. Order comparisons "(\'<\', ' + "'<=', '>=',\n" + ' \'>\')" raise "TypeError".\n' + '\n' + '* Sets and frozensets define comparison operators to mean ' + 'subset and\n' + ' superset tests. Those relations do not define total ' + 'orderings (the\n' + ' two sets "{1,2}" and {2,3} are not equal, nor subsets of ' + 'one\n' + ' another, nor supersets of one another). Accordingly, ' + 'sets are not\n' + ' appropriate arguments for functions which depend on total ' + 'ordering.\n' + ' For example, "min()", "max()", and "sorted()" produce ' + 'undefined\n' + ' results given a list of sets as inputs.\n' + '\n' + '* Most other objects of built-in types compare unequal ' + 'unless they\n' + ' are the same object; the choice whether one object is ' + 'considered\n' + ' smaller or larger than another one is made arbitrarily ' + 'but\n' + ' consistently within one execution of a program.\n' + '\n' + 'Comparison of objects of differing types depends on whether ' + 'either of\n' + 'the types provide explicit support for the comparison. ' + 'Most numeric\n' + 'types can be compared with one another. When cross-type ' + 'comparison is\n' + 'not supported, the comparison method returns ' + '"NotImplemented".\n' + '\n' + 'The operators "in" and "not in" test for membership. "x in ' + 's"\n' + 'evaluates to true if *x* is a member of *s*, and false ' + 'otherwise. "x\n' + 'not in s" returns the negation of "x in s". All built-in ' + 'sequences\n' + 'and set types support this as well as dictionary, for which ' + '"in" tests\n' + 'whether the dictionary has a given key. For container types ' + 'such as\n' + 'list, tuple, set, frozenset, dict, or collections.deque, ' + 'the\n' + 'expression "x in y" is equivalent to "any(x is e or x == e ' + 'for e in\n' + 'y)".\n' + '\n' + 'For the string and bytes types, "x in y" is true if and ' + 'only if *x* is\n' + 'a substring of *y*. An equivalent test is "y.find(x) != ' + '-1". 
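A few illustrative membership tests matching the rules above:

    >>> 'py' in 'python', 'python'.find('py') != -1
    (True, True)
    >>> 3 in [1, 2, 3]
    True
    >>> 'key' in {'key': 'value'}         # for dictionaries, "in" tests the keys
    True
    >>> 2 in {1, 2, 3}, 4 not in (1, 2, 3)
    (True, True)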
Empty\n' + 'strings are always considered to be a substring of any ' + 'other string,\n' + 'so """ in "abc"" will return "True".\n' + '\n' + 'For user-defined classes which define the "__contains__()" ' + 'method, "x\n' + 'in y" is true if and only if "y.__contains__(x)" is true.\n' + '\n' + 'For user-defined classes which do not define ' + '"__contains__()" but do\n' + 'define "__iter__()", "x in y" is true if some value "z" ' + 'with "x == z"\n' + 'is produced while iterating over "y". If an exception is ' + 'raised\n' + 'during the iteration, it is as if "in" raised that ' + 'exception.\n' + '\n' + 'Lastly, the old-style iteration protocol is tried: if a ' + 'class defines\n' + '"__getitem__()", "x in y" is true if and only if there is a ' + 'non-\n' + 'negative integer index *i* such that "x == y[i]", and all ' + 'lower\n' + 'integer indices do not raise "IndexError" exception. (If ' + 'any other\n' + 'exception is raised, it is as if "in" raised that ' + 'exception).\n' + '\n' + 'The operator "not in" is defined to have the inverse true ' + 'value of\n' + '"in".\n' + '\n' + 'The operators "is" and "is not" test for object identity: ' + '"x is y" is\n' + 'true if and only if *x* and *y* are the same object. "x is ' + 'not y"\n' + 'yields the inverse truth value. [4]\n', + 'compound': '\n' + 'Compound statements\n' + '*******************\n' + '\n' + 'Compound statements contain (groups of) other statements; they ' + 'affect\n' + 'or control the execution of those other statements in some ' + 'way. In\n' + 'general, compound statements span multiple lines, although in ' + 'simple\n' + 'incarnations a whole compound statement may be contained in ' + 'one line.\n' + '\n' + 'The "if", "while" and "for" statements implement traditional ' + 'control\n' + 'flow constructs. "try" specifies exception handlers and/or ' + 'cleanup\n' + 'code for a group of statements, while the "with" statement ' + 'allows the\n' + 'execution of initialization and finalization code around a ' + 'block of\n' + 'code. Function and class definitions are also syntactically ' + 'compound\n' + 'statements.\n' + '\n' + "A compound statement consists of one or more 'clauses.' A " + 'clause\n' + "consists of a header and a 'suite.' The clause headers of a\n" + 'particular compound statement are all at the same indentation ' + 'level.\n' + 'Each clause header begins with a uniquely identifying keyword ' + 'and ends\n' + 'with a colon. A suite is a group of statements controlled by ' + 'a\n' + 'clause. A suite can be one or more semicolon-separated ' + 'simple\n' + 'statements on the same line as the header, following the ' + "header's\n" + 'colon, or it can be one or more indented statements on ' + 'subsequent\n' + 'lines. 
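Both forms of a suite described above (simple statements on the header line, or an indented block on the following lines) can be tried interactively; this sketch is illustrative only:

    >>> if 2 + 2 == 4: print('inline'); print('suite of two simple statements')
    ...
    inline
    suite of two simple statements
    >>> x = 10
    >>> while x > 7:
    ...     x -= 1           # an indented suite on subsequent lines
    ...
    >>> x
    7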
Only the latter form of a suite can contain nested ' + 'compound\n' + 'statements; the following is illegal, mostly because it ' + "wouldn't be\n" + 'clear to which "if" clause a following "else" clause would ' + 'belong:\n' + '\n' + ' if test1: if test2: print(x)\n' + '\n' + 'Also note that the semicolon binds tighter than the colon in ' + 'this\n' + 'context, so that in the following example, either all or none ' + 'of the\n' + '"print()" calls are executed:\n' + '\n' + ' if x < y < z: print(x); print(y); print(z)\n' + '\n' + 'Summarizing:\n' + '\n' + ' compound_stmt ::= if_stmt\n' + ' | while_stmt\n' + ' | for_stmt\n' + ' | try_stmt\n' + ' | with_stmt\n' + ' | funcdef\n' + ' | classdef\n' + ' suite ::= stmt_list NEWLINE | NEWLINE INDENT ' + 'statement+ DEDENT\n' + ' statement ::= stmt_list NEWLINE | compound_stmt\n' + ' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n' + '\n' + 'Note that statements always end in a "NEWLINE" possibly ' + 'followed by a\n' + '"DEDENT". Also note that optional continuation clauses always ' + 'begin\n' + 'with a keyword that cannot start a statement, thus there are ' + 'no\n' + 'ambiguities (the \'dangling "else"\' problem is solved in ' + 'Python by\n' + 'requiring nested "if" statements to be indented).\n' + '\n' + 'The formatting of the grammar rules in the following sections ' + 'places\n' + 'each clause on a separate line for clarity.\n' + '\n' + '\n' + 'The "if" statement\n' + '==================\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" expression ":" suite\n' + ' ( "elif" expression ":" suite )*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the ' + 'expressions one\n' + 'by one until one is found to be true (see section *Boolean ' + 'operations*\n' + 'for the definition of true and false); then that suite is ' + 'executed\n' + '(and no other part of the "if" statement is executed or ' + 'evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, ' + 'if\n' + 'present, is executed.\n' + '\n' + '\n' + 'The "while" statement\n' + '=====================\n' + '\n' + 'The "while" statement is used for repeated execution as long ' + 'as an\n' + 'expression is true:\n' + '\n' + ' while_stmt ::= "while" expression ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'This repeatedly tests the expression and, if it is true, ' + 'executes the\n' + 'first suite; if the expression is false (which may be the ' + 'first time\n' + 'it is tested) the suite of the "else" clause, if present, is ' + 'executed\n' + 'and the loop terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause\'s suite. A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and ' + 'goes back\n' + 'to testing the expression.\n' + '\n' + '\n' + 'The "for" statement\n' + '===================\n' + '\n' + 'The "for" statement is used to iterate over the elements of a ' + 'sequence\n' + '(such as a string, tuple or list) or other iterable object:\n' + '\n' + ' for_stmt ::= "for" target_list "in" expression_list ":" ' + 'suite\n' + ' ["else" ":" suite]\n' + '\n' + 'The expression list is evaluated once; it should yield an ' + 'iterable\n' + 'object. An iterator is created for the result of the\n' + '"expression_list". The suite is then executed once for each ' + 'item\n' + 'provided by the iterator, in the order returned by the ' + 'iterator. 
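A small illustrative "for" loop showing items from the iterator being assigned to a target list:

    >>> pairs = [(1, 'one'), (2, 'two')]
    >>> for number, name in pairs:        # each item is assigned to the target list
    ...     print(number, name)
    ...
    1 one
    2 two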
Each\n' + 'item in turn is assigned to the target list using the standard ' + 'rules\n' + 'for assignments (see *Assignment statements*), and then the ' + 'suite is\n' + 'executed. When the items are exhausted (which is immediately ' + 'when the\n' + 'sequence is empty or an iterator raises a "StopIteration" ' + 'exception),\n' + 'the suite in the "else" clause, if present, is executed, and ' + 'the loop\n' + 'terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause\'s suite. A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and ' + 'continues\n' + 'with the next item, or with the "else" clause if there is no ' + 'next\n' + 'item.\n' + '\n' + 'The for-loop makes assignments to the variables(s) in the ' + 'target list.\n' + 'This overwrites all previous assignments to those variables ' + 'including\n' + 'those made in the suite of the for-loop:\n' + '\n' + ' for i in range(10):\n' + ' print(i)\n' + ' i = 5 # this will not affect the for-loop\n' + ' # because i will be overwritten with ' + 'the next\n' + ' # index in the range\n' + '\n' + 'Names in the target list are not deleted when the loop is ' + 'finished,\n' + 'but if the sequence is empty, they will not have been assigned ' + 'to at\n' + 'all by the loop. Hint: the built-in function "range()" ' + 'returns an\n' + 'iterator of integers suitable to emulate the effect of ' + 'Pascal\'s "for i\n' + ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, ' + '2]".\n' + '\n' + 'Note: There is a subtlety when the sequence is being modified ' + 'by the\n' + ' loop (this can only occur for mutable sequences, i.e. ' + 'lists). An\n' + ' internal counter is used to keep track of which item is used ' + 'next,\n' + ' and this is incremented on each iteration. When this ' + 'counter has\n' + ' reached the length of the sequence the loop terminates. ' + 'This means\n' + ' that if the suite deletes the current (or a previous) item ' + 'from the\n' + ' sequence, the next item will be skipped (since it gets the ' + 'index of\n' + ' the current item which has already been treated). Likewise, ' + 'if the\n' + ' suite inserts an item in the sequence before the current ' + 'item, the\n' + ' current item will be treated again the next time through the ' + 'loop.\n' + ' This can lead to nasty bugs that can be avoided by making a\n' + ' temporary copy using a slice of the whole sequence, e.g.,\n' + '\n' + ' for x in a[:]:\n' + ' if x < 0: a.remove(x)\n' + '\n' + '\n' + 'The "try" statement\n' + '===================\n' + '\n' + 'The "try" statement specifies exception handlers and/or ' + 'cleanup code\n' + 'for a group of statements:\n' + '\n' + ' try_stmt ::= try1_stmt | try2_stmt\n' + ' try1_stmt ::= "try" ":" suite\n' + ' ("except" [expression ["as" identifier]] ":" ' + 'suite)+\n' + ' ["else" ":" suite]\n' + ' ["finally" ":" suite]\n' + ' try2_stmt ::= "try" ":" suite\n' + ' "finally" ":" suite\n' + '\n' + 'The "except" clause(s) specify one or more exception handlers. ' + 'When no\n' + 'exception occurs in the "try" clause, no exception handler is\n' + 'executed. When an exception occurs in the "try" suite, a ' + 'search for an\n' + 'exception handler is started. This search inspects the except ' + 'clauses\n' + 'in turn until one is found that matches the exception. An ' + 'expression-\n' + 'less except clause, if present, must be last; it matches any\n' + 'exception. 
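The way except clauses are inspected in turn can be sketched with a made-up helper "classify":

    >>> def classify(exc):
    ...     try:
    ...         raise exc
    ...     except ZeroDivisionError:
    ...         return 'arithmetic'
    ...     except (KeyError, IndexError):   # a tuple matches any of its items
    ...         return 'lookup'
    ...     except Exception as err:
    ...         return type(err).__name__
    ...
    >>> classify(IndexError())
    'lookup'
    >>> classify(ValueError())
    'ValueError'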
For an except clause with an expression, that ' + 'expression\n' + 'is evaluated, and the clause matches the exception if the ' + 'resulting\n' + 'object is "compatible" with the exception. An object is ' + 'compatible\n' + 'with an exception if it is the class or a base class of the ' + 'exception\n' + 'object or a tuple containing an item compatible with the ' + 'exception.\n' + '\n' + 'If no except clause matches the exception, the search for an ' + 'exception\n' + 'handler continues in the surrounding code and on the ' + 'invocation stack.\n' + '[1]\n' + '\n' + 'If the evaluation of an expression in the header of an except ' + 'clause\n' + 'raises an exception, the original search for a handler is ' + 'canceled and\n' + 'a search starts for the new exception in the surrounding code ' + 'and on\n' + 'the call stack (it is treated as if the entire "try" statement ' + 'raised\n' + 'the exception).\n' + '\n' + 'When a matching except clause is found, the exception is ' + 'assigned to\n' + 'the target specified after the "as" keyword in that except ' + 'clause, if\n' + "present, and the except clause's suite is executed. All " + 'except\n' + 'clauses must have an executable block. When the end of this ' + 'block is\n' + 'reached, execution continues normally after the entire try ' + 'statement.\n' + '(This means that if two nested handlers exist for the same ' + 'exception,\n' + 'and the exception occurs in the try clause of the inner ' + 'handler, the\n' + 'outer handler will not handle the exception.)\n' + '\n' + 'When an exception has been assigned using "as target", it is ' + 'cleared\n' + 'at the end of the except clause. This is as if\n' + '\n' + ' except E as N:\n' + ' foo\n' + '\n' + 'was translated to\n' + '\n' + ' except E as N:\n' + ' try:\n' + ' foo\n' + ' finally:\n' + ' del N\n' + '\n' + 'This means the exception must be assigned to a different name ' + 'to be\n' + 'able to refer to it after the except clause. Exceptions are ' + 'cleared\n' + 'because with the traceback attached to them, they form a ' + 'reference\n' + 'cycle with the stack frame, keeping all locals in that frame ' + 'alive\n' + 'until the next garbage collection occurs.\n' + '\n' + "Before an except clause's suite is executed, details about " + 'the\n' + 'exception are stored in the "sys" module and can be accessed ' + 'via\n' + '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple ' + 'consisting of the\n' + 'exception class, the exception instance and a traceback object ' + '(see\n' + 'section *The standard type hierarchy*) identifying the point ' + 'in the\n' + 'program where the exception occurred. "sys.exc_info()" values ' + 'are\n' + 'restored to their previous values (before the call) when ' + 'returning\n' + 'from a function that handled an exception.\n' + '\n' + 'The optional "else" clause is executed if and when control ' + 'flows off\n' + 'the end of the "try" clause. [2] Exceptions in the "else" ' + 'clause are\n' + 'not handled by the preceding "except" clauses.\n' + '\n' + 'If "finally" is present, it specifies a \'cleanup\' handler. ' + 'The "try"\n' + 'clause is executed, including any "except" and "else" ' + 'clauses. If an\n' + 'exception occurs in any of the clauses and is not handled, ' + 'the\n' + 'exception is temporarily saved. The "finally" clause is ' + 'executed. If\n' + 'there is a saved exception it is re-raised at the end of the ' + '"finally"\n' + 'clause. If the "finally" clause raises another exception, the ' + 'saved\n' + 'exception is set as the context of the new exception. 
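The re-raising behaviour of "finally" described above, illustrated with a made-up function "divide":

    >>> def divide(a, b):
    ...     try:
    ...         return a / b
    ...     finally:
    ...         print('finally clause executed')
    ...
    >>> divide(6, 3)
    finally clause executed
    2.0
    >>> divide(6, 0)         # the saved ZeroDivisionError is re-raised afterwards
    finally clause executed
    Traceback (most recent call last):
      ...
    ZeroDivisionError: division by zero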
If the ' + '"finally"\n' + 'clause executes a "return" or "break" statement, the saved ' + 'exception\n' + 'is discarded:\n' + '\n' + ' >>> def f():\n' + ' ... try:\n' + ' ... 1/0\n' + ' ... finally:\n' + ' ... return 42\n' + ' ...\n' + ' >>> f()\n' + ' 42\n' + '\n' + 'The exception information is not available to the program ' + 'during\n' + 'execution of the "finally" clause.\n' + '\n' + 'When a "return", "break" or "continue" statement is executed ' + 'in the\n' + '"try" suite of a "try"..."finally" statement, the "finally" ' + 'clause is\n' + 'also executed \'on the way out.\' A "continue" statement is ' + 'illegal in\n' + 'the "finally" clause. (The reason is a problem with the ' + 'current\n' + 'implementation --- this restriction may be lifted in the ' + 'future).\n' + '\n' + 'The return value of a function is determined by the last ' + '"return"\n' + 'statement executed. Since the "finally" clause always ' + 'executes, a\n' + '"return" statement executed in the "finally" clause will ' + 'always be the\n' + 'last one executed:\n' + '\n' + ' >>> def foo():\n' + ' ... try:\n' + " ... return 'try'\n" + ' ... finally:\n' + " ... return 'finally'\n" + ' ...\n' + ' >>> foo()\n' + " 'finally'\n" + '\n' + 'Additional information on exceptions can be found in section\n' + '*Exceptions*, and information on using the "raise" statement ' + 'to\n' + 'generate exceptions may be found in section *The raise ' + 'statement*.\n' + '\n' + '\n' + 'The "with" statement\n' + '====================\n' + '\n' + 'The "with" statement is used to wrap the execution of a block ' + 'with\n' + 'methods defined by a context manager (see section *With ' + 'Statement\n' + 'Context Managers*). This allows common ' + '"try"..."except"..."finally"\n' + 'usage patterns to be encapsulated for convenient reuse.\n' + '\n' + ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' + ' with_item ::= expression ["as" target]\n' + '\n' + 'The execution of the "with" statement with one "item" proceeds ' + 'as\n' + 'follows:\n' + '\n' + '1. The context expression (the expression given in the ' + '"with_item")\n' + ' is evaluated to obtain a context manager.\n' + '\n' + '2. The context manager\'s "__exit__()" is loaded for later ' + 'use.\n' + '\n' + '3. The context manager\'s "__enter__()" method is invoked.\n' + '\n' + '4. If a target was included in the "with" statement, the ' + 'return\n' + ' value from "__enter__()" is assigned to it.\n' + '\n' + ' Note: The "with" statement guarantees that if the ' + '"__enter__()"\n' + ' method returns without an error, then "__exit__()" will ' + 'always be\n' + ' called. Thus, if an error occurs during the assignment to ' + 'the\n' + ' target list, it will be treated the same as an error ' + 'occurring\n' + ' within the suite would be. See step 6 below.\n' + '\n' + '5. The suite is executed.\n' + '\n' + '6. The context manager\'s "__exit__()" method is invoked. If ' + 'an\n' + ' exception caused the suite to be exited, its type, value, ' + 'and\n' + ' traceback are passed as arguments to "__exit__()". 
' + 'Otherwise, three\n' + ' "None" arguments are supplied.\n' + '\n' + ' If the suite was exited due to an exception, and the return ' + 'value\n' + ' from the "__exit__()" method was false, the exception is ' + 'reraised.\n' + ' If the return value was true, the exception is suppressed, ' + 'and\n' + ' execution continues with the statement following the ' + '"with"\n' + ' statement.\n' + '\n' + ' If the suite was exited for any reason other than an ' + 'exception, the\n' + ' return value from "__exit__()" is ignored, and execution ' + 'proceeds\n' + ' at the normal location for the kind of exit that was ' + 'taken.\n' + '\n' + 'With more than one item, the context managers are processed as ' + 'if\n' + 'multiple "with" statements were nested:\n' + '\n' + ' with A() as a, B() as b:\n' + ' suite\n' + '\n' + 'is equivalent to\n' + '\n' + ' with A() as a:\n' + ' with B() as b:\n' + ' suite\n' + '\n' + 'Changed in version 3.1: Support for multiple context ' + 'expressions.\n' + '\n' + 'See also: **PEP 0343** - The "with" statement\n' + '\n' + ' The specification, background, and examples for the ' + 'Python "with"\n' + ' statement.\n' + '\n' + '\n' + 'Function definitions\n' + '====================\n' + '\n' + 'A function definition defines a user-defined function object ' + '(see\n' + 'section *The standard type hierarchy*):\n' + '\n' + ' funcdef ::= [decorators] "def" funcname "(" ' + '[parameter_list] ")" ["->" expression] ":" suite\n' + ' decorators ::= decorator+\n' + ' decorator ::= "@" dotted_name ["(" [parameter_list ' + '[","]] ")"] NEWLINE\n' + ' dotted_name ::= identifier ("." identifier)*\n' + ' parameter_list ::= (defparameter ",")*\n' + ' | "*" [parameter] ("," defparameter)* ' + '["," "**" parameter]\n' + ' | "**" parameter\n' + ' | defparameter [","] )\n' + ' parameter ::= identifier [":" expression]\n' + ' defparameter ::= parameter ["=" expression]\n' + ' funcname ::= identifier\n' + '\n' + 'A function definition is an executable statement. Its ' + 'execution binds\n' + 'the function name in the current local namespace to a function ' + 'object\n' + '(a wrapper around the executable code for the function). ' + 'This\n' + 'function object contains a reference to the current global ' + 'namespace\n' + 'as the global namespace to be used when the function is ' + 'called.\n' + '\n' + 'The function definition does not execute the function body; ' + 'this gets\n' + 'executed only when the function is called. [3]\n' + '\n' + 'A function definition may be wrapped by one or more ' + '*decorator*\n' + 'expressions. Decorator expressions are evaluated when the ' + 'function is\n' + 'defined, in the scope that contains the function definition. ' + 'The\n' + 'result must be a callable, which is invoked with the function ' + 'object\n' + 'as the only argument. The returned value is bound to the ' + 'function name\n' + 'instead of the function object. Multiple decorators are ' + 'applied in\n' + 'nested fashion. For example, the following code\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' def func(): pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' def func(): pass\n' + ' func = f1(arg)(f2(func))\n' + '\n' + 'When one or more *parameters* have the form *parameter* "="\n' + '*expression*, the function is said to have "default parameter ' + 'values."\n' + 'For a parameter with a default value, the corresponding ' + '*argument* may\n' + "be omitted from a call, in which case the parameter's default " + 'value is\n' + 'substituted. 
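A minimal illustration of default parameter values (the function "power" is made up):

    >>> def power(base, exponent=2):
    ...     return base ** exponent
    ...
    >>> power(3)             # 'exponent' omitted, so its default value is used
    9
    >>> power(3, 3)
    27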
If a parameter has a default value, all ' + 'following\n' + 'parameters up until the ""*"" must also have a default value ' + '--- this\n' + 'is a syntactic restriction that is not expressed by the ' + 'grammar.\n' + '\n' + '**Default parameter values are evaluated from left to right ' + 'when the\n' + 'function definition is executed.** This means that the ' + 'expression is\n' + 'evaluated once, when the function is defined, and that the ' + 'same "pre-\n' + 'computed" value is used for each call. This is especially ' + 'important\n' + 'to understand when a default parameter is a mutable object, ' + 'such as a\n' + 'list or a dictionary: if the function modifies the object ' + '(e.g. by\n' + 'appending an item to a list), the default value is in effect ' + 'modified.\n' + 'This is generally not what was intended. A way around this is ' + 'to use\n' + '"None" as the default, and explicitly test for it in the body ' + 'of the\n' + 'function, e.g.:\n' + '\n' + ' def whats_on_the_telly(penguin=None):\n' + ' if penguin is None:\n' + ' penguin = []\n' + ' penguin.append("property of the zoo")\n' + ' return penguin\n' + '\n' + 'Function call semantics are described in more detail in ' + 'section\n' + '*Calls*. A function call always assigns values to all ' + 'parameters\n' + 'mentioned in the parameter list, either from position ' + 'arguments, from\n' + 'keyword arguments, or from default values. If the form\n' + '""*identifier"" is present, it is initialized to a tuple ' + 'receiving any\n' + 'excess positional parameters, defaulting to the empty tuple. ' + 'If the\n' + 'form ""**identifier"" is present, it is initialized to a new\n' + 'dictionary receiving any excess keyword arguments, defaulting ' + 'to a new\n' + 'empty dictionary. Parameters after ""*"" or ""*identifier"" ' + 'are\n' + 'keyword-only parameters and may only be passed used keyword ' + 'arguments.\n' + '\n' + 'Parameters may have annotations of the form "": expression"" ' + 'following\n' + 'the parameter name. Any parameter may have an annotation even ' + 'those\n' + 'of the form "*identifier" or "**identifier". Functions may ' + 'have\n' + '"return" annotation of the form ""-> expression"" after the ' + 'parameter\n' + 'list. These annotations can be any valid Python expression ' + 'and are\n' + 'evaluated when the function definition is executed. ' + 'Annotations may\n' + 'be evaluated in a different order than they appear in the ' + 'source code.\n' + 'The presence of annotations does not change the semantics of ' + 'a\n' + 'function. The annotation values are available as values of a\n' + "dictionary keyed by the parameters' names in the " + '"__annotations__"\n' + 'attribute of the function object.\n' + '\n' + 'It is also possible to create anonymous functions (functions ' + 'not bound\n' + 'to a name), for immediate use in expressions. This uses ' + 'lambda\n' + 'expressions, described in section *Lambdas*. Note that the ' + 'lambda\n' + 'expression is merely a shorthand for a simplified function ' + 'definition;\n' + 'a function defined in a ""def"" statement can be passed around ' + 'or\n' + 'assigned to another name just like a function defined by a ' + 'lambda\n' + 'expression. The ""def"" form is actually more powerful since ' + 'it\n' + 'allows the execution of multiple statements and annotations.\n' + '\n' + "**Programmer's note:** Functions are first-class objects. A " + '""def""\n' + 'statement executed inside a function definition defines a ' + 'local\n' + 'function that can be returned or passed around. 
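A local function returned from its enclosing function, as described in the note above; "make_counter" is an illustrative name:

    >>> def make_counter():
    ...     count = 0
    ...     def increment():
    ...         nonlocal count   # 'count' is a free variable of increment()
    ...         count += 1
    ...         return count
    ...     return increment     # the local function is returned
    ...
    >>> tick = make_counter()
    >>> tick(), tick(), tick()
    (1, 2, 3)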
Free ' + 'variables used\n' + 'in the nested function can access the local variables of the ' + 'function\n' + 'containing the def. See section *Naming and binding* for ' + 'details.\n' + '\n' + 'See also: **PEP 3107** - Function Annotations\n' + '\n' + ' The original specification for function annotations.\n' + '\n' + '\n' + 'Class definitions\n' + '=================\n' + '\n' + 'A class definition defines a class object (see section *The ' + 'standard\n' + 'type hierarchy*):\n' + '\n' + ' classdef ::= [decorators] "class" classname ' + '[inheritance] ":" suite\n' + ' inheritance ::= "(" [parameter_list] ")"\n' + ' classname ::= identifier\n' + '\n' + 'A class definition is an executable statement. The ' + 'inheritance list\n' + 'usually gives a list of base classes (see *Customizing class ' + 'creation*\n' + 'for more advanced uses), so each item in the list should ' + 'evaluate to a\n' + 'class object which allows subclassing. Classes without an ' + 'inheritance\n' + 'list inherit, by default, from the base class "object"; ' + 'hence,\n' + '\n' + ' class Foo:\n' + ' pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo(object):\n' + ' pass\n' + '\n' + "The class's suite is then executed in a new execution frame " + '(see\n' + '*Naming and binding*), using a newly created local namespace ' + 'and the\n' + 'original global namespace. (Usually, the suite contains ' + 'mostly\n' + "function definitions.) When the class's suite finishes " + 'execution, its\n' + 'execution frame is discarded but its local namespace is saved. ' + '[4] A\n' + 'class object is then created using the inheritance list for ' + 'the base\n' + 'classes and the saved local namespace for the attribute ' + 'dictionary.\n' + 'The class name is bound to this class object in the original ' + 'local\n' + 'namespace.\n' + '\n' + 'Class creation can be customized heavily using *metaclasses*.\n' + '\n' + 'Classes can also be decorated: just like when decorating ' + 'functions,\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' class Foo: pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo: pass\n' + ' Foo = f1(arg)(f2(Foo))\n' + '\n' + 'The evaluation rules for the decorator expressions are the ' + 'same as for\n' + 'function decorators. The result must be a class object, which ' + 'is then\n' + 'bound to the class name.\n' + '\n' + "**Programmer's note:** Variables defined in the class " + 'definition are\n' + 'class attributes; they are shared by instances. Instance ' + 'attributes\n' + 'can be set in a method with "self.name = value". Both class ' + 'and\n' + 'instance attributes are accessible through the notation ' + '""self.name"",\n' + 'and an instance attribute hides a class attribute with the ' + 'same name\n' + 'when accessed in this way. Class attributes can be used as ' + 'defaults\n' + 'for instance attributes, but using mutable values there can ' + 'lead to\n' + 'unexpected results. *Descriptors* can be used to create ' + 'instance\n' + 'variables with different implementation details.\n' + '\n' + 'See also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** ' + '-\n' + ' Class Decorators\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] The exception is propagated to the invocation stack ' + 'unless\n' + ' there is a "finally" clause which happens to raise ' + 'another\n' + ' exception. 
That new exception causes the old one to be ' + 'lost.\n' + '\n' + '[2] Currently, control "flows off the end" except in the case ' + 'of\n' + ' an exception or the execution of a "return", "continue", ' + 'or\n' + ' "break" statement.\n' + '\n' + '[3] A string literal appearing as the first statement in the\n' + " function body is transformed into the function's " + '"__doc__"\n' + " attribute and therefore the function's *docstring*.\n" + '\n' + '[4] A string literal appearing as the first statement in the ' + 'class\n' + ' body is transformed into the namespace\'s "__doc__" item ' + 'and\n' + " therefore the class's *docstring*.\n", + 'context-managers': '\n' + 'With Statement Context Managers\n' + '*******************************\n' + '\n' + 'A *context manager* is an object that defines the ' + 'runtime context to\n' + 'be established when executing a "with" statement. The ' + 'context manager\n' + 'handles the entry into, and the exit from, the desired ' + 'runtime context\n' + 'for the execution of the block of code. Context ' + 'managers are normally\n' + 'invoked using the "with" statement (described in ' + 'section *The with\n' + 'statement*), but can also be used by directly invoking ' + 'their methods.\n' + '\n' + 'Typical uses of context managers include saving and ' + 'restoring various\n' + 'kinds of global state, locking and unlocking ' + 'resources, closing opened\n' + 'files, etc.\n' + '\n' + 'For more information on context managers, see *Context ' + 'Manager Types*.\n' + '\n' + 'object.__enter__(self)\n' + '\n' + ' Enter the runtime context related to this object. ' + 'The "with"\n' + " statement will bind this method's return value to " + 'the target(s)\n' + ' specified in the "as" clause of the statement, if ' + 'any.\n' + '\n' + 'object.__exit__(self, exc_type, exc_value, traceback)\n' + '\n' + ' Exit the runtime context related to this object. ' + 'The parameters\n' + ' describe the exception that caused the context to ' + 'be exited. If the\n' + ' context was exited without an exception, all three ' + 'arguments will\n' + ' be "None".\n' + '\n' + ' If an exception is supplied, and the method wishes ' + 'to suppress the\n' + ' exception (i.e., prevent it from being propagated), ' + 'it should\n' + ' return a true value. Otherwise, the exception will ' + 'be processed\n' + ' normally upon exit from this method.\n' + '\n' + ' Note that "__exit__()" methods should not reraise ' + 'the passed-in\n' + " exception; this is the caller's responsibility.\n" + '\n' + 'See also: **PEP 0343** - The "with" statement\n' + '\n' + ' The specification, background, and examples for ' + 'the Python "with"\n' + ' statement.\n', + 'continue': '\n' + 'The "continue" statement\n' + '************************\n' + '\n' + ' continue_stmt ::= "continue"\n' + '\n' + '"continue" may only occur syntactically nested in a "for" or ' + '"while"\n' + 'loop, but not nested in a function or class definition or ' + '"finally"\n' + 'clause within that loop. 
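As a brief aside on the "__enter__()"/"__exit__()" protocol described above, a minimal sketch (the "Timer" class is hypothetical, not part of the standard library):

    import time

    class Timer:
        def __enter__(self):
            self._start = time.time()
            return self            # bound to the "as" target, if any

        def __exit__(self, exc_type, exc_value, traceback):
            self.elapsed = time.time() - self._start
            return False           # a false value: do not suppress exceptions

    with Timer() as t:
        sum(range(100000))
    print(t.elapsed >= 0)          # True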
It continues with the next cycle of ' + 'the\n' + 'nearest enclosing loop.\n' + '\n' + 'When "continue" passes control out of a "try" statement with ' + 'a\n' + '"finally" clause, that "finally" clause is executed before ' + 'really\n' + 'starting the next loop cycle.\n', + 'conversions': '\n' + 'Arithmetic conversions\n' + '**********************\n' + '\n' + 'When a description of an arithmetic operator below uses the ' + 'phrase\n' + '"the numeric arguments are converted to a common type," ' + 'this means\n' + 'that the operator implementation for built-in types works ' + 'as follows:\n' + '\n' + '* If either argument is a complex number, the other is ' + 'converted to\n' + ' complex;\n' + '\n' + '* otherwise, if either argument is a floating point number, ' + 'the\n' + ' other is converted to floating point;\n' + '\n' + '* otherwise, both must be integers and no conversion is ' + 'necessary.\n' + '\n' + 'Some additional rules apply for certain operators (e.g., a ' + 'string as a\n' + "left argument to the '%' operator). Extensions must define " + 'their own\n' + 'conversion behavior.\n', + 'customization': '\n' + 'Basic customization\n' + '*******************\n' + '\n' + 'object.__new__(cls[, ...])\n' + '\n' + ' Called to create a new instance of class *cls*. ' + '"__new__()" is a\n' + ' static method (special-cased so you need not declare ' + 'it as such)\n' + ' that takes the class of which an instance was ' + 'requested as its\n' + ' first argument. The remaining arguments are those ' + 'passed to the\n' + ' object constructor expression (the call to the ' + 'class). The return\n' + ' value of "__new__()" should be the new object instance ' + '(usually an\n' + ' instance of *cls*).\n' + '\n' + ' Typical implementations create a new instance of the ' + 'class by\n' + ' invoking the superclass\'s "__new__()" method using\n' + ' "super(currentclass, cls).__new__(cls[, ...])" with ' + 'appropriate\n' + ' arguments and then modifying the newly-created ' + 'instance as\n' + ' necessary before returning it.\n' + '\n' + ' If "__new__()" returns an instance of *cls*, then the ' + 'new\n' + ' instance\'s "__init__()" method will be invoked like\n' + ' "__init__(self[, ...])", where *self* is the new ' + 'instance and the\n' + ' remaining arguments are the same as were passed to ' + '"__new__()".\n' + '\n' + ' If "__new__()" does not return an instance of *cls*, ' + 'then the new\n' + ' instance\'s "__init__()" method will not be invoked.\n' + '\n' + ' "__new__()" is intended mainly to allow subclasses of ' + 'immutable\n' + ' types (like int, str, or tuple) to customize instance ' + 'creation. It\n' + ' is also commonly overridden in custom metaclasses in ' + 'order to\n' + ' customize class creation.\n' + '\n' + 'object.__init__(self[, ...])\n' + '\n' + ' Called when the instance is created. The arguments ' + 'are those\n' + ' passed to the class constructor expression. If a base ' + 'class has an\n' + ' "__init__()" method, the derived class\'s "__init__()" ' + 'method, if\n' + ' any, must explicitly call it to ensure proper ' + 'initialization of the\n' + ' base class part of the instance; for example:\n' + ' "BaseClass.__init__(self, [args...])". As a special ' + 'constraint on\n' + ' constructors, no value may be returned; doing so will ' + 'cause a\n' + ' "TypeError" to be raised at runtime.\n' + '\n' + 'object.__del__(self)\n' + '\n' + ' Called when the instance is about to be destroyed. ' + 'This is also\n' + ' called a destructor. 
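A hedged sketch of the "__new__()"/"__init__()" interaction described above, assuming a made-up "Celsius" subclass of the immutable built-in type "float":

    class Celsius(float):
        def __new__(cls, degrees):
            # The value of an immutable instance must be chosen in __new__().
            return super().__new__(cls, degrees)

        def __init__(self, degrees):
            # Called afterwards because __new__() returned an instance of cls.
            self.unit = 'C'

    t = Celsius(21.5)
    print(t, t.unit)    # 21.5 C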
If a base class has a ' + '"__del__()" method, the\n' + ' derived class\'s "__del__()" method, if any, must ' + 'explicitly call it\n' + ' to ensure proper deletion of the base class part of ' + 'the instance.\n' + ' Note that it is possible (though not recommended!) for ' + 'the\n' + ' "__del__()" method to postpone destruction of the ' + 'instance by\n' + ' creating a new reference to it. It may then be called ' + 'at a later\n' + ' time when this new reference is deleted. It is not ' + 'guaranteed that\n' + ' "__del__()" methods are called for objects that still ' + 'exist when\n' + ' the interpreter exits.\n' + '\n' + ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' + 'the former\n' + ' decrements the reference count for "x" by one, and ' + 'the latter is\n' + ' only called when "x"\'s reference count reaches ' + 'zero. Some common\n' + ' situations that may prevent the reference count of ' + 'an object from\n' + ' going to zero include: circular references between ' + 'objects (e.g.,\n' + ' a doubly-linked list or a tree data structure with ' + 'parent and\n' + ' child pointers); a reference to the object on the ' + 'stack frame of\n' + ' a function that caught an exception (the traceback ' + 'stored in\n' + ' "sys.exc_info()[2]" keeps the stack frame alive); or ' + 'a reference\n' + ' to the object on the stack frame that raised an ' + 'unhandled\n' + ' exception in interactive mode (the traceback stored ' + 'in\n' + ' "sys.last_traceback" keeps the stack frame alive). ' + 'The first\n' + ' situation can only be remedied by explicitly ' + 'breaking the cycles;\n' + ' the latter two situations can be resolved by storing ' + '"None" in\n' + ' "sys.last_traceback". Circular references which are ' + 'garbage are\n' + ' detected and cleaned up when the cyclic garbage ' + 'collector is\n' + " enabled (it's on by default). Refer to the " + 'documentation for the\n' + ' "gc" module for more information about this topic.\n' + '\n' + ' Warning: Due to the precarious circumstances under ' + 'which\n' + ' "__del__()" methods are invoked, exceptions that ' + 'occur during\n' + ' their execution are ignored, and a warning is ' + 'printed to\n' + ' "sys.stderr" instead. Also, when "__del__()" is ' + 'invoked in\n' + ' response to a module being deleted (e.g., when ' + 'execution of the\n' + ' program is done), other globals referenced by the ' + '"__del__()"\n' + ' method may already have been deleted or in the ' + 'process of being\n' + ' torn down (e.g. the import machinery shutting ' + 'down). For this\n' + ' reason, "__del__()" methods should do the absolute ' + 'minimum needed\n' + ' to maintain external invariants. Starting with ' + 'version 1.5,\n' + ' Python guarantees that globals whose name begins ' + 'with a single\n' + ' underscore are deleted from their module before ' + 'other globals are\n' + ' deleted; if no other references to such globals ' + 'exist, this may\n' + ' help in assuring that imported modules are still ' + 'available at the\n' + ' time when the "__del__()" method is called.\n' + '\n' + 'object.__repr__(self)\n' + '\n' + ' Called by the "repr()" built-in function to compute ' + 'the "official"\n' + ' string representation of an object. If at all ' + 'possible, this\n' + ' should look like a valid Python expression that could ' + 'be used to\n' + ' recreate an object with the same value (given an ' + 'appropriate\n' + ' environment). If this is not possible, a string of ' + 'the form\n' + ' "<...some useful description...>" should be returned. 
' +
'The return value must be a string object. If a class defines\n' +
' "__repr__()" but not "__str__()", then "__repr__()" is also used\n' +
' when an "informal" string representation of instances of that class\n' +
' is required.\n' +
'\n' +
' This is typically used for debugging, so it is important that the\n' +
' representation is information-rich and unambiguous.\n' +
'\n' +
'object.__str__(self)\n' +
'\n' +
' Called by "str(object)" and the built-in functions "format()" and\n' +
' "print()" to compute the "informal" or nicely printable string\n' +
' representation of an object. The return value must be a *string*\n' +
' object.\n' +
'\n' +
' This method differs from "object.__repr__()" in that there is no\n' +
' expectation that "__str__()" return a valid Python expression: a\n' +
' more convenient or concise representation can be used.\n' +
'\n' +
' The default implementation defined by the built-in type "object"\n' +
' calls "object.__repr__()".\n' +
'\n' +
'object.__bytes__(self)\n' +
'\n' +
' Called by "bytes()" to compute a byte-string representation of an\n' +
' object. This should return a "bytes" object.\n' +
'\n' +
'object.__format__(self, format_spec)\n' +
'\n' +
' Called by the "format()" built-in function (and by extension, the\n' +
' "str.format()" method of class "str") to produce a "formatted"\n' +
' string representation of an object. The "format_spec" argument is a\n' +
' string that contains a description of the formatting options\n' +
' desired. The interpretation of the "format_spec" argument is up to\n' +
' the type implementing "__format__()", however most classes will\n' +
' either delegate formatting to one of the built-in types, or use a\n' +
' similar formatting option syntax.\n' +
'\n' +
' See *Format Specification Mini-Language* for a description of the\n' +
' standard formatting syntax.\n' +
'\n' +
' The return value must be a string object.\n' +
'\n' +
' Changed in version 3.4: The __format__ method of "object" itself\n' +
' raises a "TypeError" if passed any non-empty string.\n' +
'\n' +
'object.__lt__(self, other)\n' +
'object.__le__(self, other)\n' +
'object.__eq__(self, other)\n' +
'object.__ne__(self, other)\n' +
'object.__gt__(self, other)\n' +
'object.__ge__(self, other)\n' +
'\n' +
' These are the so-called "rich comparison" methods. The\n' +
' correspondence between operator symbols and method names is as\n' +
' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n' +
' "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n' +
' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' +
'\n' +
' A rich comparison method may return the singleton "NotImplemented"\n' +
' if it does not implement the operation for a given pair of\n' +
' arguments. By convention, "False" and "True" are returned for a\n' +
' successful comparison. However, these methods can return any value,\n' +
' so if the comparison operator is used in a Boolean context (e.g.,\n' +
' in the condition of an "if" statement), Python will call "bool()"\n' +
' on the value to determine if the result is true or false.\n' +
'\n' +
' There are no implied relationships among the comparison operators.\n' +
' The truth of "x==y" does not imply that "x!=y" is false.\n' +
' Accordingly, when defining "__eq__()", one should also define\n' +
' "__ne__()" so that the operators will behave as expected. 
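To illustrate the rich comparison methods above (and the "__hash__()" consistency discussed next), here is a small sketch; the "Version" class and the use of "functools.total_ordering()" are only one possible arrangement:

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.major, self.minor = major, minor

        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) == (other.major, other.minor)

        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) < (other.major, other.minor)

        # Defining __eq__() would otherwise set __hash__ to None.
        def __hash__(self):
            return hash((self.major, self.minor))

    print(Version(3, 4) >= Version(3, 2))   # True (derived by total_ordering)
    print(Version(1, 0) == Version(1, 0))   # True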
See the\n' + ' paragraph on "__hash__()" for some important notes on ' + 'creating\n' + ' *hashable* objects which support custom comparison ' + 'operations and\n' + ' are usable as dictionary keys.\n' + '\n' + ' There are no swapped-argument versions of these ' + 'methods (to be used\n' + ' when the left argument does not support the operation ' + 'but the right\n' + ' argument does); rather, "__lt__()" and "__gt__()" are ' + "each other's\n" + ' reflection, "__le__()" and "__ge__()" are each ' + "other's reflection,\n" + ' and "__eq__()" and "__ne__()" are their own ' + 'reflection.\n' + '\n' + ' Arguments to rich comparison methods are never ' + 'coerced.\n' + '\n' + ' To automatically generate ordering operations from a ' + 'single root\n' + ' operation, see "functools.total_ordering()".\n' + '\n' + 'object.__hash__(self)\n' + '\n' + ' Called by built-in function "hash()" and for ' + 'operations on members\n' + ' of hashed collections including "set", "frozenset", ' + 'and "dict".\n' + ' "__hash__()" should return an integer. The only ' + 'required property\n' + ' is that objects which compare equal have the same hash ' + 'value; it is\n' + ' advised to somehow mix together (e.g. using exclusive ' + 'or) the hash\n' + ' values for the components of the object that also play ' + 'a part in\n' + ' comparison of objects.\n' + '\n' + ' Note: "hash()" truncates the value returned from an ' + "object's\n" + ' custom "__hash__()" method to the size of a ' + '"Py_ssize_t". This\n' + ' is typically 8 bytes on 64-bit builds and 4 bytes on ' + '32-bit\n' + ' builds. If an object\'s "__hash__()" must ' + 'interoperate on builds\n' + ' of different bit sizes, be sure to check the width ' + 'on all\n' + ' supported builds. An easy way to do this is with ' + '"python -c\n' + ' "import sys; print(sys.hash_info.width)""\n' + '\n' + ' If a class does not define an "__eq__()" method it ' + 'should not\n' + ' define a "__hash__()" operation either; if it defines ' + '"__eq__()"\n' + ' but not "__hash__()", its instances will not be usable ' + 'as items in\n' + ' hashable collections. If a class defines mutable ' + 'objects and\n' + ' implements an "__eq__()" method, it should not ' + 'implement\n' + ' "__hash__()", since the implementation of hashable ' + 'collections\n' + " requires that a key's hash value is immutable (if the " + "object's hash\n" + ' value changes, it will be in the wrong hash bucket).\n' + '\n' + ' User-defined classes have "__eq__()" and "__hash__()" ' + 'methods by\n' + ' default; with them, all objects compare unequal ' + '(except with\n' + ' themselves) and "x.__hash__()" returns an appropriate ' + 'value such\n' + ' that "x == y" implies both that "x is y" and "hash(x) ' + '== hash(y)".\n' + '\n' + ' A class that overrides "__eq__()" and does not define ' + '"__hash__()"\n' + ' will have its "__hash__()" implicitly set to "None". 
' +
'When the "__hash__()" method of a class is "None", instances of the\n' +
' class will raise an appropriate "TypeError" when a program attempts\n' +
' to retrieve their hash value, and will also be correctly identified\n' +
' as unhashable when checking "isinstance(obj, collections.Hashable)".\n' +
'\n' +
' If a class that overrides "__eq__()" needs to retain the\n' +
' implementation of "__hash__()" from a parent class, the interpreter\n' +
' must be told this explicitly by setting\n' +
' "__hash__ = <ParentClass>.__hash__".\n' +
'\n' +
' If a class that does not override "__eq__()" wishes to suppress\n' +
' hash support, it should include "__hash__ = None" in the class\n' +
' definition. A class which defines its own "__hash__()" that\n' +
' explicitly raises a "TypeError" would be incorrectly identified as\n' +
' hashable by an "isinstance(obj, collections.Hashable)" call.\n' +
'\n' +
' Note: By default, the "__hash__()" values of str, bytes and\n' +
' datetime objects are "salted" with an unpredictable random value.\n' +
' Although they remain constant within an individual Python\n' +
' process, they are not predictable between repeated invocations of\n' +
' Python. This is intended to provide protection against a\n' +
' denial-of-service caused by carefully-chosen inputs that exploit\n' +
' the worst case performance of a dict insertion, O(n^2) complexity.\n' +
' See http://www.ocert.org/advisories/ocert-2011-003.html for\n' +
' details. Changing hash values affects the iteration order of\n' +
' dicts, sets and other mappings. Python has never made guarantees\n' +
' about this ordering (and it typically varies between 32-bit and\n' +
' 64-bit builds). See also "PYTHONHASHSEED".\n' +
'\n' +
' Changed in version 3.3: Hash randomization is enabled by default.\n' +
'\n' +
'object.__bool__(self)\n' +
'\n' +
' Called to implement truth value testing and the built-in operation\n' +
' "bool()"; should return "False" or "True". When this method is not\n' +
' defined, "__len__()" is called, if it is defined, and the object is\n' +
' considered true if its result is nonzero. If a class defines\n' +
' neither "__len__()" nor "__bool__()", all its instances are\n' +
' considered true.\n', +
'debugger': '\n' +
'"pdb" --- The Python Debugger\n' +
'*****************************\n' +
'\n' +
'The module "pdb" defines an interactive source code debugger for\n' +
'Python programs. It supports setting (conditional) breakpoints and\n' +
'single stepping at the source line level, inspection of stack frames,\n' +
'source code listing, and evaluation of arbitrary Python code in the\n' +
'context of any stack frame. It also supports post-mortem debugging\n' +
'and can be called under program control.\n' +
'\n' +
'The debugger is extensible -- it is actually defined as the class\n' +
'"Pdb". This is currently undocumented but easily understood by reading\n' +
'the source. The extension interface uses the modules "bdb" and "cmd".\n' +
'\n' +
'The debugger\'s prompt is "(Pdb)". 
Typical usage to run a ' + 'program under\n' + 'control of the debugger is:\n' + '\n' + ' >>> import pdb\n' + ' >>> import mymodule\n' + " >>> pdb.run('mymodule.test()')\n" + ' > (0)?()\n' + ' (Pdb) continue\n' + ' > (1)?()\n' + ' (Pdb) continue\n' + " NameError: 'spam'\n" + ' > (1)?()\n' + ' (Pdb)\n' + '\n' + 'Changed in version 3.3: Tab-completion via the "readline" ' + 'module is\n' + 'available for commands and command arguments, e.g. the current ' + 'global\n' + 'and local names are offered as arguments of the "p" command.\n' + '\n' + '"pdb.py" can also be invoked as a script to debug other ' + 'scripts. For\n' + 'example:\n' + '\n' + ' python3 -m pdb myscript.py\n' + '\n' + 'When invoked as a script, pdb will automatically enter ' + 'post-mortem\n' + 'debugging if the program being debugged exits abnormally. ' + 'After post-\n' + 'mortem debugging (or after normal exit of the program), pdb ' + 'will\n' + "restart the program. Automatic restarting preserves pdb's " + 'state (such\n' + 'as breakpoints) and in most cases is more useful than quitting ' + 'the\n' + "debugger upon program's exit.\n" + '\n' + 'New in version 3.2: "pdb.py" now accepts a "-c" option that ' + 'executes\n' + 'commands as if given in a ".pdbrc" file, see *Debugger ' + 'Commands*.\n' + '\n' + 'The typical usage to break into the debugger from a running ' + 'program is\n' + 'to insert\n' + '\n' + ' import pdb; pdb.set_trace()\n' + '\n' + 'at the location you want to break into the debugger. You can ' + 'then\n' + 'step through the code following this statement, and continue ' + 'running\n' + 'without the debugger using the "continue" command.\n' + '\n' + 'The typical usage to inspect a crashed program is:\n' + '\n' + ' >>> import pdb\n' + ' >>> import mymodule\n' + ' >>> mymodule.test()\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in ?\n' + ' File "./mymodule.py", line 4, in test\n' + ' test2()\n' + ' File "./mymodule.py", line 3, in test2\n' + ' print(spam)\n' + ' NameError: spam\n' + ' >>> pdb.pm()\n' + ' > ./mymodule.py(3)test2()\n' + ' -> print(spam)\n' + ' (Pdb)\n' + '\n' + 'The module defines the following functions; each enters the ' + 'debugger\n' + 'in a slightly different way:\n' + '\n' + 'pdb.run(statement, globals=None, locals=None)\n' + '\n' + ' Execute the *statement* (given as a string or a code ' + 'object) under\n' + ' debugger control. The debugger prompt appears before any ' + 'code is\n' + ' executed; you can set breakpoints and type "continue", or ' + 'you can\n' + ' step through the statement using "step" or "next" (all ' + 'these\n' + ' commands are explained below). The optional *globals* and ' + '*locals*\n' + ' arguments specify the environment in which the code is ' + 'executed; by\n' + ' default the dictionary of the module "__main__" is used. ' + '(See the\n' + ' explanation of the built-in "exec()" or "eval()" ' + 'functions.)\n' + '\n' + 'pdb.runeval(expression, globals=None, locals=None)\n' + '\n' + ' Evaluate the *expression* (given as a string or a code ' + 'object)\n' + ' under debugger control. When "runeval()" returns, it ' + 'returns the\n' + ' value of the expression. Otherwise this function is ' + 'similar to\n' + ' "run()".\n' + '\n' + 'pdb.runcall(function, *args, **kwds)\n' + '\n' + ' Call the *function* (a function or method object, not a ' + 'string)\n' + ' with the given arguments. When "runcall()" returns, it ' + 'returns\n' + ' whatever the function call returned. 
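A short usage sketch of "pdb.runcall()" as described above; "divide" is an invented example function, and the session is interactive, resuming once "continue" is typed at the "(Pdb)" prompt:

    import pdb

    def divide(numerator, denominator):
        return numerator / denominator

    # The debugger prompt appears as soon as divide() is entered; after
    # "continue", runcall() returns whatever divide() returned.
    result = pdb.runcall(divide, 42, 7)
    print(result)    # 6.0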
The debugger prompt ' + 'appears\n' + ' as soon as the function is entered.\n' + '\n' + 'pdb.set_trace()\n' + '\n' + ' Enter the debugger at the calling stack frame. This is ' + 'useful to\n' + ' hard-code a breakpoint at a given point in a program, even ' + 'if the\n' + ' code is not otherwise being debugged (e.g. when an ' + 'assertion\n' + ' fails).\n' + '\n' + 'pdb.post_mortem(traceback=None)\n' + '\n' + ' Enter post-mortem debugging of the given *traceback* ' + 'object. If no\n' + ' *traceback* is given, it uses the one of the exception that ' + 'is\n' + ' currently being handled (an exception must be being handled ' + 'if the\n' + ' default is to be used).\n' + '\n' + 'pdb.pm()\n' + '\n' + ' Enter post-mortem debugging of the traceback found in\n' + ' "sys.last_traceback".\n' + '\n' + 'The "run*" functions and "set_trace()" are aliases for ' + 'instantiating\n' + 'the "Pdb" class and calling the method of the same name. If ' + 'you want\n' + 'to access further features, you have to do this yourself:\n' + '\n' + "class class pdb.Pdb(completekey='tab', stdin=None, " + 'stdout=None, skip=None, nosigint=False)\n' + '\n' + ' "Pdb" is the debugger class.\n' + '\n' + ' The *completekey*, *stdin* and *stdout* arguments are ' + 'passed to the\n' + ' underlying "cmd.Cmd" class; see the description there.\n' + '\n' + ' The *skip* argument, if given, must be an iterable of ' + 'glob-style\n' + ' module name patterns. The debugger will not step into ' + 'frames that\n' + ' originate in a module that matches one of these patterns. ' + '[1]\n' + '\n' + ' By default, Pdb sets a handler for the SIGINT signal (which ' + 'is sent\n' + ' when the user presses Ctrl-C on the console) when you give ' + 'a\n' + ' "continue" command. This allows you to break into the ' + 'debugger\n' + ' again by pressing Ctrl-C. If you want Pdb not to touch the ' + 'SIGINT\n' + ' handler, set *nosigint* tot true.\n' + '\n' + ' Example call to enable tracing with *skip*:\n' + '\n' + " import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n" + '\n' + ' New in version 3.1: The *skip* argument.\n' + '\n' + ' New in version 3.2: The *nosigint* argument. Previously, a ' + 'SIGINT\n' + ' handler was never set by Pdb.\n' + '\n' + ' run(statement, globals=None, locals=None)\n' + ' runeval(expression, globals=None, locals=None)\n' + ' runcall(function, *args, **kwds)\n' + ' set_trace()\n' + '\n' + ' See the documentation for the functions explained ' + 'above.\n' + '\n' + '\n' + 'Debugger Commands\n' + '=================\n' + '\n' + 'The commands recognized by the debugger are listed below. ' + 'Most\n' + 'commands can be abbreviated to one or two letters as ' + 'indicated; e.g.\n' + '"h(elp)" means that either "h" or "help" can be used to enter ' + 'the help\n' + 'command (but not "he" or "hel", nor "H" or "Help" or "HELP").\n' + 'Arguments to commands must be separated by whitespace (spaces ' + 'or\n' + 'tabs). Optional arguments are enclosed in square brackets ' + '("[]") in\n' + 'the command syntax; the square brackets must not be typed.\n' + 'Alternatives in the command syntax are separated by a vertical ' + 'bar\n' + '("|").\n' + '\n' + 'Entering a blank line repeats the last command entered. ' + 'Exception: if\n' + 'the last command was a "list" command, the next 11 lines are ' + 'listed.\n' + '\n' + "Commands that the debugger doesn't recognize are assumed to be " + 'Python\n' + 'statements and are executed in the context of the program ' + 'being\n' + 'debugged. 
Python statements can also be prefixed with an ' + 'exclamation\n' + 'point ("!"). This is a powerful way to inspect the program ' + 'being\n' + 'debugged; it is even possible to change a variable or call a ' + 'function.\n' + 'When an exception occurs in such a statement, the exception ' + 'name is\n' + "printed but the debugger's state is not changed.\n" + '\n' + 'The debugger supports *aliases*. Aliases can have parameters ' + 'which\n' + 'allows one a certain level of adaptability to the context ' + 'under\n' + 'examination.\n' + '\n' + 'Multiple commands may be entered on a single line, separated ' + 'by ";;".\n' + '(A single ";" is not used as it is the separator for multiple ' + 'commands\n' + 'in a line that is passed to the Python parser.) No ' + 'intelligence is\n' + 'applied to separating the commands; the input is split at the ' + 'first\n' + '";;" pair, even if it is in the middle of a quoted string.\n' + '\n' + 'If a file ".pdbrc" exists in the user\'s home directory or in ' + 'the\n' + 'current directory, it is read in and executed as if it had ' + 'been typed\n' + 'at the debugger prompt. This is particularly useful for ' + 'aliases. If\n' + 'both files exist, the one in the home directory is read first ' + 'and\n' + 'aliases defined there can be overridden by the local file.\n' + '\n' + 'Changed in version 3.2: ".pdbrc" can now contain commands ' + 'that\n' + 'continue debugging, such as "continue" or "next". Previously, ' + 'these\n' + 'commands had no effect.\n' + '\n' + 'h(elp) [command]\n' + '\n' + ' Without argument, print the list of available commands. ' + 'With a\n' + ' *command* as argument, print help about that command. ' + '"help pdb"\n' + ' displays the full documentation (the docstring of the ' + '"pdb"\n' + ' module). Since the *command* argument must be an ' + 'identifier, "help\n' + ' exec" must be entered to get help on the "!" command.\n' + '\n' + 'w(here)\n' + '\n' + ' Print a stack trace, with the most recent frame at the ' + 'bottom. An\n' + ' arrow indicates the current frame, which determines the ' + 'context of\n' + ' most commands.\n' + '\n' + 'd(own) [count]\n' + '\n' + ' Move the current frame *count* (default one) levels down in ' + 'the\n' + ' stack trace (to a newer frame).\n' + '\n' + 'u(p) [count]\n' + '\n' + ' Move the current frame *count* (default one) levels up in ' + 'the stack\n' + ' trace (to an older frame).\n' + '\n' + 'b(reak) [([filename:]lineno | function) [, condition]]\n' + '\n' + ' With a *lineno* argument, set a break there in the current ' + 'file.\n' + ' With a *function* argument, set a break at the first ' + 'executable\n' + ' statement within that function. The line number may be ' + 'prefixed\n' + ' with a filename and a colon, to specify a breakpoint in ' + 'another\n' + " file (probably one that hasn't been loaded yet). The file " + 'is\n' + ' searched on "sys.path". Note that each breakpoint is ' + 'assigned a\n' + ' number to which all the other breakpoint commands refer.\n' + '\n' + ' If a second argument is present, it is an expression which ' + 'must\n' + ' evaluate to true before the breakpoint is honored.\n' + '\n' + ' Without argument, list all breaks, including for each ' + 'breakpoint,\n' + ' the number of times that breakpoint has been hit, the ' + 'current\n' + ' ignore count, and the associated condition if any.\n' + '\n' + 'tbreak [([filename:]lineno | function) [, condition]]\n' + '\n' + ' Temporary breakpoint, which is removed automatically when ' + 'it is\n' + ' first hit. 
The arguments are the same as for "break".\n' + '\n' + 'cl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n' + '\n' + ' With a *filename:lineno* argument, clear all the ' + 'breakpoints at\n' + ' this line. With a space separated list of breakpoint ' + 'numbers, clear\n' + ' those breakpoints. Without argument, clear all breaks (but ' + 'first\n' + ' ask confirmation).\n' + '\n' + 'disable [bpnumber [bpnumber ...]]\n' + '\n' + ' Disable the breakpoints given as a space separated list of\n' + ' breakpoint numbers. Disabling a breakpoint means it cannot ' + 'cause\n' + ' the program to stop execution, but unlike clearing a ' + 'breakpoint, it\n' + ' remains in the list of breakpoints and can be ' + '(re-)enabled.\n' + '\n' + 'enable [bpnumber [bpnumber ...]]\n' + '\n' + ' Enable the breakpoints specified.\n' + '\n' + 'ignore bpnumber [count]\n' + '\n' + ' Set the ignore count for the given breakpoint number. If ' + 'count is\n' + ' omitted, the ignore count is set to 0. A breakpoint ' + 'becomes active\n' + ' when the ignore count is zero. When non-zero, the count ' + 'is\n' + ' decremented each time the breakpoint is reached and the ' + 'breakpoint\n' + ' is not disabled and any associated condition evaluates to ' + 'true.\n' + '\n' + 'condition bpnumber [condition]\n' + '\n' + ' Set a new *condition* for the breakpoint, an expression ' + 'which must\n' + ' evaluate to true before the breakpoint is honored. If ' + '*condition*\n' + ' is absent, any existing condition is removed; i.e., the ' + 'breakpoint\n' + ' is made unconditional.\n' + '\n' + 'commands [bpnumber]\n' + '\n' + ' Specify a list of commands for breakpoint number ' + '*bpnumber*. The\n' + ' commands themselves appear on the following lines. Type a ' + 'line\n' + ' containing just "end" to terminate the commands. An ' + 'example:\n' + '\n' + ' (Pdb) commands 1\n' + ' (com) p some_variable\n' + ' (com) end\n' + ' (Pdb)\n' + '\n' + ' To remove all commands from a breakpoint, type commands and ' + 'follow\n' + ' it immediately with "end"; that is, give no commands.\n' + '\n' + ' With no *bpnumber* argument, commands refers to the last ' + 'breakpoint\n' + ' set.\n' + '\n' + ' You can use breakpoint commands to start your program up ' + 'again.\n' + ' Simply use the continue command, or step, or any other ' + 'command that\n' + ' resumes execution.\n' + '\n' + ' Specifying any command resuming execution (currently ' + 'continue,\n' + ' step, next, return, jump, quit and their abbreviations) ' + 'terminates\n' + ' the command list (as if that command was immediately ' + 'followed by\n' + ' end). This is because any time you resume execution (even ' + 'with a\n' + ' simple next or step), you may encounter another ' + 'breakpoint--which\n' + ' could have its own command list, leading to ambiguities ' + 'about which\n' + ' list to execute.\n' + '\n' + " If you use the 'silent' command in the command list, the " + 'usual\n' + ' message about stopping at a breakpoint is not printed. ' + 'This may be\n' + ' desirable for breakpoints that are to print a specific ' + 'message and\n' + ' then continue. If none of the other commands print ' + 'anything, you\n' + ' see no sign that the breakpoint was reached.\n' + '\n' + 's(tep)\n' + '\n' + ' Execute the current line, stop at the first possible ' + 'occasion\n' + ' (either in a function that is called or on the next line in ' + 'the\n' + ' current function).\n' + '\n' + 'n(ext)\n' + '\n' + ' Continue execution until the next line in the current ' + 'function is\n' + ' reached or it returns. 
(The difference between "next" and ' + '"step"\n' + ' is that "step" stops inside a called function, while ' + '"next"\n' + ' executes called functions at (nearly) full speed, only ' + 'stopping at\n' + ' the next line in the current function.)\n' + '\n' + 'unt(il) [lineno]\n' + '\n' + ' Without argument, continue execution until the line with a ' + 'number\n' + ' greater than the current one is reached.\n' + '\n' + ' With a line number, continue execution until a line with a ' + 'number\n' + ' greater or equal to that is reached. In both cases, also ' + 'stop when\n' + ' the current frame returns.\n' + '\n' + ' Changed in version 3.2: Allow giving an explicit line ' + 'number.\n' + '\n' + 'r(eturn)\n' + '\n' + ' Continue execution until the current function returns.\n' + '\n' + 'c(ont(inue))\n' + '\n' + ' Continue execution, only stop when a breakpoint is ' + 'encountered.\n' + '\n' + 'j(ump) lineno\n' + '\n' + ' Set the next line that will be executed. Only available in ' + 'the\n' + ' bottom-most frame. This lets you jump back and execute ' + 'code again,\n' + " or jump forward to skip code that you don't want to run.\n" + '\n' + ' It should be noted that not all jumps are allowed -- for ' + 'instance\n' + ' it is not possible to jump into the middle of a "for" loop ' + 'or out\n' + ' of a "finally" clause.\n' + '\n' + 'l(ist) [first[, last]]\n' + '\n' + ' List source code for the current file. Without arguments, ' + 'list 11\n' + ' lines around the current line or continue the previous ' + 'listing.\n' + ' With "." as argument, list 11 lines around the current ' + 'line. With\n' + ' one argument, list 11 lines around at that line. With two\n' + ' arguments, list the given range; if the second argument is ' + 'less\n' + ' than the first, it is interpreted as a count.\n' + '\n' + ' The current line in the current frame is indicated by ' + '"->". 
If an\n' + ' exception is being debugged, the line where the exception ' + 'was\n' + ' originally raised or propagated is indicated by ">>", if it ' + 'differs\n' + ' from the current line.\n' + '\n' + ' New in version 3.2: The ">>" marker.\n' + '\n' + 'll | longlist\n' + '\n' + ' List all source code for the current function or frame.\n' + ' Interesting lines are marked as for "list".\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'a(rgs)\n' + '\n' + ' Print the argument list of the current function.\n' + '\n' + 'p expression\n' + '\n' + ' Evaluate the *expression* in the current context and print ' + 'its\n' + ' value.\n' + '\n' + ' Note: "print()" can also be used, but is not a debugger ' + 'command\n' + ' --- this executes the Python "print()" function.\n' + '\n' + 'pp expression\n' + '\n' + ' Like the "p" command, except the value of the expression is ' + 'pretty-\n' + ' printed using the "pprint" module.\n' + '\n' + 'whatis expression\n' + '\n' + ' Print the type of the *expression*.\n' + '\n' + 'source expression\n' + '\n' + ' Try to get source code for the given object and display ' + 'it.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'display [expression]\n' + '\n' + ' Display the value of the expression if it changed, each ' + 'time\n' + ' execution stops in the current frame.\n' + '\n' + ' Without expression, list all display expressions for the ' + 'current\n' + ' frame.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'undisplay [expression]\n' + '\n' + ' Do not display the expression any more in the current ' + 'frame.\n' + ' Without expression, clear all display expressions for the ' + 'current\n' + ' frame.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'interact\n' + '\n' + ' Start an interative interpreter (using the "code" module) ' + 'whose\n' + ' global namespace contains all the (global and local) names ' + 'found in\n' + ' the current scope.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'alias [name [command]]\n' + '\n' + ' Create an alias called *name* that executes *command*. The ' + 'command\n' + ' must *not* be enclosed in quotes. Replaceable parameters ' + 'can be\n' + ' indicated by "%1", "%2", and so on, while "%*" is replaced ' + 'by all\n' + ' the parameters. If no command is given, the current alias ' + 'for\n' + ' *name* is shown. If no arguments are given, all aliases are ' + 'listed.\n' + '\n' + ' Aliases may be nested and can contain anything that can be ' + 'legally\n' + ' typed at the pdb prompt. Note that internal pdb commands ' + '*can* be\n' + ' overridden by aliases. Such a command is then hidden until ' + 'the\n' + ' alias is removed. Aliasing is recursively applied to the ' + 'first\n' + ' word of the command line; all other words in the line are ' + 'left\n' + ' alone.\n' + '\n' + ' As an example, here are two useful aliases (especially when ' + 'placed\n' + ' in the ".pdbrc" file):\n' + '\n' + ' # Print instance variables (usage "pi classInst")\n' + ' alias pi for k in %1.__dict__.keys(): ' + 'print("%1.",k,"=",%1.__dict__[k])\n' + ' # Print instance variables in self\n' + ' alias ps pi self\n' + '\n' + 'unalias name\n' + '\n' + ' Delete the specified alias.\n' + '\n' + '! statement\n' + '\n' + ' Execute the (one-line) *statement* in the context of the ' + 'current\n' + ' stack frame. The exclamation point can be omitted unless ' + 'the first\n' + ' word of the statement resembles a debugger command. 
To set ' + 'a\n' + ' global variable, you can prefix the assignment command with ' + 'a\n' + ' "global" statement on the same line, e.g.:\n' + '\n' + " (Pdb) global list_options; list_options = ['-l']\n" + ' (Pdb)\n' + '\n' + 'run [args ...]\n' + 'restart [args ...]\n' + '\n' + ' Restart the debugged Python program. If an argument is ' + 'supplied,\n' + ' it is split with "shlex" and the result is used as the new\n' + ' "sys.argv". History, breakpoints, actions and debugger ' + 'options are\n' + ' preserved. "restart" is an alias for "run".\n' + '\n' + 'q(uit)\n' + '\n' + ' Quit from the debugger. The program being executed is ' + 'aborted.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] Whether a frame is considered to originate in a certain ' + 'module\n' + ' is determined by the "__name__" in the frame globals.\n', + 'del': '\n' + 'The "del" statement\n' + '*******************\n' + '\n' + ' del_stmt ::= "del" target_list\n' + '\n' + 'Deletion is recursively defined very similar to the way assignment ' + 'is\n' + 'defined. Rather than spelling it out in full details, here are ' + 'some\n' + 'hints.\n' + '\n' + 'Deletion of a target list recursively deletes each target, from ' + 'left\n' + 'to right.\n' + '\n' + 'Deletion of a name removes the binding of that name from the local ' + 'or\n' + 'global namespace, depending on whether the name occurs in a ' + '"global"\n' + 'statement in the same code block. If the name is unbound, a\n' + '"NameError" exception will be raised.\n' + '\n' + 'Deletion of attribute references, subscriptions and slicings is ' + 'passed\n' + 'to the primary object involved; deletion of a slicing is in ' + 'general\n' + 'equivalent to assignment of an empty slice of the right type (but ' + 'even\n' + 'this is determined by the sliced object).\n' + '\n' + 'Changed in version 3.2: Previously it was illegal to delete a name\n' + 'from the local namespace if it occurs as a free variable in a ' + 'nested\n' + 'block.\n', + 'dict': '\n' + 'Dictionary displays\n' + '*******************\n' + '\n' + 'A dictionary display is a possibly empty series of key/datum ' + 'pairs\n' + 'enclosed in curly braces:\n' + '\n' + ' dict_display ::= "{" [key_datum_list | ' + 'dict_comprehension] "}"\n' + ' key_datum_list ::= key_datum ("," key_datum)* [","]\n' + ' key_datum ::= expression ":" expression\n' + ' dict_comprehension ::= expression ":" expression comp_for\n' + '\n' + 'A dictionary display yields a new dictionary object.\n' + '\n' + 'If a comma-separated sequence of key/datum pairs is given, they ' + 'are\n' + 'evaluated from left to right to define the entries of the ' + 'dictionary:\n' + 'each key object is used as a key into the dictionary to store the\n' + 'corresponding datum. This means that you can specify the same ' + 'key\n' + "multiple times in the key/datum list, and the final dictionary's " + 'value\n' + 'for that key will be the last one given.\n' + '\n' + 'A dict comprehension, in contrast to list and set comprehensions,\n' + 'needs two expressions separated with a colon followed by the ' + 'usual\n' + '"for" and "if" clauses. When the comprehension is run, the ' + 'resulting\n' + 'key and value elements are inserted in the new dictionary in the ' + 'order\n' + 'they are produced.\n' + '\n' + 'Restrictions on the types of the key values are listed earlier in\n' + 'section *The standard type hierarchy*. (To summarize, the key ' + 'type\n' + 'should be *hashable*, which excludes all mutable objects.) 
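A small illustration of the dictionary display rules above, covering duplicate keys and a dict comprehension (the keys and values are arbitrary examples):

    # The textually last datum for a duplicated key prevails.
    d = {'spam': 1, 'spam': 2}
    print(d)                          # {'spam': 2}

    # A dict comprehension pairs "key: value" with the usual for/if clauses.
    squares = {n: n * n for n in range(5) if n % 2}
    print(sorted(squares.items()))    # [(1, 1), (3, 9)]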
' + 'Clashes\n' + 'between duplicate keys are not detected; the last datum ' + '(textually\n' + 'rightmost in the display) stored for a given key value prevails.\n', + 'dynamic-features': '\n' + 'Interaction with dynamic features\n' + '*********************************\n' + '\n' + 'There are several cases where Python statements are ' + 'illegal when used\n' + 'in conjunction with nested scopes that contain free ' + 'variables.\n' + '\n' + 'If a variable is referenced in an enclosing scope, it ' + 'is illegal to\n' + 'delete the name. An error will be reported at compile ' + 'time.\n' + '\n' + 'If the wild card form of import --- "import *" --- is ' + 'used in a\n' + 'function and the function contains or is a nested ' + 'block with free\n' + 'variables, the compiler will raise a "SyntaxError".\n' + '\n' + 'The "eval()" and "exec()" functions do not have access ' + 'to the full\n' + 'environment for resolving names. Names may be ' + 'resolved in the local\n' + 'and global namespaces of the caller. Free variables ' + 'are not resolved\n' + 'in the nearest enclosing namespace, but in the global ' + 'namespace. [1]\n' + 'The "exec()" and "eval()" functions have optional ' + 'arguments to\n' + 'override the global and local namespace. If only one ' + 'namespace is\n' + 'specified, it is used for both.\n', + 'else': '\n' + 'The "if" statement\n' + '******************\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" expression ":" suite\n' + ' ( "elif" expression ":" suite )*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the expressions ' + 'one\n' + 'by one until one is found to be true (see section *Boolean ' + 'operations*\n' + 'for the definition of true and false); then that suite is ' + 'executed\n' + '(and no other part of the "if" statement is executed or ' + 'evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, if\n' + 'present, is executed.\n', + 'exceptions': '\n' + 'Exceptions\n' + '**********\n' + '\n' + 'Exceptions are a means of breaking out of the normal flow of ' + 'control\n' + 'of a code block in order to handle errors or other ' + 'exceptional\n' + 'conditions. An exception is *raised* at the point where the ' + 'error is\n' + 'detected; it may be *handled* by the surrounding code block ' + 'or by any\n' + 'code block that directly or indirectly invoked the code ' + 'block where\n' + 'the error occurred.\n' + '\n' + 'The Python interpreter raises an exception when it detects a ' + 'run-time\n' + 'error (such as division by zero). A Python program can ' + 'also\n' + 'explicitly raise an exception with the "raise" statement. ' + 'Exception\n' + 'handlers are specified with the "try" ... "except" ' + 'statement. The\n' + '"finally" clause of such a statement can be used to specify ' + 'cleanup\n' + 'code which does not handle the exception, but is executed ' + 'whether an\n' + 'exception occurred or not in the preceding code.\n' + '\n' + 'Python uses the "termination" model of error handling: an ' + 'exception\n' + 'handler can find out what happened and continue execution at ' + 'an outer\n' + 'level, but it cannot repair the cause of the error and retry ' + 'the\n' + 'failing operation (except by re-entering the offending piece ' + 'of code\n' + 'from the top).\n' + '\n' + 'When an exception is not handled at all, the interpreter ' + 'terminates\n' + 'execution of the program, or returns to its interactive main ' + 'loop. 
In\n' + 'either case, it prints a stack backtrace, except when the ' + 'exception is\n' + '"SystemExit".\n' + '\n' + 'Exceptions are identified by class instances. The "except" ' + 'clause is\n' + 'selected depending on the class of the instance: it must ' + 'reference the\n' + 'class of the instance or a base class thereof. The instance ' + 'can be\n' + 'received by the handler and can carry additional information ' + 'about the\n' + 'exceptional condition.\n' + '\n' + 'Note: Exception messages are not part of the Python API. ' + 'Their\n' + ' contents may change from one version of Python to the next ' + 'without\n' + ' warning and should not be relied on by code which will run ' + 'under\n' + ' multiple versions of the interpreter.\n' + '\n' + 'See also the description of the "try" statement in section ' + '*The try\n' + 'statement* and "raise" statement in section *The raise ' + 'statement*.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] This limitation occurs because the code that is executed ' + 'by\n' + ' these operations is not available at the time the module ' + 'is\n' + ' compiled.\n', + 'execmodel': '\n' + 'Execution model\n' + '***************\n' + '\n' + '\n' + 'Naming and binding\n' + '==================\n' + '\n' + '*Names* refer to objects. Names are introduced by name ' + 'binding\n' + 'operations. Each occurrence of a name in the program text ' + 'refers to\n' + 'the *binding* of that name established in the innermost ' + 'function block\n' + 'containing the use.\n' + '\n' + 'A *block* is a piece of Python program text that is executed ' + 'as a\n' + 'unit. The following are blocks: a module, a function body, ' + 'and a class\n' + 'definition. Each command typed interactively is a block. A ' + 'script\n' + 'file (a file given as standard input to the interpreter or ' + 'specified\n' + 'as a command line argument to the interpreter) is a code ' + 'block. A\n' + 'script command (a command specified on the interpreter ' + 'command line\n' + "with the '**-c**' option) is a code block. The string " + 'argument passed\n' + 'to the built-in functions "eval()" and "exec()" is a code ' + 'block.\n' + '\n' + 'A code block is executed in an *execution frame*. A frame ' + 'contains\n' + 'some administrative information (used for debugging) and ' + 'determines\n' + "where and how execution continues after the code block's " + 'execution has\n' + 'completed.\n' + '\n' + 'A *scope* defines the visibility of a name within a block. ' + 'If a local\n' + 'variable is defined in a block, its scope includes that ' + 'block. If the\n' + 'definition occurs in a function block, the scope extends to ' + 'any blocks\n' + 'contained within the defining one, unless a contained block ' + 'introduces\n' + 'a different binding for the name. The scope of names defined ' + 'in a\n' + 'class block is limited to the class block; it does not extend ' + 'to the\n' + 'code blocks of methods -- this includes comprehensions and ' + 'generator\n' + 'expressions since they are implemented using a function ' + 'scope. This\n' + 'means that the following will fail:\n' + '\n' + ' class A:\n' + ' a = 42\n' + ' b = list(a + i for i in range(10))\n' + '\n' + 'When a name is used in a code block, it is resolved using the ' + 'nearest\n' + 'enclosing scope. The set of all such scopes visible to a ' + 'code block\n' + "is called the block's *environment*.\n" + '\n' + 'If a name is bound in a block, it is a local variable of that ' + 'block,\n' + 'unless declared as "nonlocal". 
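As an aside on name binding in nested blocks, a minimal sketch using "nonlocal" (the helper names "make_counter" and "bump" are invented):

    def make_counter():
        count = 0                 # local to make_counter()

        def bump():
            nonlocal count        # rebind the enclosing function's variable
            count += 1
            return count

        return bump

    bump = make_counter()
    print(bump(), bump(), bump())   # 1 2 3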
If a name is bound at the ' + 'module\n' + 'level, it is a global variable. (The variables of the module ' + 'code\n' + 'block are local and global.) If a variable is used in a code ' + 'block\n' + 'but not defined there, it is a *free variable*.\n' + '\n' + 'When a name is not found at all, a "NameError" exception is ' + 'raised.\n' + 'If the name refers to a local variable that has not been ' + 'bound, an\n' + '"UnboundLocalError" exception is raised. "UnboundLocalError" ' + 'is a\n' + 'subclass of "NameError".\n' + '\n' + 'The following constructs bind names: formal parameters to ' + 'functions,\n' + '"import" statements, class and function definitions (these ' + 'bind the\n' + 'class or function name in the defining block), and targets ' + 'that are\n' + 'identifiers if occurring in an assignment, "for" loop header, ' + 'or after\n' + '"as" in a "with" statement or "except" clause. The "import" ' + 'statement\n' + 'of the form "from ... import *" binds all names defined in ' + 'the\n' + 'imported module, except those beginning with an underscore. ' + 'This form\n' + 'may only be used at the module level.\n' + '\n' + 'A target occurring in a "del" statement is also considered ' + 'bound for\n' + 'this purpose (though the actual semantics are to unbind the ' + 'name).\n' + '\n' + 'Each assignment or import statement occurs within a block ' + 'defined by a\n' + 'class or function definition or at the module level (the ' + 'top-level\n' + 'code block).\n' + '\n' + 'If a name binding operation occurs anywhere within a code ' + 'block, all\n' + 'uses of the name within the block are treated as references ' + 'to the\n' + 'current block. This can lead to errors when a name is used ' + 'within a\n' + 'block before it is bound. This rule is subtle. Python ' + 'lacks\n' + 'declarations and allows name binding operations to occur ' + 'anywhere\n' + 'within a code block. The local variables of a code block can ' + 'be\n' + 'determined by scanning the entire text of the block for name ' + 'binding\n' + 'operations.\n' + '\n' + 'If the "global" statement occurs within a block, all uses of ' + 'the name\n' + 'specified in the statement refer to the binding of that name ' + 'in the\n' + 'top-level namespace. Names are resolved in the top-level ' + 'namespace by\n' + 'searching the global namespace, i.e. the namespace of the ' + 'module\n' + 'containing the code block, and the builtins namespace, the ' + 'namespace\n' + 'of the module "builtins". The global namespace is searched ' + 'first. If\n' + 'the name is not found there, the builtins namespace is ' + 'searched. The\n' + 'global statement must precede all uses of the name.\n' + '\n' + 'The builtins namespace associated with the execution of a ' + 'code block\n' + 'is actually found by looking up the name "__builtins__" in ' + 'its global\n' + 'namespace; this should be a dictionary or a module (in the ' + 'latter case\n' + "the module's dictionary is used). By default, when in the " + '"__main__"\n' + 'module, "__builtins__" is the built-in module "builtins"; ' + 'when in any\n' + 'other module, "__builtins__" is an alias for the dictionary ' + 'of the\n' + '"builtins" module itself. "__builtins__" can be set to a ' + 'user-created\n' + 'dictionary to create a weak form of restricted execution.\n' + '\n' + '**CPython implementation detail:** Users should not touch\n' + '"__builtins__"; it is strictly an implementation detail. 
' + 'Users\n' + 'wanting to override values in the builtins namespace should ' + '"import"\n' + 'the "builtins" module and modify its attributes ' + 'appropriately.\n' + '\n' + 'The namespace for a module is automatically created the first ' + 'time a\n' + 'module is imported. The main module for a script is always ' + 'called\n' + '"__main__".\n' + '\n' + 'The "global" statement has the same scope as a name binding ' + 'operation\n' + 'in the same block. If the nearest enclosing scope for a free ' + 'variable\n' + 'contains a global statement, the free variable is treated as ' + 'a global.\n' + '\n' + 'A class definition is an executable statement that may use ' + 'and define\n' + 'names. These references follow the normal rules for name ' + 'resolution.\n' + 'The namespace of the class definition becomes the attribute ' + 'dictionary\n' + 'of the class. Names defined at the class scope are not ' + 'visible in\n' + 'methods.\n' + '\n' + '\n' + 'Interaction with dynamic features\n' + '---------------------------------\n' + '\n' + 'There are several cases where Python statements are illegal ' + 'when used\n' + 'in conjunction with nested scopes that contain free ' + 'variables.\n' + '\n' + 'If a variable is referenced in an enclosing scope, it is ' + 'illegal to\n' + 'delete the name. An error will be reported at compile time.\n' + '\n' + 'If the wild card form of import --- "import *" --- is used in ' + 'a\n' + 'function and the function contains or is a nested block with ' + 'free\n' + 'variables, the compiler will raise a "SyntaxError".\n' + '\n' + 'The "eval()" and "exec()" functions do not have access to the ' + 'full\n' + 'environment for resolving names. Names may be resolved in ' + 'the local\n' + 'and global namespaces of the caller. Free variables are not ' + 'resolved\n' + 'in the nearest enclosing namespace, but in the global ' + 'namespace. [1]\n' + 'The "exec()" and "eval()" functions have optional arguments ' + 'to\n' + 'override the global and local namespace. If only one ' + 'namespace is\n' + 'specified, it is used for both.\n' + '\n' + '\n' + 'Exceptions\n' + '==========\n' + '\n' + 'Exceptions are a means of breaking out of the normal flow of ' + 'control\n' + 'of a code block in order to handle errors or other ' + 'exceptional\n' + 'conditions. An exception is *raised* at the point where the ' + 'error is\n' + 'detected; it may be *handled* by the surrounding code block ' + 'or by any\n' + 'code block that directly or indirectly invoked the code block ' + 'where\n' + 'the error occurred.\n' + '\n' + 'The Python interpreter raises an exception when it detects a ' + 'run-time\n' + 'error (such as division by zero). A Python program can also\n' + 'explicitly raise an exception with the "raise" statement. ' + 'Exception\n' + 'handlers are specified with the "try" ... "except" ' + 'statement. 
The\n' + '"finally" clause of such a statement can be used to specify ' + 'cleanup\n' + 'code which does not handle the exception, but is executed ' + 'whether an\n' + 'exception occurred or not in the preceding code.\n' + '\n' + 'Python uses the "termination" model of error handling: an ' + 'exception\n' + 'handler can find out what happened and continue execution at ' + 'an outer\n' + 'level, but it cannot repair the cause of the error and retry ' + 'the\n' + 'failing operation (except by re-entering the offending piece ' + 'of code\n' + 'from the top).\n' + '\n' + 'When an exception is not handled at all, the interpreter ' + 'terminates\n' + 'execution of the program, or returns to its interactive main ' + 'loop. In\n' + 'either case, it prints a stack backtrace, except when the ' + 'exception is\n' + '"SystemExit".\n' + '\n' + 'Exceptions are identified by class instances. The "except" ' + 'clause is\n' + 'selected depending on the class of the instance: it must ' + 'reference the\n' + 'class of the instance or a base class thereof. The instance ' + 'can be\n' + 'received by the handler and can carry additional information ' + 'about the\n' + 'exceptional condition.\n' + '\n' + 'Note: Exception messages are not part of the Python API. ' + 'Their\n' + ' contents may change from one version of Python to the next ' + 'without\n' + ' warning and should not be relied on by code which will run ' + 'under\n' + ' multiple versions of the interpreter.\n' + '\n' + 'See also the description of the "try" statement in section ' + '*The try\n' + 'statement* and "raise" statement in section *The raise ' + 'statement*.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] This limitation occurs because the code that is executed ' + 'by\n' + ' these operations is not available at the time the module ' + 'is\n' + ' compiled.\n', + 'exprlists': '\n' + 'Expression lists\n' + '****************\n' + '\n' + ' expression_list ::= expression ( "," expression )* [","]\n' + '\n' + 'An expression list containing at least one comma yields a ' + 'tuple. The\n' + 'length of the tuple is the number of expressions in the ' + 'list. The\n' + 'expressions are evaluated from left to right.\n' + '\n' + 'The trailing comma is required only to create a single tuple ' + '(a.k.a. a\n' + '*singleton*); it is optional in all other cases. A single ' + 'expression\n' + "without a trailing comma doesn't create a tuple, but rather " + 'yields the\n' + 'value of that expression. (To create an empty tuple, use an ' + 'empty pair\n' + 'of parentheses: "()".)\n', + 'floating': '\n' + 'Floating point literals\n' + '***********************\n' + '\n' + 'Floating point literals are described by the following ' + 'lexical\n' + 'definitions:\n' + '\n' + ' floatnumber ::= pointfloat | exponentfloat\n' + ' pointfloat ::= [intpart] fraction | intpart "."\n' + ' exponentfloat ::= (intpart | pointfloat) exponent\n' + ' intpart ::= digit+\n' + ' fraction ::= "." digit+\n' + ' exponent ::= ("e" | "E") ["+" | "-"] digit+\n' + '\n' + 'Note that the integer and exponent parts are always ' + 'interpreted using\n' + 'radix 10. For example, "077e010" is legal, and denotes the ' + 'same number\n' + 'as "77e10". The allowed range of floating point literals is\n' + 'implementation-dependent. Some examples of floating point ' + 'literals:\n' + '\n' + ' 3.14 10. 
.001 1e100 3.14e-10 0e0\n' + '\n' + 'Note that numeric literals do not include a sign; a phrase ' + 'like "-1"\n' + 'is actually an expression composed of the unary operator "-" ' + 'and the\n' + 'literal "1".\n', + 'for': '\n' + 'The "for" statement\n' + '*******************\n' + '\n' + 'The "for" statement is used to iterate over the elements of a ' + 'sequence\n' + '(such as a string, tuple or list) or other iterable object:\n' + '\n' + ' for_stmt ::= "for" target_list "in" expression_list ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'The expression list is evaluated once; it should yield an iterable\n' + 'object. An iterator is created for the result of the\n' + '"expression_list". The suite is then executed once for each item\n' + 'provided by the iterator, in the order returned by the iterator. ' + 'Each\n' + 'item in turn is assigned to the target list using the standard ' + 'rules\n' + 'for assignments (see *Assignment statements*), and then the suite ' + 'is\n' + 'executed. When the items are exhausted (which is immediately when ' + 'the\n' + 'sequence is empty or an iterator raises a "StopIteration" ' + 'exception),\n' + 'the suite in the "else" clause, if present, is executed, and the ' + 'loop\n' + 'terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause\'s suite. A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and ' + 'continues\n' + 'with the next item, or with the "else" clause if there is no next\n' + 'item.\n' + '\n' + 'The for-loop makes assignments to the variables(s) in the target ' + 'list.\n' + 'This overwrites all previous assignments to those variables ' + 'including\n' + 'those made in the suite of the for-loop:\n' + '\n' + ' for i in range(10):\n' + ' print(i)\n' + ' i = 5 # this will not affect the for-loop\n' + ' # because i will be overwritten with the ' + 'next\n' + ' # index in the range\n' + '\n' + 'Names in the target list are not deleted when the loop is ' + 'finished,\n' + 'but if the sequence is empty, they will not have been assigned to ' + 'at\n' + 'all by the loop. Hint: the built-in function "range()" returns an\n' + "iterator of integers suitable to emulate the effect of Pascal's " + '"for i\n' + ':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, ' + '2]".\n' + '\n' + 'Note: There is a subtlety when the sequence is being modified by ' + 'the\n' + ' loop (this can only occur for mutable sequences, i.e. lists). ' + 'An\n' + ' internal counter is used to keep track of which item is used ' + 'next,\n' + ' and this is incremented on each iteration. When this counter ' + 'has\n' + ' reached the length of the sequence the loop terminates. This ' + 'means\n' + ' that if the suite deletes the current (or a previous) item from ' + 'the\n' + ' sequence, the next item will be skipped (since it gets the index ' + 'of\n' + ' the current item which has already been treated). 
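
A small illustrative sketch, not taken from the quoted file, of the skipping behaviour just described (the slice-copy workaround follows below):

    a = [1, -2, -3, 4]
    for x in a:
        if x < 0:
            a.remove(x)            # shrinks the list while iterating over it
    print(a)                       # [1, -3, 4]: the -3 was skipped, not removed
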
Likewise, if ' + 'the\n' + ' suite inserts an item in the sequence before the current item, ' + 'the\n' + ' current item will be treated again the next time through the ' + 'loop.\n' + ' This can lead to nasty bugs that can be avoided by making a\n' + ' temporary copy using a slice of the whole sequence, e.g.,\n' + '\n' + ' for x in a[:]:\n' + ' if x < 0: a.remove(x)\n', + 'formatstrings': '\n' + 'Format String Syntax\n' + '********************\n' + '\n' + 'The "str.format()" method and the "Formatter" class share ' + 'the same\n' + 'syntax for format strings (although in the case of ' + '"Formatter",\n' + 'subclasses can define their own format string syntax).\n' + '\n' + 'Format strings contain "replacement fields" surrounded by ' + 'curly braces\n' + '"{}". Anything that is not contained in braces is ' + 'considered literal\n' + 'text, which is copied unchanged to the output. If you ' + 'need to include\n' + 'a brace character in the literal text, it can be escaped ' + 'by doubling:\n' + '"{{" and "}}".\n' + '\n' + 'The grammar for a replacement field is as follows:\n' + '\n' + ' replacement_field ::= "{" [field_name] ["!" ' + 'conversion] [":" format_spec] "}"\n' + ' field_name ::= arg_name ("." attribute_name ' + '| "[" element_index "]")*\n' + ' arg_name ::= [identifier | integer]\n' + ' attribute_name ::= identifier\n' + ' element_index ::= integer | index_string\n' + ' index_string ::= +\n' + ' conversion ::= "r" | "s" | "a"\n' + ' format_spec ::= \n' + '\n' + 'In less formal terms, the replacement field can start ' + 'with a\n' + '*field_name* that specifies the object whose value is to ' + 'be formatted\n' + 'and inserted into the output instead of the replacement ' + 'field. The\n' + '*field_name* is optionally followed by a *conversion* ' + 'field, which is\n' + 'preceded by an exclamation point "\'!\'", and a ' + '*format_spec*, which is\n' + 'preceded by a colon "\':\'". These specify a non-default ' + 'format for the\n' + 'replacement value.\n' + '\n' + 'See also the *Format Specification Mini-Language* ' + 'section.\n' + '\n' + 'The *field_name* itself begins with an *arg_name* that is ' + 'either a\n' + "number or a keyword. If it's a number, it refers to a " + 'positional\n' + "argument, and if it's a keyword, it refers to a named " + 'keyword\n' + 'argument. If the numerical arg_names in a format string ' + 'are 0, 1, 2,\n' + '... in sequence, they can all be omitted (not just some) ' + 'and the\n' + 'numbers 0, 1, 2, ... will be automatically inserted in ' + 'that order.\n' + 'Because *arg_name* is not quote-delimited, it is not ' + 'possible to\n' + 'specify arbitrary dictionary keys (e.g., the strings ' + '"\'10\'" or\n' + '"\':-]\'") within a format string. The *arg_name* can be ' + 'followed by any\n' + 'number of index or attribute expressions. 
An expression ' + 'of the form\n' + '"\'.name\'" selects the named attribute using ' + '"getattr()", while an\n' + 'expression of the form "\'[index]\'" does an index lookup ' + 'using\n' + '"__getitem__()".\n' + '\n' + 'Changed in version 3.1: The positional argument ' + 'specifiers can be\n' + 'omitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n' + '\n' + 'Some simple format string examples:\n' + '\n' + ' "First, thou shalt count to {0}" # References first ' + 'positional argument\n' + ' "Bring me a {}" # Implicitly ' + 'references the first positional argument\n' + ' "From {} to {}" # Same as "From {0} ' + 'to {1}"\n' + ' "My quest is {name}" # References keyword ' + "argument 'name'\n" + ' "Weight in tons {0.weight}" # \'weight\' ' + 'attribute of first positional arg\n' + ' "Units destroyed: {players[0]}" # First element of ' + "keyword argument 'players'.\n" + '\n' + 'The *conversion* field causes a type coercion before ' + 'formatting.\n' + 'Normally, the job of formatting a value is done by the ' + '"__format__()"\n' + 'method of the value itself. However, in some cases it is ' + 'desirable to\n' + 'force a type to be formatted as a string, overriding its ' + 'own\n' + 'definition of formatting. By converting the value to a ' + 'string before\n' + 'calling "__format__()", the normal formatting logic is ' + 'bypassed.\n' + '\n' + 'Three conversion flags are currently supported: "\'!s\'" ' + 'which calls\n' + '"str()" on the value, "\'!r\'" which calls "repr()" and ' + '"\'!a\'" which\n' + 'calls "ascii()".\n' + '\n' + 'Some examples:\n' + '\n' + ' "Harold\'s a clever {0!s}" # Calls str() on the ' + 'argument first\n' + ' "Bring out the holy {name!r}" # Calls repr() on the ' + 'argument first\n' + ' "More {!a}" # Calls ascii() on ' + 'the argument first\n' + '\n' + 'The *format_spec* field contains a specification of how ' + 'the value\n' + 'should be presented, including such details as field ' + 'width, alignment,\n' + 'padding, decimal precision and so on. Each value type ' + 'can define its\n' + 'own "formatting mini-language" or interpretation of the ' + '*format_spec*.\n' + '\n' + 'Most built-in types support a common formatting ' + 'mini-language, which\n' + 'is described in the next section.\n' + '\n' + 'A *format_spec* field can also include nested replacement ' + 'fields\n' + 'within it. These nested replacement fields can contain ' + 'only a field\n' + 'name; conversion flags and format specifications are not ' + 'allowed. The\n' + 'replacement fields within the format_spec are substituted ' + 'before the\n' + '*format_spec* string is interpreted. This allows the ' + 'formatting of a\n' + 'value to be dynamically specified.\n' + '\n' + 'See the *Format examples* section for some examples.\n' + '\n' + '\n' + 'Format Specification Mini-Language\n' + '==================================\n' + '\n' + '"Format specifications" are used within replacement ' + 'fields contained\n' + 'within a format string to define how individual values ' + 'are presented\n' + '(see *Format String Syntax*). They can also be passed ' + 'directly to the\n' + 'built-in "format()" function. 
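
As a hedged aside, the same specifications can be handed straight to the built-in "format()" just mentioned, for example:

    >>> format(1234.56789, '>12.2f')   # right-align in a field of width 12
    '     1234.57'
    >>> format(255, '#06x')            # alternate form plus sign-aware zero padding
    '0x00ff'
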
Each formattable type may ' + 'define how\n' + 'the format specification is to be interpreted.\n' + '\n' + 'Most built-in types implement the following options for ' + 'format\n' + 'specifications, although some of the formatting options ' + 'are only\n' + 'supported by the numeric types.\n' + '\n' + 'A general convention is that an empty format string ' + '("""") produces\n' + 'the same result as if you had called "str()" on the ' + 'value. A non-empty\n' + 'format string typically modifies the result.\n' + '\n' + 'The general form of a *standard format specifier* is:\n' + '\n' + ' format_spec ::= ' + '[[fill]align][sign][#][0][width][,][.precision][type]\n' + ' fill ::= \n' + ' align ::= "<" | ">" | "=" | "^"\n' + ' sign ::= "+" | "-" | " "\n' + ' width ::= integer\n' + ' precision ::= integer\n' + ' type ::= "b" | "c" | "d" | "e" | "E" | "f" | ' + '"F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n' + '\n' + 'If a valid *align* value is specified, it can be preceded ' + 'by a *fill*\n' + 'character that can be any character and defaults to a ' + 'space if\n' + 'omitted. Note that it is not possible to use "{" and "}" ' + 'as *fill*\n' + 'char while using the "str.format()" method; this ' + 'limitation however\n' + 'doesn\'t affect the "format()" function.\n' + '\n' + 'The meaning of the various alignment options is as ' + 'follows:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Option | ' + 'Meaning ' + '|\n' + ' ' + '+===========+============================================================+\n' + ' | "\'<\'" | Forces the field to be left-aligned ' + 'within the available |\n' + ' | | space (this is the default for most ' + 'objects). |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'>\'" | Forces the field to be right-aligned ' + 'within the available |\n' + ' | | space (this is the default for ' + 'numbers). |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'=\'" | Forces the padding to be placed after ' + 'the sign (if any) |\n' + ' | | but before the digits. This is used for ' + 'printing fields |\n' + " | | in the form '+000000120'. This alignment " + 'option is only |\n' + ' | | valid for numeric ' + 'types. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'^\'" | Forces the field to be centered within ' + 'the available |\n' + ' | | ' + 'space. ' + '|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'Note that unless a minimum field width is defined, the ' + 'field width\n' + 'will always be the same size as the data to fill it, so ' + 'that the\n' + 'alignment option has no meaning in this case.\n' + '\n' + 'The *sign* option is only valid for number types, and can ' + 'be one of\n' + 'the following:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Option | ' + 'Meaning ' + '|\n' + ' ' + '+===========+============================================================+\n' + ' | "\'+\'" | indicates that a sign should be used ' + 'for both positive as |\n' + ' | | well as negative ' + 'numbers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'-\'" | indicates that a sign should be used ' + 'only for negative |\n' + ' | | numbers (this is the default ' + 'behavior). 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | space | indicates that a leading space should be ' + 'used on positive |\n' + ' | | numbers, and a minus sign on negative ' + 'numbers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'The "\'#\'" option causes the "alternate form" to be used ' + 'for the\n' + 'conversion. The alternate form is defined differently ' + 'for different\n' + 'types. This option is only valid for integer, float, ' + 'complex and\n' + 'Decimal types. For integers, when binary, octal, or ' + 'hexadecimal output\n' + 'is used, this option adds the prefix respective "\'0b\'", ' + '"\'0o\'", or\n' + '"\'0x\'" to the output value. For floats, complex and ' + 'Decimal the\n' + 'alternate form causes the result of the conversion to ' + 'always contain a\n' + 'decimal-point character, even if no digits follow it. ' + 'Normally, a\n' + 'decimal-point character appears in the result of these ' + 'conversions\n' + 'only if a digit follows it. In addition, for "\'g\'" and ' + '"\'G\'"\n' + 'conversions, trailing zeros are not removed from the ' + 'result.\n' + '\n' + 'The "\',\'" option signals the use of a comma for a ' + 'thousands separator.\n' + 'For a locale aware separator, use the "\'n\'" integer ' + 'presentation type\n' + 'instead.\n' + '\n' + 'Changed in version 3.1: Added the "\',\'" option (see ' + 'also **PEP 378**).\n' + '\n' + '*width* is a decimal integer defining the minimum field ' + 'width. If not\n' + 'specified, then the field width will be determined by the ' + 'content.\n' + '\n' + 'Preceding the *width* field by a zero ("\'0\'") character ' + 'enables sign-\n' + 'aware zero-padding for numeric types. This is equivalent ' + 'to a *fill*\n' + 'character of "\'0\'" with an *alignment* type of ' + '"\'=\'".\n' + '\n' + 'The *precision* is a decimal number indicating how many ' + 'digits should\n' + 'be displayed after the decimal point for a floating point ' + 'value\n' + 'formatted with "\'f\'" and "\'F\'", or before and after ' + 'the decimal point\n' + 'for a floating point value formatted with "\'g\'" or ' + '"\'G\'". For non-\n' + 'number types the field indicates the maximum field size - ' + 'in other\n' + 'words, how many characters will be used from the field ' + 'content. The\n' + '*precision* is not allowed for integer values.\n' + '\n' + 'Finally, the *type* determines how the data should be ' + 'presented.\n' + '\n' + 'The available string presentation types are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '+===========+============================================================+\n' + ' | "\'s\'" | String format. This is the default ' + 'type for strings and |\n' + ' | | may be ' + 'omitted. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | The same as ' + '"\'s\'". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'The available integer presentation types are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '+===========+============================================================+\n' + ' | "\'b\'" | Binary format. Outputs the number in ' + 'base 2. 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'c\'" | Character. Converts the integer to the ' + 'corresponding |\n' + ' | | unicode character before ' + 'printing. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'d\'" | Decimal Integer. Outputs the number in ' + 'base 10. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'o\'" | Octal format. Outputs the number in ' + 'base 8. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'x\'" | Hex format. Outputs the number in base ' + '16, using lower- |\n' + ' | | case letters for the digits above ' + '9. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'X\'" | Hex format. Outputs the number in base ' + '16, using upper- |\n' + ' | | case letters for the digits above ' + '9. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'n\'" | Number. This is the same as "\'d\'", ' + 'except that it uses the |\n' + ' | | current locale setting to insert the ' + 'appropriate number |\n' + ' | | separator ' + 'characters. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | The same as ' + '"\'d\'". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'In addition to the above presentation types, integers can ' + 'be formatted\n' + 'with the floating point presentation types listed below ' + '(except "\'n\'"\n' + 'and None). When doing so, "float()" is used to convert ' + 'the integer to\n' + 'a floating point number before formatting.\n' + '\n' + 'The available presentation types for floating point and ' + 'decimal values\n' + 'are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '+===========+============================================================+\n' + ' | "\'e\'" | Exponent notation. Prints the number ' + 'in scientific |\n' + " | | notation using the letter 'e' to " + 'indicate the exponent. |\n' + ' | | The default precision is ' + '"6". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'E\'" | Exponent notation. Same as "\'e\'" ' + 'except it uses an upper |\n' + " | | case 'E' as the separator " + 'character. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'f\'" | Fixed point. Displays the number as a ' + 'fixed-point number. |\n' + ' | | The default precision is ' + '"6". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'F\'" | Fixed point. Same as "\'f\'", but ' + 'converts "nan" to "NAN" |\n' + ' | | and "inf" to ' + '"INF". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'g\'" | General format. For a given precision ' + '"p >= 1", this |\n' + ' | | rounds the number to "p" significant ' + 'digits and then |\n' + ' | | formats the result in either fixed-point ' + 'format or in |\n' + ' | | scientific notation, depending on its ' + 'magnitude. The |\n' + ' | | precise rules are as follows: suppose ' + 'that the result |\n' + ' | | formatted with presentation type "\'e\'" ' + 'and precision "p-1" |\n' + ' | | would have exponent "exp". 
Then if "-4 ' + '<= exp < p", the |\n' + ' | | number is formatted with presentation ' + 'type "\'f\'" and |\n' + ' | | precision "p-1-exp". Otherwise, the ' + 'number is formatted |\n' + ' | | with presentation type "\'e\'" and ' + 'precision "p-1". In both |\n' + ' | | cases insignificant trailing zeros are ' + 'removed from the |\n' + ' | | significand, and the decimal point is ' + 'also removed if |\n' + ' | | there are no remaining digits following ' + 'it. Positive and |\n' + ' | | negative infinity, positive and negative ' + 'zero, and nans, |\n' + ' | | are formatted as "inf", "-inf", "0", ' + '"-0" and "nan" |\n' + ' | | respectively, regardless of the ' + 'precision. A precision of |\n' + ' | | "0" is treated as equivalent to a ' + 'precision of "1". The |\n' + ' | | default precision is ' + '"6". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'G\'" | General format. Same as "\'g\'" except ' + 'switches to "\'E\'" if |\n' + ' | | the number gets too large. The ' + 'representations of infinity |\n' + ' | | and NaN are uppercased, ' + 'too. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'n\'" | Number. This is the same as "\'g\'", ' + 'except that it uses the |\n' + ' | | current locale setting to insert the ' + 'appropriate number |\n' + ' | | separator ' + 'characters. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'%\'" | Percentage. Multiplies the number by ' + '100 and displays in |\n' + ' | | fixed ("\'f\'") format, followed by a ' + 'percent sign. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | Similar to "\'g\'", except with at least ' + 'one digit past the |\n' + ' | | decimal point and a default precision of ' + '12. This is |\n' + ' | | intended to match "str()", except you ' + 'can add the other |\n' + ' | | format ' + 'modifiers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + '\n' + 'Format examples\n' + '===============\n' + '\n' + 'This section contains examples of the new format syntax ' + 'and comparison\n' + 'with the old "%"-formatting.\n' + '\n' + 'In most of the cases the syntax is similar to the old ' + '"%"-formatting,\n' + 'with the addition of the "{}" and with ":" used instead ' + 'of "%". 
For\n' + 'example, "\'%03.2f\'" can be translated to ' + '"\'{:03.2f}\'".\n' + '\n' + 'The new format syntax also supports new and different ' + 'options, shown\n' + 'in the follow examples.\n' + '\n' + 'Accessing arguments by position:\n' + '\n' + " >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n" + " 'a, b, c'\n" + " >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only\n" + " 'a, b, c'\n" + " >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n" + " 'c, b, a'\n" + " >>> '{2}, {1}, {0}'.format(*'abc') # unpacking " + 'argument sequence\n' + " 'c, b, a'\n" + " >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' " + 'indices can be repeated\n' + " 'abracadabra'\n" + '\n' + 'Accessing arguments by name:\n' + '\n' + " >>> 'Coordinates: {latitude}, " + "{longitude}'.format(latitude='37.24N', " + "longitude='-115.81W')\n" + " 'Coordinates: 37.24N, -115.81W'\n" + " >>> coord = {'latitude': '37.24N', 'longitude': " + "'-115.81W'}\n" + " >>> 'Coordinates: {latitude}, " + "{longitude}'.format(**coord)\n" + " 'Coordinates: 37.24N, -115.81W'\n" + '\n' + "Accessing arguments' attributes:\n" + '\n' + ' >>> c = 3-5j\n' + " >>> ('The complex number {0} is formed from the real " + "part {0.real} '\n" + " ... 'and the imaginary part {0.imag}.').format(c)\n" + " 'The complex number (3-5j) is formed from the real " + "part 3.0 and the imaginary part -5.0.'\n" + ' >>> class Point:\n' + ' ... def __init__(self, x, y):\n' + ' ... self.x, self.y = x, y\n' + ' ... def __str__(self):\n' + " ... return 'Point({self.x}, " + "{self.y})'.format(self=self)\n" + ' ...\n' + ' >>> str(Point(4, 2))\n' + " 'Point(4, 2)'\n" + '\n' + "Accessing arguments' items:\n" + '\n' + ' >>> coord = (3, 5)\n' + " >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n" + " 'X: 3; Y: 5'\n" + '\n' + 'Replacing "%s" and "%r":\n' + '\n' + ' >>> "repr() shows quotes: {!r}; str() doesn\'t: ' + '{!s}".format(\'test1\', \'test2\')\n' + ' "repr() shows quotes: \'test1\'; str() doesn\'t: ' + 'test2"\n' + '\n' + 'Aligning the text and specifying a width:\n' + '\n' + " >>> '{:<30}'.format('left aligned')\n" + " 'left aligned '\n" + " >>> '{:>30}'.format('right aligned')\n" + " ' right aligned'\n" + " >>> '{:^30}'.format('centered')\n" + " ' centered '\n" + " >>> '{:*^30}'.format('centered') # use '*' as a fill " + 'char\n' + " '***********centered***********'\n" + '\n' + 'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n' + '\n' + " >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it " + 'always\n' + " '+3.140000; -3.140000'\n" + " >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space " + 'for positive numbers\n' + " ' 3.140000; -3.140000'\n" + " >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only " + "the minus -- same as '{:f}; {:f}'\n" + " '3.140000; -3.140000'\n" + '\n' + 'Replacing "%x" and "%o" and converting the value to ' + 'different bases:\n' + '\n' + ' >>> # format also supports binary numbers\n' + ' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: ' + '{0:b}".format(42)\n' + " 'int: 42; hex: 2a; oct: 52; bin: 101010'\n" + ' >>> # with 0x, 0o, or 0b as prefix:\n' + ' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: ' + '{0:#b}".format(42)\n' + " 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n" + '\n' + 'Using the comma as a thousands separator:\n' + '\n' + " >>> '{:,}'.format(1234567890)\n" + " '1,234,567,890'\n" + '\n' + 'Expressing a percentage:\n' + '\n' + ' >>> points = 19\n' + ' >>> total = 22\n' + " >>> 'Correct answers: {:.2%}'.format(points/total)\n" + " 'Correct answers: 86.36%'\n" + '\n' + 'Using type-specific formatting:\n' + '\n' + ' >>> import 
datetime\n' + ' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n' + " >>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n" + " '2010-07-04 12:15:58'\n" + '\n' + 'Nesting arguments and more complex examples:\n' + '\n' + " >>> for align, text in zip('<^>', ['left', 'center', " + "'right']):\n" + " ... '{0:{fill}{align}16}'.format(text, fill=align, " + 'align=align)\n' + ' ...\n' + " 'left<<<<<<<<<<<<'\n" + " '^^^^^center^^^^^'\n" + " '>>>>>>>>>>>right'\n" + ' >>>\n' + ' >>> octets = [192, 168, 0, 1]\n' + " >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n" + " 'C0A80001'\n" + ' >>> int(_, 16)\n' + ' 3232235521\n' + ' >>>\n' + ' >>> width = 5\n' + ' >>> for num in range(5,12): #doctest: ' + '+NORMALIZE_WHITESPACE\n' + " ... for base in 'dXob':\n" + " ... print('{0:{width}{base}}'.format(num, " + "base=base, width=width), end=' ')\n" + ' ... print()\n' + ' ...\n' + ' 5 5 5 101\n' + ' 6 6 6 110\n' + ' 7 7 7 111\n' + ' 8 8 10 1000\n' + ' 9 9 11 1001\n' + ' 10 A 12 1010\n' + ' 11 B 13 1011\n', + 'function': '\n' + 'Function definitions\n' + '********************\n' + '\n' + 'A function definition defines a user-defined function object ' + '(see\n' + 'section *The standard type hierarchy*):\n' + '\n' + ' funcdef ::= [decorators] "def" funcname "(" ' + '[parameter_list] ")" ["->" expression] ":" suite\n' + ' decorators ::= decorator+\n' + ' decorator ::= "@" dotted_name ["(" [parameter_list ' + '[","]] ")"] NEWLINE\n' + ' dotted_name ::= identifier ("." identifier)*\n' + ' parameter_list ::= (defparameter ",")*\n' + ' | "*" [parameter] ("," defparameter)* ' + '["," "**" parameter]\n' + ' | "**" parameter\n' + ' | defparameter [","] )\n' + ' parameter ::= identifier [":" expression]\n' + ' defparameter ::= parameter ["=" expression]\n' + ' funcname ::= identifier\n' + '\n' + 'A function definition is an executable statement. Its ' + 'execution binds\n' + 'the function name in the current local namespace to a function ' + 'object\n' + '(a wrapper around the executable code for the function). ' + 'This\n' + 'function object contains a reference to the current global ' + 'namespace\n' + 'as the global namespace to be used when the function is ' + 'called.\n' + '\n' + 'The function definition does not execute the function body; ' + 'this gets\n' + 'executed only when the function is called. [3]\n' + '\n' + 'A function definition may be wrapped by one or more ' + '*decorator*\n' + 'expressions. Decorator expressions are evaluated when the ' + 'function is\n' + 'defined, in the scope that contains the function definition. ' + 'The\n' + 'result must be a callable, which is invoked with the function ' + 'object\n' + 'as the only argument. The returned value is bound to the ' + 'function name\n' + 'instead of the function object. Multiple decorators are ' + 'applied in\n' + 'nested fashion. For example, the following code\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' def func(): pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' def func(): pass\n' + ' func = f1(arg)(f2(func))\n' + '\n' + 'When one or more *parameters* have the form *parameter* "="\n' + '*expression*, the function is said to have "default parameter ' + 'values."\n' + 'For a parameter with a default value, the corresponding ' + '*argument* may\n' + "be omitted from a call, in which case the parameter's default " + 'value is\n' + 'substituted. 
If a parameter has a default value, all ' + 'following\n' + 'parameters up until the ""*"" must also have a default value ' + '--- this\n' + 'is a syntactic restriction that is not expressed by the ' + 'grammar.\n' + '\n' + '**Default parameter values are evaluated from left to right ' + 'when the\n' + 'function definition is executed.** This means that the ' + 'expression is\n' + 'evaluated once, when the function is defined, and that the ' + 'same "pre-\n' + 'computed" value is used for each call. This is especially ' + 'important\n' + 'to understand when a default parameter is a mutable object, ' + 'such as a\n' + 'list or a dictionary: if the function modifies the object ' + '(e.g. by\n' + 'appending an item to a list), the default value is in effect ' + 'modified.\n' + 'This is generally not what was intended. A way around this is ' + 'to use\n' + '"None" as the default, and explicitly test for it in the body ' + 'of the\n' + 'function, e.g.:\n' + '\n' + ' def whats_on_the_telly(penguin=None):\n' + ' if penguin is None:\n' + ' penguin = []\n' + ' penguin.append("property of the zoo")\n' + ' return penguin\n' + '\n' + 'Function call semantics are described in more detail in ' + 'section\n' + '*Calls*. A function call always assigns values to all ' + 'parameters\n' + 'mentioned in the parameter list, either from position ' + 'arguments, from\n' + 'keyword arguments, or from default values. If the form\n' + '""*identifier"" is present, it is initialized to a tuple ' + 'receiving any\n' + 'excess positional parameters, defaulting to the empty tuple. ' + 'If the\n' + 'form ""**identifier"" is present, it is initialized to a new\n' + 'dictionary receiving any excess keyword arguments, defaulting ' + 'to a new\n' + 'empty dictionary. Parameters after ""*"" or ""*identifier"" ' + 'are\n' + 'keyword-only parameters and may only be passed used keyword ' + 'arguments.\n' + '\n' + 'Parameters may have annotations of the form "": expression"" ' + 'following\n' + 'the parameter name. Any parameter may have an annotation even ' + 'those\n' + 'of the form "*identifier" or "**identifier". Functions may ' + 'have\n' + '"return" annotation of the form ""-> expression"" after the ' + 'parameter\n' + 'list. These annotations can be any valid Python expression ' + 'and are\n' + 'evaluated when the function definition is executed. ' + 'Annotations may\n' + 'be evaluated in a different order than they appear in the ' + 'source code.\n' + 'The presence of annotations does not change the semantics of ' + 'a\n' + 'function. The annotation values are available as values of a\n' + "dictionary keyed by the parameters' names in the " + '"__annotations__"\n' + 'attribute of the function object.\n' + '\n' + 'It is also possible to create anonymous functions (functions ' + 'not bound\n' + 'to a name), for immediate use in expressions. This uses ' + 'lambda\n' + 'expressions, described in section *Lambdas*. Note that the ' + 'lambda\n' + 'expression is merely a shorthand for a simplified function ' + 'definition;\n' + 'a function defined in a ""def"" statement can be passed around ' + 'or\n' + 'assigned to another name just like a function defined by a ' + 'lambda\n' + 'expression. The ""def"" form is actually more powerful since ' + 'it\n' + 'allows the execution of multiple statements and annotations.\n' + '\n' + "**Programmer's note:** Functions are first-class objects. A " + '""def""\n' + 'statement executed inside a function definition defines a ' + 'local\n' + 'function that can be returned or passed around. 
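
A brief sketch, assuming nothing beyond the language itself, of such a locally defined function being returned and called later:

    def make_counter():
        count = 0                  # local to make_counter
        def bump():
            nonlocal count         # rebinds the enclosing function's variable
            count += 1
            return count
        return bump                # the inner function outlives the call

    c = make_counter()
    c(); c()
    print(c())                     # 3
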
Free ' + 'variables used\n' + 'in the nested function can access the local variables of the ' + 'function\n' + 'containing the def. See section *Naming and binding* for ' + 'details.\n' + '\n' + 'See also: **PEP 3107** - Function Annotations\n' + '\n' + ' The original specification for function annotations.\n', + 'global': '\n' + 'The "global" statement\n' + '**********************\n' + '\n' + ' global_stmt ::= "global" identifier ("," identifier)*\n' + '\n' + 'The "global" statement is a declaration which holds for the ' + 'entire\n' + 'current code block. It means that the listed identifiers are to ' + 'be\n' + 'interpreted as globals. It would be impossible to assign to a ' + 'global\n' + 'variable without "global", although free variables may refer to\n' + 'globals without being declared global.\n' + '\n' + 'Names listed in a "global" statement must not be used in the ' + 'same code\n' + 'block textually preceding that "global" statement.\n' + '\n' + 'Names listed in a "global" statement must not be defined as ' + 'formal\n' + 'parameters or in a "for" loop control target, "class" ' + 'definition,\n' + 'function definition, or "import" statement.\n' + '\n' + '**CPython implementation detail:** The current implementation ' + 'does not\n' + 'enforce the two restrictions, but programs should not abuse ' + 'this\n' + 'freedom, as future implementations may enforce them or silently ' + 'change\n' + 'the meaning of the program.\n' + '\n' + '**Programmer\'s note:** the "global" is a directive to the ' + 'parser. It\n' + 'applies only to code parsed at the same time as the "global"\n' + 'statement. In particular, a "global" statement contained in a ' + 'string\n' + 'or code object supplied to the built-in "exec()" function does ' + 'not\n' + 'affect the code block *containing* the function call, and code\n' + 'contained in such a string is unaffected by "global" statements ' + 'in the\n' + 'code containing the function call. The same applies to the ' + '"eval()"\n' + 'and "compile()" functions.\n', + 'id-classes': '\n' + 'Reserved classes of identifiers\n' + '*******************************\n' + '\n' + 'Certain classes of identifiers (besides keywords) have ' + 'special\n' + 'meanings. These classes are identified by the patterns of ' + 'leading and\n' + 'trailing underscore characters:\n' + '\n' + '"_*"\n' + ' Not imported by "from module import *". The special ' + 'identifier "_"\n' + ' is used in the interactive interpreter to store the ' + 'result of the\n' + ' last evaluation; it is stored in the "builtins" module. ' + 'When not\n' + ' in interactive mode, "_" has no special meaning and is ' + 'not defined.\n' + ' See section *The import statement*.\n' + '\n' + ' Note: The name "_" is often used in conjunction with\n' + ' internationalization; refer to the documentation for ' + 'the\n' + ' "gettext" module for more information on this ' + 'convention.\n' + '\n' + '"__*__"\n' + ' System-defined names. These names are defined by the ' + 'interpreter\n' + ' and its implementation (including the standard library). ' + 'Current\n' + ' system names are discussed in the *Special method names* ' + 'section\n' + ' and elsewhere. More will likely be defined in future ' + 'versions of\n' + ' Python. *Any* use of "__*__" names, in any context, that ' + 'does not\n' + ' follow explicitly documented use, is subject to breakage ' + 'without\n' + ' warning.\n' + '\n' + '"__*"\n' + ' Class-private names. 
Names in this category, when used ' + 'within the\n' + ' context of a class definition, are re-written to use a ' + 'mangled form\n' + ' to help avoid name clashes between "private" attributes ' + 'of base and\n' + ' derived classes. See section *Identifiers (Names)*.\n', + 'identifiers': '\n' + 'Identifiers and keywords\n' + '************************\n' + '\n' + 'Identifiers (also referred to as *names*) are described by ' + 'the\n' + 'following lexical definitions.\n' + '\n' + 'The syntax of identifiers in Python is based on the Unicode ' + 'standard\n' + 'annex UAX-31, with elaboration and changes as defined ' + 'below; see also\n' + '**PEP 3131** for further details.\n' + '\n' + 'Within the ASCII range (U+0001..U+007F), the valid ' + 'characters for\n' + 'identifiers are the same as in Python 2.x: the uppercase ' + 'and lowercase\n' + 'letters "A" through "Z", the underscore "_" and, except for ' + 'the first\n' + 'character, the digits "0" through "9".\n' + '\n' + 'Python 3.0 introduces additional characters from outside ' + 'the ASCII\n' + 'range (see **PEP 3131**). For these characters, the ' + 'classification\n' + 'uses the version of the Unicode Character Database as ' + 'included in the\n' + '"unicodedata" module.\n' + '\n' + 'Identifiers are unlimited in length. Case is significant.\n' + '\n' + ' identifier ::= xid_start xid_continue*\n' + ' id_start ::= \n' + ' id_continue ::= \n' + ' xid_start ::= \n' + ' xid_continue ::= \n' + '\n' + 'The Unicode category codes mentioned above stand for:\n' + '\n' + '* *Lu* - uppercase letters\n' + '\n' + '* *Ll* - lowercase letters\n' + '\n' + '* *Lt* - titlecase letters\n' + '\n' + '* *Lm* - modifier letters\n' + '\n' + '* *Lo* - other letters\n' + '\n' + '* *Nl* - letter numbers\n' + '\n' + '* *Mn* - nonspacing marks\n' + '\n' + '* *Mc* - spacing combining marks\n' + '\n' + '* *Nd* - decimal numbers\n' + '\n' + '* *Pc* - connector punctuations\n' + '\n' + '* *Other_ID_Start* - explicit list of characters in ' + 'PropList.txt to\n' + ' support backwards compatibility\n' + '\n' + '* *Other_ID_Continue* - likewise\n' + '\n' + 'All identifiers are converted into the normal form NFKC ' + 'while parsing;\n' + 'comparison of identifiers is based on NFKC.\n' + '\n' + 'A non-normative HTML file listing all valid identifier ' + 'characters for\n' + 'Unicode 4.1 can be found at http://www.dcl.hpi.uni-\n' + 'potsdam.de/home/loewis/table-3131.html.\n' + '\n' + '\n' + 'Keywords\n' + '========\n' + '\n' + 'The following identifiers are used as reserved words, or ' + '*keywords* of\n' + 'the language, and cannot be used as ordinary identifiers. ' + 'They must\n' + 'be spelled exactly as written here:\n' + '\n' + ' False class finally is return\n' + ' None continue for lambda try\n' + ' True def from nonlocal while\n' + ' and del global not with\n' + ' as elif if or yield\n' + ' assert else import pass\n' + ' break except in raise\n' + '\n' + '\n' + 'Reserved classes of identifiers\n' + '===============================\n' + '\n' + 'Certain classes of identifiers (besides keywords) have ' + 'special\n' + 'meanings. These classes are identified by the patterns of ' + 'leading and\n' + 'trailing underscore characters:\n' + '\n' + '"_*"\n' + ' Not imported by "from module import *". The special ' + 'identifier "_"\n' + ' is used in the interactive interpreter to store the ' + 'result of the\n' + ' last evaluation; it is stored in the "builtins" module. 
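
Returning to the "__*" class-private names above, a hedged sketch of the mangling (the class name "Widget" is invented for illustration):

    class Widget:
        def __init__(self):
            self.__token = 'secret'    # stored as _Widget__token

    w = Widget()
    print(w._Widget__token)            # 'secret'
    # w.__token outside the class body raises AttributeError
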
' + 'When not\n' + ' in interactive mode, "_" has no special meaning and is ' + 'not defined.\n' + ' See section *The import statement*.\n' + '\n' + ' Note: The name "_" is often used in conjunction with\n' + ' internationalization; refer to the documentation for ' + 'the\n' + ' "gettext" module for more information on this ' + 'convention.\n' + '\n' + '"__*__"\n' + ' System-defined names. These names are defined by the ' + 'interpreter\n' + ' and its implementation (including the standard ' + 'library). Current\n' + ' system names are discussed in the *Special method names* ' + 'section\n' + ' and elsewhere. More will likely be defined in future ' + 'versions of\n' + ' Python. *Any* use of "__*__" names, in any context, ' + 'that does not\n' + ' follow explicitly documented use, is subject to breakage ' + 'without\n' + ' warning.\n' + '\n' + '"__*"\n' + ' Class-private names. Names in this category, when used ' + 'within the\n' + ' context of a class definition, are re-written to use a ' + 'mangled form\n' + ' to help avoid name clashes between "private" attributes ' + 'of base and\n' + ' derived classes. See section *Identifiers (Names)*.\n', + 'if': '\n' + 'The "if" statement\n' + '******************\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" expression ":" suite\n' + ' ( "elif" expression ":" suite )*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the expressions ' + 'one\n' + 'by one until one is found to be true (see section *Boolean ' + 'operations*\n' + 'for the definition of true and false); then that suite is executed\n' + '(and no other part of the "if" statement is executed or evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, if\n' + 'present, is executed.\n', + 'imaginary': '\n' + 'Imaginary literals\n' + '******************\n' + '\n' + 'Imaginary literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' imagnumber ::= (floatnumber | intpart) ("j" | "J")\n' + '\n' + 'An imaginary literal yields a complex number with a real part ' + 'of 0.0.\n' + 'Complex numbers are represented as a pair of floating point ' + 'numbers\n' + 'and have the same restrictions on their range. To create a ' + 'complex\n' + 'number with a nonzero real part, add a floating point number ' + 'to it,\n' + 'e.g., "(3+4j)". Some examples of imaginary literals:\n' + '\n' + ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', + 'import': '\n' + 'The "import" statement\n' + '**********************\n' + '\n' + ' import_stmt ::= "import" module ["as" name] ( "," module ' + '["as" name] )*\n' + ' | "from" relative_module "import" identifier ' + '["as" name]\n' + ' ( "," identifier ["as" name] )*\n' + ' | "from" relative_module "import" "(" ' + 'identifier ["as" name]\n' + ' ( "," identifier ["as" name] )* [","] ")"\n' + ' | "from" module "import" "*"\n' + ' module ::= (identifier ".")* identifier\n' + ' relative_module ::= "."* module | "."+\n' + ' name ::= identifier\n' + '\n' + 'The basic import statement (no "from" clause) is executed in ' + 'two\n' + 'steps:\n' + '\n' + '1. find a module, loading and initializing it if necessary\n' + '\n' + '2. 
define a name or names in the local namespace for the scope\n' + ' where the "import" statement occurs.\n' + '\n' + 'When the statement contains multiple clauses (separated by ' + 'commas) the\n' + 'two steps are carried out separately for each clause, just as ' + 'though\n' + 'the clauses had been separated out into individiual import ' + 'statements.\n' + '\n' + 'The details of the first step, finding and loading modules are\n' + 'described in greater detail in the section on the *import ' + 'system*,\n' + 'which also describes the various types of packages and modules ' + 'that\n' + 'can be imported, as well as all the hooks that can be used to\n' + 'customize the import system. Note that failures in this step ' + 'may\n' + 'indicate either that the module could not be located, *or* that ' + 'an\n' + 'error occurred while initializing the module, which includes ' + 'execution\n' + "of the module's code.\n" + '\n' + 'If the requested module is retrieved successfully, it will be ' + 'made\n' + 'available in the local namespace in one of three ways:\n' + '\n' + '* If the module name is followed by "as", then the name ' + 'following\n' + ' "as" is bound directly to the imported module.\n' + '\n' + '* If no other name is specified, and the module being imported ' + 'is a\n' + " top level module, the module's name is bound in the local " + 'namespace\n' + ' as a reference to the imported module\n' + '\n' + '* If the module being imported is *not* a top level module, then ' + 'the\n' + ' name of the top level package that contains the module is ' + 'bound in\n' + ' the local namespace as a reference to the top level package. ' + 'The\n' + ' imported module must be accessed using its full qualified ' + 'name\n' + ' rather than directly\n' + '\n' + 'The "from" form uses a slightly more complex process:\n' + '\n' + '1. find the module specified in the "from" clause, loading and\n' + ' initializing it if necessary;\n' + '\n' + '2. for each of the identifiers specified in the "import" ' + 'clauses:\n' + '\n' + ' 1. check if the imported module has an attribute by that ' + 'name\n' + '\n' + ' 2. if not, attempt to import a submodule with that name and ' + 'then\n' + ' check the imported module again for that attribute\n' + '\n' + ' 3. if the attribute is not found, "ImportError" is raised.\n' + '\n' + ' 4. otherwise, a reference to that value is stored in the ' + 'local\n' + ' namespace, using the name in the "as" clause if it is ' + 'present,\n' + ' otherwise using the attribute name\n' + '\n' + 'Examples:\n' + '\n' + ' import foo # foo imported and bound locally\n' + ' import foo.bar.baz # foo.bar.baz imported, foo bound ' + 'locally\n' + ' import foo.bar.baz as fbb # foo.bar.baz imported and bound ' + 'as fbb\n' + ' from foo.bar import baz # foo.bar.baz imported and bound ' + 'as baz\n' + ' from foo import attr # foo imported and foo.attr bound ' + 'as attr\n' + '\n' + 'If the list of identifiers is replaced by a star ("\'*\'"), all ' + 'public\n' + 'names defined in the module are bound in the local namespace for ' + 'the\n' + 'scope where the "import" statement occurs.\n' + '\n' + 'The *public names* defined by a module are determined by ' + 'checking the\n' + 'module\'s namespace for a variable named "__all__"; if defined, ' + 'it must\n' + 'be a sequence of strings which are names defined or imported by ' + 'that\n' + 'module. The names given in "__all__" are all considered public ' + 'and\n' + 'are required to exist. 
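
A short sketch of how "__all__" constrains the star form; the module and names are hypothetical, not taken from the quoted file:

    # shapes.py, a hypothetical module
    __all__ = ['area']             # the advertised public API

    def area(r):
        return 3.14159 * r * r

    def _helper():                 # underscore prefix: omitted from "import *" anyway
        pass

    # elsewhere:
    #     from shapes import *     # binds only 'area'
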
If "__all__" is not defined, the set of ' + 'public\n' + "names includes all names found in the module's namespace which " + 'do not\n' + 'begin with an underscore character ("\'_\'"). "__all__" should ' + 'contain\n' + 'the entire public API. It is intended to avoid accidentally ' + 'exporting\n' + 'items that are not part of the API (such as library modules ' + 'which were\n' + 'imported and used within the module).\n' + '\n' + 'The "from" form with "*" may only occur in a module scope. The ' + 'wild\n' + 'card form of import --- "from module import *" --- is only ' + 'allowed at\n' + 'the module level. Attempting to use it in class or function\n' + 'definitions will raise a "SyntaxError".\n' + '\n' + 'When specifying what module to import you do not have to specify ' + 'the\n' + 'absolute name of the module. When a module or package is ' + 'contained\n' + 'within another package it is possible to make a relative import ' + 'within\n' + 'the same top package without having to mention the package name. ' + 'By\n' + 'using leading dots in the specified module or package after ' + '"from" you\n' + 'can specify how high to traverse up the current package ' + 'hierarchy\n' + 'without specifying exact names. One leading dot means the ' + 'current\n' + 'package where the module making the import exists. Two dots ' + 'means up\n' + 'one package level. Three dots is up two levels, etc. So if you ' + 'execute\n' + '"from . import mod" from a module in the "pkg" package then you ' + 'will\n' + 'end up importing "pkg.mod". If you execute "from ..subpkg2 ' + 'import mod"\n' + 'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". ' + 'The\n' + 'specification for relative imports is contained within **PEP ' + '328**.\n' + '\n' + '"importlib.import_module()" is provided to support applications ' + 'that\n' + 'determine dynamically the modules to be loaded.\n' + '\n' + '\n' + 'Future statements\n' + '=================\n' + '\n' + 'A *future statement* is a directive to the compiler that a ' + 'particular\n' + 'module should be compiled using syntax or semantics that will ' + 'be\n' + 'available in a specified future release of Python where the ' + 'feature\n' + 'becomes standard.\n' + '\n' + 'The future statement is intended to ease migration to future ' + 'versions\n' + 'of Python that introduce incompatible changes to the language. ' + 'It\n' + 'allows use of the new features on a per-module basis before the\n' + 'release in which the feature becomes standard.\n' + '\n' + ' future_statement ::= "from" "__future__" "import" feature ' + '["as" name]\n' + ' ("," feature ["as" name])*\n' + ' | "from" "__future__" "import" "(" ' + 'feature ["as" name]\n' + ' ("," feature ["as" name])* [","] ")"\n' + ' feature ::= identifier\n' + ' name ::= identifier\n' + '\n' + 'A future statement must appear near the top of the module. The ' + 'only\n' + 'lines that can appear before a future statement are:\n' + '\n' + '* the module docstring (if any),\n' + '\n' + '* comments,\n' + '\n' + '* blank lines, and\n' + '\n' + '* other future statements.\n' + '\n' + 'The features recognized by Python 3.0 are "absolute_import",\n' + '"division", "generators", "unicode_literals", "print_function",\n' + '"nested_scopes" and "with_statement". 
They are all redundant ' + 'because\n' + 'they are always enabled, and only kept for backwards ' + 'compatibility.\n' + '\n' + 'A future statement is recognized and treated specially at ' + 'compile\n' + 'time: Changes to the semantics of core constructs are often\n' + 'implemented by generating different code. It may even be the ' + 'case\n' + 'that a new feature introduces new incompatible syntax (such as a ' + 'new\n' + 'reserved word), in which case the compiler may need to parse ' + 'the\n' + 'module differently. Such decisions cannot be pushed off until\n' + 'runtime.\n' + '\n' + 'For any given release, the compiler knows which feature names ' + 'have\n' + 'been defined, and raises a compile-time error if a future ' + 'statement\n' + 'contains a feature not known to it.\n' + '\n' + 'The direct runtime semantics are the same as for any import ' + 'statement:\n' + 'there is a standard module "__future__", described later, and it ' + 'will\n' + 'be imported in the usual way at the time the future statement ' + 'is\n' + 'executed.\n' + '\n' + 'The interesting runtime semantics depend on the specific ' + 'feature\n' + 'enabled by the future statement.\n' + '\n' + 'Note that there is nothing special about the statement:\n' + '\n' + ' import __future__ [as name]\n' + '\n' + "That is not a future statement; it's an ordinary import " + 'statement with\n' + 'no special semantics or syntax restrictions.\n' + '\n' + 'Code compiled by calls to the built-in functions "exec()" and\n' + '"compile()" that occur in a module "M" containing a future ' + 'statement\n' + 'will, by default, use the new syntax or semantics associated ' + 'with the\n' + 'future statement. This can be controlled by optional arguments ' + 'to\n' + '"compile()" --- see the documentation of that function for ' + 'details.\n' + '\n' + 'A future statement typed at an interactive interpreter prompt ' + 'will\n' + 'take effect for the rest of the interpreter session. If an\n' + 'interpreter is started with the *-i* option, is passed a script ' + 'name\n' + 'to execute, and the script includes a future statement, it will ' + 'be in\n' + 'effect in the interactive session started after the script is\n' + 'executed.\n' + '\n' + 'See also: **PEP 236** - Back to the __future__\n' + '\n' + ' The original proposal for the __future__ mechanism.\n', + 'in': '\n' + 'Comparisons\n' + '***********\n' + '\n' + 'Unlike C, all comparison operations in Python have the same ' + 'priority,\n' + 'which is lower than that of any arithmetic, shifting or bitwise\n' + 'operation. Also unlike C, expressions like "a < b < c" have the\n' + 'interpretation that is conventional in mathematics:\n' + '\n' + ' comparison ::= or_expr ( comp_operator or_expr )*\n' + ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n' + ' | "is" ["not"] | ["not"] "in"\n' + '\n' + 'Comparisons yield boolean values: "True" or "False".\n' + '\n' + 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" is\n' + 'equivalent to "x < y and y <= z", except that "y" is evaluated only\n' + 'once (but in both cases "z" is not evaluated at all when "x < y" is\n' + 'found to be false).\n' + '\n' + 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and ' + '*op1*,\n' + '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... ' + 'y\n' + 'opN z" is equivalent to "a op1 b and b op2 c and ... 
y opN z", ' + 'except\n' + 'that each expression is evaluated at most once.\n' + '\n' + 'Note that "a op1 b op2 c" doesn\'t imply any kind of comparison ' + 'between\n' + '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\n' + 'perhaps not pretty).\n' + '\n' + 'The operators "<", ">", "==", ">=", "<=", and "!=" compare the ' + 'values\n' + 'of two objects. The objects need not have the same type. If both ' + 'are\n' + 'numbers, they are converted to a common type. Otherwise, the "==" ' + 'and\n' + '"!=" operators *always* consider objects of different types to be\n' + 'unequal, while the "<", ">", ">=" and "<=" operators raise a\n' + '"TypeError" when comparing objects of different types that do not\n' + 'implement these operators for the given pair of types. You can\n' + 'control comparison behavior of objects of non-built-in types by\n' + 'defining rich comparison methods like "__gt__()", described in ' + 'section\n' + '*Basic customization*.\n' + '\n' + 'Comparison of objects of the same type depends on the type:\n' + '\n' + '* Numbers are compared arithmetically.\n' + '\n' + '* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. ' + 'The\n' + ' are identical to themselves, "x is x" but are not equal to\n' + ' themselves, "x != x". Additionally, comparing any value to a\n' + ' not-a-number value will return "False". For example, both "3 <\n' + ' float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n' + '\n' + '* Bytes objects are compared lexicographically using the numeric\n' + ' values of their elements.\n' + '\n' + '* Strings are compared lexicographically using the numeric\n' + ' equivalents (the result of the built-in function "ord()") of ' + 'their\n' + " characters. [3] String and bytes object can't be compared!\n" + '\n' + '* Tuples and lists are compared lexicographically using comparison\n' + ' of corresponding elements. This means that to compare equal, ' + 'each\n' + ' element must compare equal and the two sequences must be of the ' + 'same\n' + ' type and have the same length.\n' + '\n' + ' If not equal, the sequences are ordered the same as their first\n' + ' differing elements. For example, "[1,2,x] <= [1,2,y]" has the ' + 'same\n' + ' value as "x <= y". If the corresponding element does not exist, ' + 'the\n' + ' shorter sequence is ordered first (for example, "[1,2] < ' + '[1,2,3]").\n' + '\n' + '* Mappings (dictionaries) compare equal if and only if they have ' + 'the\n' + ' same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', ' + "'>=',\n" + ' \'>\')" raise "TypeError".\n' + '\n' + '* Sets and frozensets define comparison operators to mean subset ' + 'and\n' + ' superset tests. Those relations do not define total orderings ' + '(the\n' + ' two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n' + ' another, nor supersets of one another). Accordingly, sets are ' + 'not\n' + ' appropriate arguments for functions which depend on total ' + 'ordering.\n' + ' For example, "min()", "max()", and "sorted()" produce undefined\n' + ' results given a list of sets as inputs.\n' + '\n' + '* Most other objects of built-in types compare unequal unless they\n' + ' are the same object; the choice whether one object is considered\n' + ' smaller or larger than another one is made arbitrarily but\n' + ' consistently within one execution of a program.\n' + '\n' + 'Comparison of objects of differing types depends on whether either ' + 'of\n' + 'the types provide explicit support for the comparison. 
Most ' + 'numeric\n' + 'types can be compared with one another. When cross-type comparison ' + 'is\n' + 'not supported, the comparison method returns "NotImplemented".\n' + '\n' + 'The operators "in" and "not in" test for membership. "x in s"\n' + 'evaluates to true if *x* is a member of *s*, and false otherwise. ' + '"x\n' + 'not in s" returns the negation of "x in s". All built-in sequences\n' + 'and set types support this as well as dictionary, for which "in" ' + 'tests\n' + 'whether the dictionary has a given key. For container types such as\n' + 'list, tuple, set, frozenset, dict, or collections.deque, the\n' + 'expression "x in y" is equivalent to "any(x is e or x == e for e in\n' + 'y)".\n' + '\n' + 'For the string and bytes types, "x in y" is true if and only if *x* ' + 'is\n' + 'a substring of *y*. An equivalent test is "y.find(x) != -1". ' + 'Empty\n' + 'strings are always considered to be a substring of any other ' + 'string,\n' + 'so """ in "abc"" will return "True".\n' + '\n' + 'For user-defined classes which define the "__contains__()" method, ' + '"x\n' + 'in y" is true if and only if "y.__contains__(x)" is true.\n' + '\n' + 'For user-defined classes which do not define "__contains__()" but ' + 'do\n' + 'define "__iter__()", "x in y" is true if some value "z" with "x == ' + 'z"\n' + 'is produced while iterating over "y". If an exception is raised\n' + 'during the iteration, it is as if "in" raised that exception.\n' + '\n' + 'Lastly, the old-style iteration protocol is tried: if a class ' + 'defines\n' + '"__getitem__()", "x in y" is true if and only if there is a non-\n' + 'negative integer index *i* such that "x == y[i]", and all lower\n' + 'integer indices do not raise "IndexError" exception. (If any other\n' + 'exception is raised, it is as if "in" raised that exception).\n' + '\n' + 'The operator "not in" is defined to have the inverse true value of\n' + '"in".\n' + '\n' + 'The operators "is" and "is not" test for object identity: "x is y" ' + 'is\n' + 'true if and only if *x* and *y* are the same object. "x is not y"\n' + 'yields the inverse truth value. 
[4]\n', + 'integers': '\n' + 'Integer literals\n' + '****************\n' + '\n' + 'Integer literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' integer ::= decimalinteger | octinteger | hexinteger ' + '| bininteger\n' + ' decimalinteger ::= nonzerodigit digit* | "0"+\n' + ' nonzerodigit ::= "1"..."9"\n' + ' digit ::= "0"..."9"\n' + ' octinteger ::= "0" ("o" | "O") octdigit+\n' + ' hexinteger ::= "0" ("x" | "X") hexdigit+\n' + ' bininteger ::= "0" ("b" | "B") bindigit+\n' + ' octdigit ::= "0"..."7"\n' + ' hexdigit ::= digit | "a"..."f" | "A"..."F"\n' + ' bindigit ::= "0" | "1"\n' + '\n' + 'There is no limit for the length of integer literals apart ' + 'from what\n' + 'can be stored in available memory.\n' + '\n' + 'Note that leading zeros in a non-zero decimal number are not ' + 'allowed.\n' + 'This is for disambiguation with C-style octal literals, which ' + 'Python\n' + 'used before version 3.0.\n' + '\n' + 'Some examples of integer literals:\n' + '\n' + ' 7 2147483647 0o177 ' + '0b100110111\n' + ' 3 79228162514264337593543950336 0o377 ' + '0x100000000\n' + ' 79228162514264337593543950336 ' + '0xdeadbeef\n', + 'lambda': '\n' + 'Lambdas\n' + '*******\n' + '\n' + ' lambda_expr ::= "lambda" [parameter_list]: expression\n' + ' lambda_expr_nocond ::= "lambda" [parameter_list]: ' + 'expression_nocond\n' + '\n' + 'Lambda expressions (sometimes called lambda forms) are used to ' + 'create\n' + 'anonymous functions. The expression "lambda arguments: ' + 'expression"\n' + 'yields a function object. The unnamed object behaves like a ' + 'function\n' + 'object defined with\n' + '\n' + ' def (arguments):\n' + ' return expression\n' + '\n' + 'See section *Function definitions* for the syntax of parameter ' + 'lists.\n' + 'Note that functions created with lambda expressions cannot ' + 'contain\n' + 'statements or annotations.\n', + 'lists': '\n' + 'List displays\n' + '*************\n' + '\n' + 'A list display is a possibly empty series of expressions enclosed ' + 'in\n' + 'square brackets:\n' + '\n' + ' list_display ::= "[" [expression_list | comprehension] "]"\n' + '\n' + 'A list display yields a new list object, the contents being ' + 'specified\n' + 'by either a list of expressions or a comprehension. When a ' + 'comma-\n' + 'separated list of expressions is supplied, its elements are ' + 'evaluated\n' + 'from left to right and placed into the list object in that ' + 'order.\n' + 'When a comprehension is supplied, the list is constructed from ' + 'the\n' + 'elements resulting from the comprehension.\n', + 'naming': '\n' + 'Naming and binding\n' + '******************\n' + '\n' + '*Names* refer to objects. Names are introduced by name binding\n' + 'operations. Each occurrence of a name in the program text refers ' + 'to\n' + 'the *binding* of that name established in the innermost function ' + 'block\n' + 'containing the use.\n' + '\n' + 'A *block* is a piece of Python program text that is executed as ' + 'a\n' + 'unit. The following are blocks: a module, a function body, and a ' + 'class\n' + 'definition. Each command typed interactively is a block. A ' + 'script\n' + 'file (a file given as standard input to the interpreter or ' + 'specified\n' + 'as a command line argument to the interpreter) is a code block. ' + 'A\n' + 'script command (a command specified on the interpreter command ' + 'line\n' + "with the '**-c**' option) is a code block. 
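To illustrate the lambda equivalence described above (a lambda expression yields the same kind of function object as the corresponding "def"), a minimal sketch; the names adder and adder_def are arbitrary:

    >>> adder = lambda x, y: x + y      # anonymous function object
    >>> def adder_def(x, y):            # the equivalent "def" spelling
    ...     return x + y
    ...
    >>> adder(2, 3) == adder_def(2, 3)
    True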
The string argument " + 'passed\n' + 'to the built-in functions "eval()" and "exec()" is a code ' + 'block.\n' + '\n' + 'A code block is executed in an *execution frame*. A frame ' + 'contains\n' + 'some administrative information (used for debugging) and ' + 'determines\n' + "where and how execution continues after the code block's " + 'execution has\n' + 'completed.\n' + '\n' + 'A *scope* defines the visibility of a name within a block. If a ' + 'local\n' + 'variable is defined in a block, its scope includes that block. ' + 'If the\n' + 'definition occurs in a function block, the scope extends to any ' + 'blocks\n' + 'contained within the defining one, unless a contained block ' + 'introduces\n' + 'a different binding for the name. The scope of names defined in ' + 'a\n' + 'class block is limited to the class block; it does not extend to ' + 'the\n' + 'code blocks of methods -- this includes comprehensions and ' + 'generator\n' + 'expressions since they are implemented using a function scope. ' + 'This\n' + 'means that the following will fail:\n' + '\n' + ' class A:\n' + ' a = 42\n' + ' b = list(a + i for i in range(10))\n' + '\n' + 'When a name is used in a code block, it is resolved using the ' + 'nearest\n' + 'enclosing scope. The set of all such scopes visible to a code ' + 'block\n' + "is called the block's *environment*.\n" + '\n' + 'If a name is bound in a block, it is a local variable of that ' + 'block,\n' + 'unless declared as "nonlocal". If a name is bound at the ' + 'module\n' + 'level, it is a global variable. (The variables of the module ' + 'code\n' + 'block are local and global.) If a variable is used in a code ' + 'block\n' + 'but not defined there, it is a *free variable*.\n' + '\n' + 'When a name is not found at all, a "NameError" exception is ' + 'raised.\n' + 'If the name refers to a local variable that has not been bound, ' + 'an\n' + '"UnboundLocalError" exception is raised. "UnboundLocalError" is ' + 'a\n' + 'subclass of "NameError".\n' + '\n' + 'The following constructs bind names: formal parameters to ' + 'functions,\n' + '"import" statements, class and function definitions (these bind ' + 'the\n' + 'class or function name in the defining block), and targets that ' + 'are\n' + 'identifiers if occurring in an assignment, "for" loop header, or ' + 'after\n' + '"as" in a "with" statement or "except" clause. The "import" ' + 'statement\n' + 'of the form "from ... import *" binds all names defined in the\n' + 'imported module, except those beginning with an underscore. ' + 'This form\n' + 'may only be used at the module level.\n' + '\n' + 'A target occurring in a "del" statement is also considered bound ' + 'for\n' + 'this purpose (though the actual semantics are to unbind the ' + 'name).\n' + '\n' + 'Each assignment or import statement occurs within a block ' + 'defined by a\n' + 'class or function definition or at the module level (the ' + 'top-level\n' + 'code block).\n' + '\n' + 'If a name binding operation occurs anywhere within a code block, ' + 'all\n' + 'uses of the name within the block are treated as references to ' + 'the\n' + 'current block. This can lead to errors when a name is used ' + 'within a\n' + 'block before it is bound. This rule is subtle. Python lacks\n' + 'declarations and allows name binding operations to occur ' + 'anywhere\n' + 'within a code block. 
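A small sketch of the rule just stated, using arbitrary names x and f: because x is bound somewhere inside the function body, every use of x in that block refers to the local binding, even before the assignment runs.

    >>> x = 10
    >>> def f():
    ...     print(x)    # x is assigned later in this block, so it is local throughout f
    ...     x = 20
    ...
    >>> f()
    Traceback (most recent call last):
      ...
    UnboundLocalError: local variable 'x' referenced before assignment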
The local variables of a code block can ' + 'be\n' + 'determined by scanning the entire text of the block for name ' + 'binding\n' + 'operations.\n' + '\n' + 'If the "global" statement occurs within a block, all uses of the ' + 'name\n' + 'specified in the statement refer to the binding of that name in ' + 'the\n' + 'top-level namespace. Names are resolved in the top-level ' + 'namespace by\n' + 'searching the global namespace, i.e. the namespace of the ' + 'module\n' + 'containing the code block, and the builtins namespace, the ' + 'namespace\n' + 'of the module "builtins". The global namespace is searched ' + 'first. If\n' + 'the name is not found there, the builtins namespace is ' + 'searched. The\n' + 'global statement must precede all uses of the name.\n' + '\n' + 'The builtins namespace associated with the execution of a code ' + 'block\n' + 'is actually found by looking up the name "__builtins__" in its ' + 'global\n' + 'namespace; this should be a dictionary or a module (in the ' + 'latter case\n' + "the module's dictionary is used). By default, when in the " + '"__main__"\n' + 'module, "__builtins__" is the built-in module "builtins"; when ' + 'in any\n' + 'other module, "__builtins__" is an alias for the dictionary of ' + 'the\n' + '"builtins" module itself. "__builtins__" can be set to a ' + 'user-created\n' + 'dictionary to create a weak form of restricted execution.\n' + '\n' + '**CPython implementation detail:** Users should not touch\n' + '"__builtins__"; it is strictly an implementation detail. Users\n' + 'wanting to override values in the builtins namespace should ' + '"import"\n' + 'the "builtins" module and modify its attributes appropriately.\n' + '\n' + 'The namespace for a module is automatically created the first ' + 'time a\n' + 'module is imported. The main module for a script is always ' + 'called\n' + '"__main__".\n' + '\n' + 'The "global" statement has the same scope as a name binding ' + 'operation\n' + 'in the same block. If the nearest enclosing scope for a free ' + 'variable\n' + 'contains a global statement, the free variable is treated as a ' + 'global.\n' + '\n' + 'A class definition is an executable statement that may use and ' + 'define\n' + 'names. These references follow the normal rules for name ' + 'resolution.\n' + 'The namespace of the class definition becomes the attribute ' + 'dictionary\n' + 'of the class. Names defined at the class scope are not visible ' + 'in\n' + 'methods.\n' + '\n' + '\n' + 'Interaction with dynamic features\n' + '=================================\n' + '\n' + 'There are several cases where Python statements are illegal when ' + 'used\n' + 'in conjunction with nested scopes that contain free variables.\n' + '\n' + 'If a variable is referenced in an enclosing scope, it is illegal ' + 'to\n' + 'delete the name. An error will be reported at compile time.\n' + '\n' + 'If the wild card form of import --- "import *" --- is used in a\n' + 'function and the function contains or is a nested block with ' + 'free\n' + 'variables, the compiler will raise a "SyntaxError".\n' + '\n' + 'The "eval()" and "exec()" functions do not have access to the ' + 'full\n' + 'environment for resolving names. Names may be resolved in the ' + 'local\n' + 'and global namespaces of the caller. Free variables are not ' + 'resolved\n' + 'in the nearest enclosing namespace, but in the global ' + 'namespace. [1]\n' + 'The "exec()" and "eval()" functions have optional arguments to\n' + 'override the global and local namespace. 
If only one namespace ' + 'is\n' + 'specified, it is used for both.\n', + 'nonlocal': '\n' + 'The "nonlocal" statement\n' + '************************\n' + '\n' + ' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n' + '\n' + 'The "nonlocal" statement causes the listed identifiers to ' + 'refer to\n' + 'previously bound variables in the nearest enclosing scope ' + 'excluding\n' + 'globals. This is important because the default behavior for ' + 'binding is\n' + 'to search the local namespace first. The statement allows\n' + 'encapsulated code to rebind variables outside of the local ' + 'scope\n' + 'besides the global (module) scope.\n' + '\n' + 'Names listed in a "nonlocal" statement, unlike those listed in ' + 'a\n' + '"global" statement, must refer to pre-existing bindings in an\n' + 'enclosing scope (the scope in which a new binding should be ' + 'created\n' + 'cannot be determined unambiguously).\n' + '\n' + 'Names listed in a "nonlocal" statement must not collide with ' + 'pre-\n' + 'existing bindings in the local scope.\n' + '\n' + 'See also: **PEP 3104** - Access to Names in Outer Scopes\n' + '\n' + ' The specification for the "nonlocal" statement.\n', + 'numbers': '\n' + 'Numeric literals\n' + '****************\n' + '\n' + 'There are three types of numeric literals: integers, floating ' + 'point\n' + 'numbers, and imaginary numbers. There are no complex literals\n' + '(complex numbers can be formed by adding a real number and an\n' + 'imaginary number).\n' + '\n' + 'Note that numeric literals do not include a sign; a phrase like ' + '"-1"\n' + 'is actually an expression composed of the unary operator ' + '\'"-"\' and the\n' + 'literal "1".\n', + 'numeric-types': '\n' + 'Emulating numeric types\n' + '***********************\n' + '\n' + 'The following methods can be defined to emulate numeric ' + 'objects.\n' + 'Methods corresponding to operations that are not ' + 'supported by the\n' + 'particular kind of number implemented (e.g., bitwise ' + 'operations for\n' + 'non-integral numbers) should be left undefined.\n' + '\n' + 'object.__add__(self, other)\n' + 'object.__sub__(self, other)\n' + 'object.__mul__(self, other)\n' + 'object.__truediv__(self, other)\n' + 'object.__floordiv__(self, other)\n' + 'object.__mod__(self, other)\n' + 'object.__divmod__(self, other)\n' + 'object.__pow__(self, other[, modulo])\n' + 'object.__lshift__(self, other)\n' + 'object.__rshift__(self, other)\n' + 'object.__and__(self, other)\n' + 'object.__xor__(self, other)\n' + 'object.__or__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "/", "//", "%", "divmod()", ' + '"pow()",\n' + ' "**", "<<", ">>", "&", "^", "|"). For instance, to ' + 'evaluate the\n' + ' expression "x + y", where *x* is an instance of a ' + 'class that has an\n' + ' "__add__()" method, "x.__add__(y)" is called. The ' + '"__divmod__()"\n' + ' method should be the equivalent to using ' + '"__floordiv__()" and\n' + ' "__mod__()"; it should not be related to ' + '"__truediv__()". 
Note\n' + ' that "__pow__()" should be defined to accept an ' + 'optional third\n' + ' argument if the ternary version of the built-in ' + '"pow()" function is\n' + ' to be supported.\n' + '\n' + ' If one of those methods does not support the operation ' + 'with the\n' + ' supplied arguments, it should return ' + '"NotImplemented".\n' + '\n' + 'object.__radd__(self, other)\n' + 'object.__rsub__(self, other)\n' + 'object.__rmul__(self, other)\n' + 'object.__rtruediv__(self, other)\n' + 'object.__rfloordiv__(self, other)\n' + 'object.__rmod__(self, other)\n' + 'object.__rdivmod__(self, other)\n' + 'object.__rpow__(self, other)\n' + 'object.__rlshift__(self, other)\n' + 'object.__rrshift__(self, other)\n' + 'object.__rand__(self, other)\n' + 'object.__rxor__(self, other)\n' + 'object.__ror__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "/", "//", "%", "divmod()", ' + '"pow()",\n' + ' "**", "<<", ">>", "&", "^", "|") with reflected ' + '(swapped) operands.\n' + ' These functions are only called if the left operand ' + 'does not\n' + ' support the corresponding operation and the operands ' + 'are of\n' + ' different types. [2] For instance, to evaluate the ' + 'expression "x -\n' + ' y", where *y* is an instance of a class that has an ' + '"__rsub__()"\n' + ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' + 'returns\n' + ' *NotImplemented*.\n' + '\n' + ' Note that ternary "pow()" will not try calling ' + '"__rpow__()" (the\n' + ' coercion rules would become too complicated).\n' + '\n' + " Note: If the right operand's type is a subclass of the " + 'left\n' + " operand's type and that subclass provides the " + 'reflected method\n' + ' for the operation, this method will be called before ' + 'the left\n' + " operand's non-reflected method. This behavior " + 'allows subclasses\n' + " to override their ancestors' operations.\n" + '\n' + 'object.__iadd__(self, other)\n' + 'object.__isub__(self, other)\n' + 'object.__imul__(self, other)\n' + 'object.__itruediv__(self, other)\n' + 'object.__ifloordiv__(self, other)\n' + 'object.__imod__(self, other)\n' + 'object.__ipow__(self, other[, modulo])\n' + 'object.__ilshift__(self, other)\n' + 'object.__irshift__(self, other)\n' + 'object.__iand__(self, other)\n' + 'object.__ixor__(self, other)\n' + 'object.__ior__(self, other)\n' + '\n' + ' These methods are called to implement the augmented ' + 'arithmetic\n' + ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", ' + '"**=", "<<=",\n' + ' ">>=", "&=", "^=", "|="). These methods should ' + 'attempt to do the\n' + ' operation in-place (modifying *self*) and return the ' + 'result (which\n' + ' could be, but does not have to be, *self*). If a ' + 'specific method\n' + ' is not defined, the augmented assignment falls back to ' + 'the normal\n' + ' methods. For instance, if *x* is an instance of a ' + 'class with an\n' + ' "__iadd__()" method, "x += y" is equivalent to "x = ' + 'x.__iadd__(y)"\n' + ' . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are ' + 'considered, as\n' + ' with the evaluation of "x + y". 
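A hedged sketch of the dispatch just described; the class Length and its single operation are invented for illustration:

    class Length:
        """Invented numeric wrapper that only sketches addition."""
        def __init__(self, value):
            self.value = value
        def __add__(self, other):
            if isinstance(other, (int, float)):
                return Length(self.value + other)
            return NotImplemented        # let the other operand's method try
        def __radd__(self, other):
            # used for "other + self" when type(other).__add__ returns NotImplemented
            return self.__add__(other)

    total = 1 + Length(2)    # int.__add__ fails, so Length.__radd__(1) is called
    assert total.value == 3

    x = Length(1)
    x += 2                   # no __iadd__ defined, so this falls back to x = x.__add__(2)
    assert x.value == 3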
In certain situations, ' + 'augmented\n' + ' assignment can result in unexpected errors (see *Why ' + 'does\n' + " a_tuple[i] += ['item'] raise an exception when the " + 'addition\n' + ' works?*), but this behavior is in fact part of the ' + 'data model.\n' + '\n' + 'object.__neg__(self)\n' + 'object.__pos__(self)\n' + 'object.__abs__(self)\n' + 'object.__invert__(self)\n' + '\n' + ' Called to implement the unary arithmetic operations ' + '("-", "+",\n' + ' "abs()" and "~").\n' + '\n' + 'object.__complex__(self)\n' + 'object.__int__(self)\n' + 'object.__float__(self)\n' + 'object.__round__(self[, n])\n' + '\n' + ' Called to implement the built-in functions ' + '"complex()", "int()",\n' + ' "float()" and "round()". Should return a value of the ' + 'appropriate\n' + ' type.\n' + '\n' + 'object.__index__(self)\n' + '\n' + ' Called to implement "operator.index()", and whenever ' + 'Python needs\n' + ' to losslessly convert the numeric object to an integer ' + 'object (such\n' + ' as in slicing, or in the built-in "bin()", "hex()" and ' + '"oct()"\n' + ' functions). Presence of this method indicates that the ' + 'numeric\n' + ' object is an integer type. Must return an integer.\n' + '\n' + ' Note: In order to have a coherent integer type class, ' + 'when\n' + ' "__index__()" is defined "__int__()" should also be ' + 'defined, and\n' + ' both should return the same value.\n', + 'objects': '\n' + 'Objects, values and types\n' + '*************************\n' + '\n' + "*Objects* are Python's abstraction for data. All data in a " + 'Python\n' + 'program is represented by objects or by relations between ' + 'objects. (In\n' + "a sense, and in conformance to Von Neumann's model of a " + '"stored\n' + 'program computer," code is also represented by objects.)\n' + '\n' + "Every object has an identity, a type and a value. An object's\n" + '*identity* never changes once it has been created; you may ' + 'think of it\n' + 'as the object\'s address in memory. The \'"is"\' operator ' + 'compares the\n' + 'identity of two objects; the "id()" function returns an ' + 'integer\n' + 'representing its identity.\n' + '\n' + '**CPython implementation detail:** For CPython, "id(x)" is the ' + 'memory\n' + 'address where "x" is stored.\n' + '\n' + "An object's type determines the operations that the object " + 'supports\n' + '(e.g., "does it have a length?") and also defines the possible ' + 'values\n' + 'for objects of that type. The "type()" function returns an ' + "object's\n" + 'type (which is an object itself). Like its identity, an ' + "object's\n" + '*type* is also unchangeable. [1]\n' + '\n' + 'The *value* of some objects can change. Objects whose value ' + 'can\n' + 'change are said to be *mutable*; objects whose value is ' + 'unchangeable\n' + 'once they are created are called *immutable*. (The value of an\n' + 'immutable container object that contains a reference to a ' + 'mutable\n' + "object can change when the latter's value is changed; however " + 'the\n' + 'container is still considered immutable, because the collection ' + 'of\n' + 'objects it contains cannot be changed. So, immutability is ' + 'not\n' + 'strictly the same as having an unchangeable value, it is more ' + 'subtle.)\n' + "An object's mutability is determined by its type; for " + 'instance,\n' + 'numbers, strings and tuples are immutable, while dictionaries ' + 'and\n' + 'lists are mutable.\n' + '\n' + 'Objects are never explicitly destroyed; however, when they ' + 'become\n' + 'unreachable they may be garbage-collected. 
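A small sketch of the "__index__()" hook described above; Nibble is an invented class:

    class Nibble:
        """Invented integer-like type; __index__() lets it stand in for an int."""
        def __init__(self, value):
            self._value = value & 0xF
        def __index__(self):
            return self._value
        def __int__(self):       # recommended to agree with __index__()
            return self._value

    n = Nibble(5)
    print(bin(n))            # 0b101  -- bin() accepts any object providing __index__()
    print('abcdefgh'[n])     # f      -- and so does sequence indexing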
An implementation ' + 'is\n' + 'allowed to postpone garbage collection or omit it altogether ' + '--- it is\n' + 'a matter of implementation quality how garbage collection is\n' + 'implemented, as long as no objects are collected that are ' + 'still\n' + 'reachable.\n' + '\n' + '**CPython implementation detail:** CPython currently uses a ' + 'reference-\n' + 'counting scheme with (optional) delayed detection of cyclically ' + 'linked\n' + 'garbage, which collects most objects as soon as they become\n' + 'unreachable, but is not guaranteed to collect garbage ' + 'containing\n' + 'circular references. See the documentation of the "gc" module ' + 'for\n' + 'information on controlling the collection of cyclic garbage. ' + 'Other\n' + 'implementations act differently and CPython may change. Do not ' + 'depend\n' + 'on immediate finalization of objects when they become ' + 'unreachable (so\n' + 'you should always close files explicitly).\n' + '\n' + "Note that the use of the implementation's tracing or debugging\n" + 'facilities may keep objects alive that would normally be ' + 'collectable.\n' + 'Also note that catching an exception with a ' + '\'"try"..."except"\'\n' + 'statement may keep objects alive.\n' + '\n' + 'Some objects contain references to "external" resources such as ' + 'open\n' + 'files or windows. It is understood that these resources are ' + 'freed\n' + 'when the object is garbage-collected, but since garbage ' + 'collection is\n' + 'not guaranteed to happen, such objects also provide an explicit ' + 'way to\n' + 'release the external resource, usually a "close()" method. ' + 'Programs\n' + 'are strongly recommended to explicitly close such objects. ' + 'The\n' + '\'"try"..."finally"\' statement and the \'"with"\' statement ' + 'provide\n' + 'convenient ways to do this.\n' + '\n' + 'Some objects contain references to other objects; these are ' + 'called\n' + '*containers*. Examples of containers are tuples, lists and\n' + "dictionaries. The references are part of a container's value. " + 'In\n' + 'most cases, when we talk about the value of a container, we ' + 'imply the\n' + 'values, not the identities of the contained objects; however, ' + 'when we\n' + 'talk about the mutability of a container, only the identities ' + 'of the\n' + 'immediately contained objects are implied. So, if an ' + 'immutable\n' + 'container (like a tuple) contains a reference to a mutable ' + 'object, its\n' + 'value changes if that mutable object is changed.\n' + '\n' + 'Types affect almost all aspects of object behavior. Even the\n' + 'importance of object identity is affected in some sense: for ' + 'immutable\n' + 'types, operations that compute new values may actually return ' + 'a\n' + 'reference to any existing object with the same type and value, ' + 'while\n' + 'for mutable objects this is not allowed. E.g., after "a = 1; b ' + '= 1",\n' + '"a" and "b" may or may not refer to the same object with the ' + 'value\n' + 'one, depending on the implementation, but after "c = []; d = ' + '[]", "c"\n' + 'and "d" are guaranteed to refer to two different, unique, ' + 'newly\n' + 'created empty lists. (Note that "c = d = []" assigns the same ' + 'object\n' + 'to both "c" and "d".)\n', + 'operator-summary': '\n' + 'Operator precedence\n' + '*******************\n' + '\n' + 'The following table summarizes the operator precedence ' + 'in Python, from\n' + 'lowest precedence (least binding) to highest ' + 'precedence (most\n' + 'binding). Operators in the same box have the same ' + 'precedence. 
Unless\n' + 'the syntax is explicitly given, operators are binary. ' + 'Operators in\n' + 'the same box group left to right (except for ' + 'exponentiation, which\n' + 'groups from right to left).\n' + '\n' + 'Note that comparisons, membership tests, and identity ' + 'tests, all have\n' + 'the same precedence and have a left-to-right chaining ' + 'feature as\n' + 'described in the *Comparisons* section.\n' + '\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| Operator | ' + 'Description |\n' + '+=================================================+=======================================+\n' + '| "lambda" | ' + 'Lambda expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "if" -- "else" | ' + 'Conditional expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "or" | ' + 'Boolean OR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "and" | ' + 'Boolean AND |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "not" "x" | ' + 'Boolean NOT |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "in", "not in", "is", "is not", "<", "<=", ">", | ' + 'Comparisons, including membership |\n' + '| ">=", "!=", "==" | ' + 'tests and identity tests |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "|" | ' + 'Bitwise OR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "^" | ' + 'Bitwise XOR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "&" | ' + 'Bitwise AND |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "<<", ">>" | ' + 'Shifts |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "+", "-" | ' + 'Addition and subtraction |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "*", "/", "//", "%" | ' + 'Multiplication, division, remainder |\n' + '| | ' + '[5] |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "+x", "-x", "~x" | ' + 'Positive, negative, bitwise NOT |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "**" | ' + 'Exponentiation [6] |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "x[index]", "x[index:index]", | ' + 'Subscription, slicing, call, |\n' + '| "x(arguments...)", "x.attribute" | ' + 'attribute reference |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "(expressions...)", "[expressions...]", "{key: | ' + 'Binding or tuple display, list |\n' + '| value...}", "{expressions...}" | ' + 'display, dictionary display, set |\n' + '| | ' + 'display |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] While "abs(x%y) < abs(y)" is true mathematically, ' + 'for floats\n' + ' it may not be true numerically due to roundoff. 
' + 'For example, and\n' + ' assuming a platform on which a Python float is an ' + 'IEEE 754 double-\n' + ' precision number, in order that "-1e-100 % 1e100" ' + 'have the same\n' + ' sign as "1e100", the computed result is "-1e-100 + ' + '1e100", which\n' + ' is numerically exactly equal to "1e100". The ' + 'function\n' + ' "math.fmod()" returns a result whose sign matches ' + 'the sign of the\n' + ' first argument instead, and so returns "-1e-100" ' + 'in this case.\n' + ' Which approach is more appropriate depends on the ' + 'application.\n' + '\n' + '[2] If x is very close to an exact integer multiple of ' + "y, it's\n" + ' possible for "x//y" to be one larger than ' + '"(x-x%y)//y" due to\n' + ' rounding. In such cases, Python returns the ' + 'latter result, in\n' + ' order to preserve that "divmod(x,y)[0] * y + x % ' + 'y" be very close\n' + ' to "x".\n' + '\n' + '[3] While comparisons between strings make sense at ' + 'the byte\n' + ' level, they may be counter-intuitive to users. ' + 'For example, the\n' + ' strings ""\\u00C7"" and ""\\u0327\\u0043"" compare ' + 'differently, even\n' + ' though they both represent the same unicode ' + 'character (LATIN\n' + ' CAPITAL LETTER C WITH CEDILLA). To compare ' + 'strings in a human\n' + ' recognizable way, compare using ' + '"unicodedata.normalize()".\n' + '\n' + '[4] Due to automatic garbage-collection, free lists, ' + 'and the\n' + ' dynamic nature of descriptors, you may notice ' + 'seemingly unusual\n' + ' behaviour in certain uses of the "is" operator, ' + 'like those\n' + ' involving comparisons between instance methods, or ' + 'constants.\n' + ' Check their documentation for more info.\n' + '\n' + '[5] The "%" operator is also used for string ' + 'formatting; the same\n' + ' precedence applies.\n' + '\n' + '[6] The power operator "**" binds less tightly than an ' + 'arithmetic\n' + ' or bitwise unary operator on its right, that is, ' + '"2**-1" is "0.5".\n', + 'pass': '\n' + 'The "pass" statement\n' + '********************\n' + '\n' + ' pass_stmt ::= "pass"\n' + '\n' + '"pass" is a null operation --- when it is executed, nothing ' + 'happens.\n' + 'It is useful as a placeholder when a statement is required\n' + 'syntactically, but no code needs to be executed, for example:\n' + '\n' + ' def f(arg): pass # a function that does nothing (yet)\n' + '\n' + ' class C: pass # a class with no methods (yet)\n', + 'power': '\n' + 'The power operator\n' + '******************\n' + '\n' + 'The power operator binds more tightly than unary operators on ' + 'its\n' + 'left; it binds less tightly than unary operators on its right. ' + 'The\n' + 'syntax is:\n' + '\n' + ' power ::= primary ["**" u_expr]\n' + '\n' + 'Thus, in an unparenthesized sequence of power and unary ' + 'operators, the\n' + 'operators are evaluated from right to left (this does not ' + 'constrain\n' + 'the evaluation order for the operands): "-1**2" results in "-1".\n' + '\n' + 'The power operator has the same semantics as the built-in ' + '"pow()"\n' + 'function, when called with two arguments: it yields its left ' + 'argument\n' + 'raised to the power of its right argument. The numeric arguments ' + 'are\n' + 'first converted to a common type, and the result is of that ' + 'type.\n' + '\n' + 'For int operands, the result has the same type as the operands ' + 'unless\n' + 'the second argument is negative; in that case, all arguments are\n' + 'converted to float and a float result is delivered. 
For example,\n' + '"10**2" returns "100", but "10**-2" returns "0.01".\n' + '\n' + 'Raising "0.0" to a negative power results in a ' + '"ZeroDivisionError".\n' + 'Raising a negative number to a fractional power results in a ' + '"complex"\n' + 'number. (In earlier versions it raised a "ValueError".)\n', + 'raise': '\n' + 'The "raise" statement\n' + '*********************\n' + '\n' + ' raise_stmt ::= "raise" [expression ["from" expression]]\n' + '\n' + 'If no expressions are present, "raise" re-raises the last ' + 'exception\n' + 'that was active in the current scope. If no exception is active ' + 'in\n' + 'the current scope, a "RuntimeError" exception is raised ' + 'indicating\n' + 'that this is an error.\n' + '\n' + 'Otherwise, "raise" evaluates the first expression as the ' + 'exception\n' + 'object. It must be either a subclass or an instance of\n' + '"BaseException". If it is a class, the exception instance will ' + 'be\n' + 'obtained when needed by instantiating the class with no ' + 'arguments.\n' + '\n' + "The *type* of the exception is the exception instance's class, " + 'the\n' + '*value* is the instance itself.\n' + '\n' + 'A traceback object is normally created automatically when an ' + 'exception\n' + 'is raised and attached to it as the "__traceback__" attribute, ' + 'which\n' + 'is writable. You can create an exception and set your own ' + 'traceback in\n' + 'one step using the "with_traceback()" exception method (which ' + 'returns\n' + 'the same exception instance, with its traceback set to its ' + 'argument),\n' + 'like so:\n' + '\n' + ' raise Exception("foo occurred").with_traceback(tracebackobj)\n' + '\n' + 'The "from" clause is used for exception chaining: if given, the ' + 'second\n' + '*expression* must be another exception class or instance, which ' + 'will\n' + 'then be attached to the raised exception as the "__cause__" ' + 'attribute\n' + '(which is writable). If the raised exception is not handled, ' + 'both\n' + 'exceptions will be printed:\n' + '\n' + ' >>> try:\n' + ' ... print(1 / 0)\n' + ' ... except Exception as exc:\n' + ' ... raise RuntimeError("Something bad happened") from exc\n' + ' ...\n' + ' Traceback (most recent call last):\n' + ' File "", line 2, in \n' + ' ZeroDivisionError: int division or modulo by zero\n' + '\n' + ' The above exception was the direct cause of the following ' + 'exception:\n' + '\n' + ' Traceback (most recent call last):\n' + ' File "", line 4, in \n' + ' RuntimeError: Something bad happened\n' + '\n' + 'A similar mechanism works implicitly if an exception is raised ' + 'inside\n' + 'an exception handler: the previous exception is then attached as ' + 'the\n' + 'new exception\'s "__context__" attribute:\n' + '\n' + ' >>> try:\n' + ' ... print(1 / 0)\n' + ' ... except:\n' + ' ... 
raise RuntimeError("Something bad happened")\n' + ' ...\n' + ' Traceback (most recent call last):\n' + ' File "", line 2, in \n' + ' ZeroDivisionError: int division or modulo by zero\n' + '\n' + ' During handling of the above exception, another exception ' + 'occurred:\n' + '\n' + ' Traceback (most recent call last):\n' + ' File "", line 4, in \n' + ' RuntimeError: Something bad happened\n' + '\n' + 'Additional information on exceptions can be found in section\n' + '*Exceptions*, and information about handling exceptions is in ' + 'section\n' + '*The try statement*.\n', + 'return': '\n' + 'The "return" statement\n' + '**********************\n' + '\n' + ' return_stmt ::= "return" [expression_list]\n' + '\n' + '"return" may only occur syntactically nested in a function ' + 'definition,\n' + 'not within a nested class definition.\n' + '\n' + 'If an expression list is present, it is evaluated, else "None" ' + 'is\n' + 'substituted.\n' + '\n' + '"return" leaves the current function call with the expression ' + 'list (or\n' + '"None") as return value.\n' + '\n' + 'When "return" passes control out of a "try" statement with a ' + '"finally"\n' + 'clause, that "finally" clause is executed before really leaving ' + 'the\n' + 'function.\n' + '\n' + 'In a generator function, the "return" statement indicates that ' + 'the\n' + 'generator is done and will cause "StopIteration" to be raised. ' + 'The\n' + 'returned value (if any) is used as an argument to construct\n' + '"StopIteration" and becomes the "StopIteration.value" ' + 'attribute.\n', + 'sequence-types': '\n' + 'Emulating container types\n' + '*************************\n' + '\n' + 'The following methods can be defined to implement ' + 'container objects.\n' + 'Containers usually are sequences (such as lists or ' + 'tuples) or mappings\n' + '(like dictionaries), but can represent other containers ' + 'as well. The\n' + 'first set of methods is used either to emulate a ' + 'sequence or to\n' + 'emulate a mapping; the difference is that for a ' + 'sequence, the\n' + 'allowable keys should be the integers *k* for which "0 ' + '<= k < N" where\n' + '*N* is the length of the sequence, or slice objects, ' + 'which define a\n' + 'range of items. It is also recommended that mappings ' + 'provide the\n' + 'methods "keys()", "values()", "items()", "get()", ' + '"clear()",\n' + '"setdefault()", "pop()", "popitem()", "copy()", and ' + '"update()"\n' + "behaving similar to those for Python's standard " + 'dictionary objects.\n' + 'The "collections" module provides a "MutableMapping" ' + 'abstract base\n' + 'class to help create those methods from a base set of ' + '"__getitem__()",\n' + '"__setitem__()", "__delitem__()", and "keys()". Mutable ' + 'sequences\n' + 'should provide methods "append()", "count()", "index()", ' + '"extend()",\n' + '"insert()", "pop()", "remove()", "reverse()" and ' + '"sort()", like Python\n' + 'standard list objects. Finally, sequence types should ' + 'implement\n' + 'addition (meaning concatenation) and multiplication ' + '(meaning\n' + 'repetition) by defining the methods "__add__()", ' + '"__radd__()",\n' + '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" ' + 'described\n' + 'below; they should not define other numerical ' + 'operators. It is\n' + 'recommended that both mappings and sequences implement ' + 'the\n' + '"__contains__()" method to allow efficient use of the ' + '"in" operator;\n' + 'for mappings, "in" should search the mapping\'s keys; ' + 'for sequences, it\n' + 'should search through the values. 
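The generator behaviour described above (a "return" value becomes "StopIteration.value") can be observed directly; a brief interactive sketch:

    >>> def gen():
    ...     yield 1
    ...     return 'done'          # becomes StopIteration.value
    ...
    >>> g = gen()
    >>> next(g)
    1
    >>> try:
    ...     next(g)
    ... except StopIteration as exc:
    ...     print(exc.value)
    ...
    done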
It is further ' + 'recommended that both\n' + 'mappings and sequences implement the "__iter__()" method ' + 'to allow\n' + 'efficient iteration through the container; for mappings, ' + '"__iter__()"\n' + 'should be the same as "keys()"; for sequences, it should ' + 'iterate\n' + 'through the values.\n' + '\n' + 'object.__len__(self)\n' + '\n' + ' Called to implement the built-in function "len()". ' + 'Should return\n' + ' the length of the object, an integer ">=" 0. Also, ' + 'an object that\n' + ' doesn\'t define a "__bool__()" method and whose ' + '"__len__()" method\n' + ' returns zero is considered to be false in a Boolean ' + 'context.\n' + '\n' + 'object.__length_hint__(self)\n' + '\n' + ' Called to implement "operator.length_hint()". Should ' + 'return an\n' + ' estimated length for the object (which may be greater ' + 'or less than\n' + ' the actual length). The length must be an integer ' + '">=" 0. This\n' + ' method is purely an optimization and is never ' + 'required for\n' + ' correctness.\n' + '\n' + ' New in version 3.4.\n' + '\n' + 'Note: Slicing is done exclusively with the following ' + 'three methods.\n' + ' A call like\n' + '\n' + ' a[1:2] = b\n' + '\n' + ' is translated to\n' + '\n' + ' a[slice(1, 2, None)] = b\n' + '\n' + ' and so forth. Missing slice items are always filled ' + 'in with "None".\n' + '\n' + 'object.__getitem__(self, key)\n' + '\n' + ' Called to implement evaluation of "self[key]". For ' + 'sequence types,\n' + ' the accepted keys should be integers and slice ' + 'objects. Note that\n' + ' the special interpretation of negative indexes (if ' + 'the class wishes\n' + ' to emulate a sequence type) is up to the ' + '"__getitem__()" method. If\n' + ' *key* is of an inappropriate type, "TypeError" may be ' + 'raised; if of\n' + ' a value outside the set of indexes for the sequence ' + '(after any\n' + ' special interpretation of negative values), ' + '"IndexError" should be\n' + ' raised. For mapping types, if *key* is missing (not ' + 'in the\n' + ' container), "KeyError" should be raised.\n' + '\n' + ' Note: "for" loops expect that an "IndexError" will be ' + 'raised for\n' + ' illegal indexes to allow proper detection of the ' + 'end of the\n' + ' sequence.\n' + '\n' + 'object.__setitem__(self, key, value)\n' + '\n' + ' Called to implement assignment to "self[key]". Same ' + 'note as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support changes to the values for keys, ' + 'or if new keys\n' + ' can be added, or for sequences if elements can be ' + 'replaced. The\n' + ' same exceptions should be raised for improper *key* ' + 'values as for\n' + ' the "__getitem__()" method.\n' + '\n' + 'object.__delitem__(self, key)\n' + '\n' + ' Called to implement deletion of "self[key]". Same ' + 'note as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support removal of keys, or for sequences ' + 'if elements\n' + ' can be removed from the sequence. The same ' + 'exceptions should be\n' + ' raised for improper *key* values as for the ' + '"__getitem__()" method.\n' + '\n' + 'object.__iter__(self)\n' + '\n' + ' This method is called when an iterator is required ' + 'for a container.\n' + ' This method should return a new iterator object that ' + 'can iterate\n' + ' over all the objects in the container. 
For mappings, ' + 'it should\n' + ' iterate over the keys of the container, and should ' + 'also be made\n' + ' available as the method "keys()".\n' + '\n' + ' Iterator objects also need to implement this method; ' + 'they are\n' + ' required to return themselves. For more information ' + 'on iterator\n' + ' objects, see *Iterator Types*.\n' + '\n' + 'object.__reversed__(self)\n' + '\n' + ' Called (if present) by the "reversed()" built-in to ' + 'implement\n' + ' reverse iteration. It should return a new iterator ' + 'object that\n' + ' iterates over all the objects in the container in ' + 'reverse order.\n' + '\n' + ' If the "__reversed__()" method is not provided, the ' + '"reversed()"\n' + ' built-in will fall back to using the sequence ' + 'protocol ("__len__()"\n' + ' and "__getitem__()"). Objects that support the ' + 'sequence protocol\n' + ' should only provide "__reversed__()" if they can ' + 'provide an\n' + ' implementation that is more efficient than the one ' + 'provided by\n' + ' "reversed()".\n' + '\n' + 'The membership test operators ("in" and "not in") are ' + 'normally\n' + 'implemented as an iteration through a sequence. ' + 'However, container\n' + 'objects can supply the following special method with a ' + 'more efficient\n' + 'implementation, which also does not require the object ' + 'be a sequence.\n' + '\n' + 'object.__contains__(self, item)\n' + '\n' + ' Called to implement membership test operators. ' + 'Should return true\n' + ' if *item* is in *self*, false otherwise. For mapping ' + 'objects, this\n' + ' should consider the keys of the mapping rather than ' + 'the values or\n' + ' the key-item pairs.\n' + '\n' + ' For objects that don\'t define "__contains__()", the ' + 'membership test\n' + ' first tries iteration via "__iter__()", then the old ' + 'sequence\n' + ' iteration protocol via "__getitem__()", see *this ' + 'section in the\n' + ' language reference*.\n', + 'shifting': '\n' + 'Shifting operations\n' + '*******************\n' + '\n' + 'The shifting operations have lower priority than the ' + 'arithmetic\n' + 'operations:\n' + '\n' + ' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n' + '\n' + 'These operators accept integers as arguments. They shift the ' + 'first\n' + 'argument to the left or right by the number of bits given by ' + 'the\n' + 'second argument.\n' + '\n' + 'A right shift by *n* bits is defined as floor division by ' + '"pow(2,n)".\n' + 'A left shift by *n* bits is defined as multiplication with ' + '"pow(2,n)".\n' + '\n' + 'Note: In the current implementation, the right-hand operand ' + 'is\n' + ' required to be at most "sys.maxsize". If the right-hand ' + 'operand is\n' + ' larger than "sys.maxsize" an "OverflowError" exception is ' + 'raised.\n', + 'slicings': '\n' + 'Slicings\n' + '********\n' + '\n' + 'A slicing selects a range of items in a sequence object (e.g., ' + 'a\n' + 'string, tuple or list). Slicings may be used as expressions ' + 'or as\n' + 'targets in assignment or "del" statements. 
The syntax for a ' + 'slicing:\n' + '\n' + ' slicing ::= primary "[" slice_list "]"\n' + ' slice_list ::= slice_item ("," slice_item)* [","]\n' + ' slice_item ::= expression | proper_slice\n' + ' proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" ' + '[stride] ]\n' + ' lower_bound ::= expression\n' + ' upper_bound ::= expression\n' + ' stride ::= expression\n' + '\n' + 'There is ambiguity in the formal syntax here: anything that ' + 'looks like\n' + 'an expression list also looks like a slice list, so any ' + 'subscription\n' + 'can be interpreted as a slicing. Rather than further ' + 'complicating the\n' + 'syntax, this is disambiguated by defining that in this case ' + 'the\n' + 'interpretation as a subscription takes priority over the\n' + 'interpretation as a slicing (this is the case if the slice ' + 'list\n' + 'contains no proper slice).\n' + '\n' + 'The semantics for a slicing are as follows. The primary must ' + 'evaluate\n' + 'to a mapping object, and it is indexed (using the same ' + '"__getitem__()"\n' + 'method as normal subscription) with a key that is constructed ' + 'from the\n' + 'slice list, as follows. If the slice list contains at least ' + 'one\n' + 'comma, the key is a tuple containing the conversion of the ' + 'slice\n' + 'items; otherwise, the conversion of the lone slice item is the ' + 'key.\n' + 'The conversion of a slice item that is an expression is that\n' + 'expression. The conversion of a proper slice is a slice ' + 'object (see\n' + 'section *The standard type hierarchy*) whose "start", "stop" ' + 'and\n' + '"step" attributes are the values of the expressions given as ' + 'lower\n' + 'bound, upper bound and stride, respectively, substituting ' + '"None" for\n' + 'missing expressions.\n', + 'specialattrs': '\n' + 'Special Attributes\n' + '******************\n' + '\n' + 'The implementation adds a few special read-only attributes ' + 'to several\n' + 'object types, where they are relevant. Some of these are ' + 'not reported\n' + 'by the "dir()" built-in function.\n' + '\n' + 'object.__dict__\n' + '\n' + ' A dictionary or other mapping object used to store an ' + "object's\n" + ' (writable) attributes.\n' + '\n' + 'instance.__class__\n' + '\n' + ' The class to which a class instance belongs.\n' + '\n' + 'class.__bases__\n' + '\n' + ' The tuple of base classes of a class object.\n' + '\n' + 'class.__name__\n' + '\n' + ' The name of the class or type.\n' + '\n' + 'class.__qualname__\n' + '\n' + ' The *qualified name* of the class or type.\n' + '\n' + ' New in version 3.3.\n' + '\n' + 'class.__mro__\n' + '\n' + ' This attribute is a tuple of classes that are ' + 'considered when\n' + ' looking for base classes during method resolution.\n' + '\n' + 'class.mro()\n' + '\n' + ' This method can be overridden by a metaclass to ' + 'customize the\n' + ' method resolution order for its instances. It is ' + 'called at class\n' + ' instantiation, and its result is stored in "__mro__".\n' + '\n' + 'class.__subclasses__()\n' + '\n' + ' Each class keeps a list of weak references to its ' + 'immediate\n' + ' subclasses. This method returns a list of all those ' + 'references\n' + ' still alive. 
Example:\n' + '\n' + ' >>> int.__subclasses__()\n' + " []\n" + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] Additional information on these special methods may be ' + 'found\n' + ' in the Python Reference Manual (*Basic ' + 'customization*).\n' + '\n' + '[2] As a consequence, the list "[1, 2]" is considered ' + 'equal to\n' + ' "[1.0, 2.0]", and similarly for tuples.\n' + '\n' + "[3] They must have since the parser can't tell the type of " + 'the\n' + ' operands.\n' + '\n' + '[4] Cased characters are those with general category ' + 'property\n' + ' being one of "Lu" (Letter, uppercase), "Ll" (Letter, ' + 'lowercase),\n' + ' or "Lt" (Letter, titlecase).\n' + '\n' + '[5] To format only a tuple you should therefore provide a\n' + ' singleton tuple whose only element is the tuple to be ' + 'formatted.\n', + 'specialnames': '\n' + 'Special method names\n' + '********************\n' + '\n' + 'A class can implement certain operations that are invoked ' + 'by special\n' + 'syntax (such as arithmetic operations or subscripting and ' + 'slicing) by\n' + "defining methods with special names. This is Python's " + 'approach to\n' + '*operator overloading*, allowing classes to define their ' + 'own behavior\n' + 'with respect to language operators. For instance, if a ' + 'class defines\n' + 'a method named "__getitem__()", and "x" is an instance of ' + 'this class,\n' + 'then "x[i]" is roughly equivalent to ' + '"type(x).__getitem__(x, i)".\n' + 'Except where mentioned, attempts to execute an operation ' + 'raise an\n' + 'exception when no appropriate method is defined ' + '(typically\n' + '"AttributeError" or "TypeError").\n' + '\n' + 'When implementing a class that emulates any built-in type, ' + 'it is\n' + 'important that the emulation only be implemented to the ' + 'degree that it\n' + 'makes sense for the object being modelled. For example, ' + 'some\n' + 'sequences may work well with retrieval of individual ' + 'elements, but\n' + 'extracting a slice may not make sense. (One example of ' + 'this is the\n' + '"NodeList" interface in the W3C\'s Document Object ' + 'Model.)\n' + '\n' + '\n' + 'Basic customization\n' + '===================\n' + '\n' + 'object.__new__(cls[, ...])\n' + '\n' + ' Called to create a new instance of class *cls*. ' + '"__new__()" is a\n' + ' static method (special-cased so you need not declare it ' + 'as such)\n' + ' that takes the class of which an instance was requested ' + 'as its\n' + ' first argument. The remaining arguments are those ' + 'passed to the\n' + ' object constructor expression (the call to the class). 
' + 'The return\n' + ' value of "__new__()" should be the new object instance ' + '(usually an\n' + ' instance of *cls*).\n' + '\n' + ' Typical implementations create a new instance of the ' + 'class by\n' + ' invoking the superclass\'s "__new__()" method using\n' + ' "super(currentclass, cls).__new__(cls[, ...])" with ' + 'appropriate\n' + ' arguments and then modifying the newly-created instance ' + 'as\n' + ' necessary before returning it.\n' + '\n' + ' If "__new__()" returns an instance of *cls*, then the ' + 'new\n' + ' instance\'s "__init__()" method will be invoked like\n' + ' "__init__(self[, ...])", where *self* is the new ' + 'instance and the\n' + ' remaining arguments are the same as were passed to ' + '"__new__()".\n' + '\n' + ' If "__new__()" does not return an instance of *cls*, ' + 'then the new\n' + ' instance\'s "__init__()" method will not be invoked.\n' + '\n' + ' "__new__()" is intended mainly to allow subclasses of ' + 'immutable\n' + ' types (like int, str, or tuple) to customize instance ' + 'creation. It\n' + ' is also commonly overridden in custom metaclasses in ' + 'order to\n' + ' customize class creation.\n' + '\n' + 'object.__init__(self[, ...])\n' + '\n' + ' Called when the instance is created. The arguments are ' + 'those\n' + ' passed to the class constructor expression. If a base ' + 'class has an\n' + ' "__init__()" method, the derived class\'s "__init__()" ' + 'method, if\n' + ' any, must explicitly call it to ensure proper ' + 'initialization of the\n' + ' base class part of the instance; for example:\n' + ' "BaseClass.__init__(self, [args...])". As a special ' + 'constraint on\n' + ' constructors, no value may be returned; doing so will ' + 'cause a\n' + ' "TypeError" to be raised at runtime.\n' + '\n' + 'object.__del__(self)\n' + '\n' + ' Called when the instance is about to be destroyed. ' + 'This is also\n' + ' called a destructor. If a base class has a "__del__()" ' + 'method, the\n' + ' derived class\'s "__del__()" method, if any, must ' + 'explicitly call it\n' + ' to ensure proper deletion of the base class part of the ' + 'instance.\n' + ' Note that it is possible (though not recommended!) for ' + 'the\n' + ' "__del__()" method to postpone destruction of the ' + 'instance by\n' + ' creating a new reference to it. It may then be called ' + 'at a later\n' + ' time when this new reference is deleted. It is not ' + 'guaranteed that\n' + ' "__del__()" methods are called for objects that still ' + 'exist when\n' + ' the interpreter exits.\n' + '\n' + ' Note: "del x" doesn\'t directly call "x.__del__()" --- ' + 'the former\n' + ' decrements the reference count for "x" by one, and ' + 'the latter is\n' + ' only called when "x"\'s reference count reaches ' + 'zero. Some common\n' + ' situations that may prevent the reference count of an ' + 'object from\n' + ' going to zero include: circular references between ' + 'objects (e.g.,\n' + ' a doubly-linked list or a tree data structure with ' + 'parent and\n' + ' child pointers); a reference to the object on the ' + 'stack frame of\n' + ' a function that caught an exception (the traceback ' + 'stored in\n' + ' "sys.exc_info()[2]" keeps the stack frame alive); or ' + 'a reference\n' + ' to the object on the stack frame that raised an ' + 'unhandled\n' + ' exception in interactive mode (the traceback stored ' + 'in\n' + ' "sys.last_traceback" keeps the stack frame alive). 
' + 'The first\n' + ' situation can only be remedied by explicitly breaking ' + 'the cycles;\n' + ' the latter two situations can be resolved by storing ' + '"None" in\n' + ' "sys.last_traceback". Circular references which are ' + 'garbage are\n' + ' detected and cleaned up when the cyclic garbage ' + 'collector is\n' + " enabled (it's on by default). Refer to the " + 'documentation for the\n' + ' "gc" module for more information about this topic.\n' + '\n' + ' Warning: Due to the precarious circumstances under ' + 'which\n' + ' "__del__()" methods are invoked, exceptions that ' + 'occur during\n' + ' their execution are ignored, and a warning is printed ' + 'to\n' + ' "sys.stderr" instead. Also, when "__del__()" is ' + 'invoked in\n' + ' response to a module being deleted (e.g., when ' + 'execution of the\n' + ' program is done), other globals referenced by the ' + '"__del__()"\n' + ' method may already have been deleted or in the ' + 'process of being\n' + ' torn down (e.g. the import machinery shutting down). ' + 'For this\n' + ' reason, "__del__()" methods should do the absolute ' + 'minimum needed\n' + ' to maintain external invariants. Starting with ' + 'version 1.5,\n' + ' Python guarantees that globals whose name begins with ' + 'a single\n' + ' underscore are deleted from their module before other ' + 'globals are\n' + ' deleted; if no other references to such globals ' + 'exist, this may\n' + ' help in assuring that imported modules are still ' + 'available at the\n' + ' time when the "__del__()" method is called.\n' + '\n' + 'object.__repr__(self)\n' + '\n' + ' Called by the "repr()" built-in function to compute the ' + '"official"\n' + ' string representation of an object. If at all ' + 'possible, this\n' + ' should look like a valid Python expression that could ' + 'be used to\n' + ' recreate an object with the same value (given an ' + 'appropriate\n' + ' environment). If this is not possible, a string of the ' + 'form\n' + ' "<...some useful description...>" should be returned. ' + 'The return\n' + ' value must be a string object. If a class defines ' + '"__repr__()" but\n' + ' not "__str__()", then "__repr__()" is also used when an ' + '"informal"\n' + ' string representation of instances of that class is ' + 'required.\n' + '\n' + ' This is typically used for debugging, so it is ' + 'important that the\n' + ' representation is information-rich and unambiguous.\n' + '\n' + 'object.__str__(self)\n' + '\n' + ' Called by "str(object)" and the built-in functions ' + '"format()" and\n' + ' "print()" to compute the "informal" or nicely printable ' + 'string\n' + ' representation of an object. The return value must be ' + 'a *string*\n' + ' object.\n' + '\n' + ' This method differs from "object.__repr__()" in that ' + 'there is no\n' + ' expectation that "__str__()" return a valid Python ' + 'expression: a\n' + ' more convenient or concise representation can be used.\n' + '\n' + ' The default implementation defined by the built-in type ' + '"object"\n' + ' calls "object.__repr__()".\n' + '\n' + 'object.__bytes__(self)\n' + '\n' + ' Called by "bytes()" to compute a byte-string ' + 'representation of an\n' + ' object. This should return a "bytes" object.\n' + '\n' + 'object.__format__(self, format_spec)\n' + '\n' + ' Called by the "format()" built-in function (and by ' + 'extension, the\n' + ' "str.format()" method of class "str") to produce a ' + '"formatted"\n' + ' string representation of an object. 
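The "__repr__()"/"__str__()" distinction described above, illustrated with the standard library's datetime.date:

    >>> import datetime
    >>> d = datetime.date(2014, 9, 1)
    >>> str(d)       # informal, for display
    '2014-09-01'
    >>> repr(d)      # official, aims to be a valid expression recreating the object
    'datetime.date(2014, 9, 1)'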
The "format_spec" ' + 'argument is a\n' + ' string that contains a description of the formatting ' + 'options\n' + ' desired. The interpretation of the "format_spec" ' + 'argument is up to\n' + ' the type implementing "__format__()", however most ' + 'classes will\n' + ' either delegate formatting to one of the built-in ' + 'types, or use a\n' + ' similar formatting option syntax.\n' + '\n' + ' See *Format Specification Mini-Language* for a ' + 'description of the\n' + ' standard formatting syntax.\n' + '\n' + ' The return value must be a string object.\n' + '\n' + ' Changed in version 3.4: The __format__ method of ' + '"object" itself\n' + ' raises a "TypeError" if passed any non-empty string.\n' + '\n' + 'object.__lt__(self, other)\n' + 'object.__le__(self, other)\n' + 'object.__eq__(self, other)\n' + 'object.__ne__(self, other)\n' + 'object.__gt__(self, other)\n' + 'object.__ge__(self, other)\n' + '\n' + ' These are the so-called "rich comparison" methods. The\n' + ' correspondence between operator symbols and method ' + 'names is as\n' + ' follows: "xy" calls\n' + ' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' + '\n' + ' A rich comparison method may return the singleton ' + '"NotImplemented"\n' + ' if it does not implement the operation for a given pair ' + 'of\n' + ' arguments. By convention, "False" and "True" are ' + 'returned for a\n' + ' successful comparison. However, these methods can ' + 'return any value,\n' + ' so if the comparison operator is used in a Boolean ' + 'context (e.g.,\n' + ' in the condition of an "if" statement), Python will ' + 'call "bool()"\n' + ' on the value to determine if the result is true or ' + 'false.\n' + '\n' + ' There are no implied relationships among the comparison ' + 'operators.\n' + ' The truth of "x==y" does not imply that "x!=y" is ' + 'false.\n' + ' Accordingly, when defining "__eq__()", one should also ' + 'define\n' + ' "__ne__()" so that the operators will behave as ' + 'expected. See the\n' + ' paragraph on "__hash__()" for some important notes on ' + 'creating\n' + ' *hashable* objects which support custom comparison ' + 'operations and\n' + ' are usable as dictionary keys.\n' + '\n' + ' There are no swapped-argument versions of these methods ' + '(to be used\n' + ' when the left argument does not support the operation ' + 'but the right\n' + ' argument does); rather, "__lt__()" and "__gt__()" are ' + "each other's\n" + ' reflection, "__le__()" and "__ge__()" are each other\'s ' + 'reflection,\n' + ' and "__eq__()" and "__ne__()" are their own ' + 'reflection.\n' + '\n' + ' Arguments to rich comparison methods are never ' + 'coerced.\n' + '\n' + ' To automatically generate ordering operations from a ' + 'single root\n' + ' operation, see "functools.total_ordering()".\n' + '\n' + 'object.__hash__(self)\n' + '\n' + ' Called by built-in function "hash()" and for operations ' + 'on members\n' + ' of hashed collections including "set", "frozenset", and ' + '"dict".\n' + ' "__hash__()" should return an integer. The only ' + 'required property\n' + ' is that objects which compare equal have the same hash ' + 'value; it is\n' + ' advised to somehow mix together (e.g. using exclusive ' + 'or) the hash\n' + ' values for the components of the object that also play ' + 'a part in\n' + ' comparison of objects.\n' + '\n' + ' Note: "hash()" truncates the value returned from an ' + "object's\n" + ' custom "__hash__()" method to the size of a ' + '"Py_ssize_t". 
This\n' + ' is typically 8 bytes on 64-bit builds and 4 bytes on ' + '32-bit\n' + ' builds. If an object\'s "__hash__()" must ' + 'interoperate on builds\n' + ' of different bit sizes, be sure to check the width on ' + 'all\n' + ' supported builds. An easy way to do this is with ' + '"python -c\n' + ' "import sys; print(sys.hash_info.width)""\n' + '\n' + ' If a class does not define an "__eq__()" method it ' + 'should not\n' + ' define a "__hash__()" operation either; if it defines ' + '"__eq__()"\n' + ' but not "__hash__()", its instances will not be usable ' + 'as items in\n' + ' hashable collections. If a class defines mutable ' + 'objects and\n' + ' implements an "__eq__()" method, it should not ' + 'implement\n' + ' "__hash__()", since the implementation of hashable ' + 'collections\n' + " requires that a key's hash value is immutable (if the " + "object's hash\n" + ' value changes, it will be in the wrong hash bucket).\n' + '\n' + ' User-defined classes have "__eq__()" and "__hash__()" ' + 'methods by\n' + ' default; with them, all objects compare unequal (except ' + 'with\n' + ' themselves) and "x.__hash__()" returns an appropriate ' + 'value such\n' + ' that "x == y" implies both that "x is y" and "hash(x) ' + '== hash(y)".\n' + '\n' + ' A class that overrides "__eq__()" and does not define ' + '"__hash__()"\n' + ' will have its "__hash__()" implicitly set to "None". ' + 'When the\n' + ' "__hash__()" method of a class is "None", instances of ' + 'the class\n' + ' will raise an appropriate "TypeError" when a program ' + 'attempts to\n' + ' retrieve their hash value, and will also be correctly ' + 'identified as\n' + ' unhashable when checking "isinstance(obj, ' + 'collections.Hashable").\n' + '\n' + ' If a class that overrides "__eq__()" needs to retain ' + 'the\n' + ' implementation of "__hash__()" from a parent class, the ' + 'interpreter\n' + ' must be told this explicitly by setting "__hash__ =\n' + ' .__hash__".\n' + '\n' + ' If a class that does not override "__eq__()" wishes to ' + 'suppress\n' + ' hash support, it should include "__hash__ = None" in ' + 'the class\n' + ' definition. A class which defines its own "__hash__()" ' + 'that\n' + ' explicitly raises a "TypeError" would be incorrectly ' + 'identified as\n' + ' hashable by an "isinstance(obj, collections.Hashable)" ' + 'call.\n' + '\n' + ' Note: By default, the "__hash__()" values of str, bytes ' + 'and\n' + ' datetime objects are "salted" with an unpredictable ' + 'random value.\n' + ' Although they remain constant within an individual ' + 'Python\n' + ' process, they are not predictable between repeated ' + 'invocations of\n' + ' Python.This is intended to provide protection against ' + 'a denial-\n' + ' of-service caused by carefully-chosen inputs that ' + 'exploit the\n' + ' worst case performance of a dict insertion, O(n^2) ' + 'complexity.\n' + ' See ' + 'http://www.ocert.org/advisories/ocert-2011-003.html for\n' + ' details.Changing hash values affects the iteration ' + 'order of\n' + ' dicts, sets and other mappings. Python has never ' + 'made guarantees\n' + ' about this ordering (and it typically varies between ' + '32-bit and\n' + ' 64-bit builds).See also "PYTHONHASHSEED".\n' + '\n' + ' Changed in version 3.3: Hash randomization is enabled ' + 'by default.\n' + '\n' + 'object.__bool__(self)\n' + '\n' + ' Called to implement truth value testing and the ' + 'built-in operation\n' + ' "bool()"; should return "False" or "True". 
When this ' + 'method is not\n' + ' defined, "__len__()" is called, if it is defined, and ' + 'the object is\n' + ' considered true if its result is nonzero. If a class ' + 'defines\n' + ' neither "__len__()" nor "__bool__()", all its instances ' + 'are\n' + ' considered true.\n' + '\n' + '\n' + 'Customizing attribute access\n' + '============================\n' + '\n' + 'The following methods can be defined to customize the ' + 'meaning of\n' + 'attribute access (use of, assignment to, or deletion of ' + '"x.name") for\n' + 'class instances.\n' + '\n' + 'object.__getattr__(self, name)\n' + '\n' + ' Called when an attribute lookup has not found the ' + 'attribute in the\n' + ' usual places (i.e. it is not an instance attribute nor ' + 'is it found\n' + ' in the class tree for "self"). "name" is the attribute ' + 'name. This\n' + ' method should return the (computed) attribute value or ' + 'raise an\n' + ' "AttributeError" exception.\n' + '\n' + ' Note that if the attribute is found through the normal ' + 'mechanism,\n' + ' "__getattr__()" is not called. (This is an intentional ' + 'asymmetry\n' + ' between "__getattr__()" and "__setattr__()".) This is ' + 'done both for\n' + ' efficiency reasons and because otherwise ' + '"__getattr__()" would have\n' + ' no way to access other attributes of the instance. ' + 'Note that at\n' + ' least for instance variables, you can fake total ' + 'control by not\n' + ' inserting any values in the instance attribute ' + 'dictionary (but\n' + ' instead inserting them in another object). See the\n' + ' "__getattribute__()" method below for a way to actually ' + 'get total\n' + ' control over attribute access.\n' + '\n' + 'object.__getattribute__(self, name)\n' + '\n' + ' Called unconditionally to implement attribute accesses ' + 'for\n' + ' instances of the class. If the class also defines ' + '"__getattr__()",\n' + ' the latter will not be called unless ' + '"__getattribute__()" either\n' + ' calls it explicitly or raises an "AttributeError". This ' + 'method\n' + ' should return the (computed) attribute value or raise ' + 'an\n' + ' "AttributeError" exception. In order to avoid infinite ' + 'recursion in\n' + ' this method, its implementation should always call the ' + 'base class\n' + ' method with the same name to access any attributes it ' + 'needs, for\n' + ' example, "object.__getattribute__(self, name)".\n' + '\n' + ' Note: This method may still be bypassed when looking up ' + 'special\n' + ' methods as the result of implicit invocation via ' + 'language syntax\n' + ' or built-in functions. See *Special method lookup*.\n' + '\n' + 'object.__setattr__(self, name, value)\n' + '\n' + ' Called when an attribute assignment is attempted. This ' + 'is called\n' + ' instead of the normal mechanism (i.e. store the value ' + 'in the\n' + ' instance dictionary). *name* is the attribute name, ' + '*value* is the\n' + ' value to be assigned to it.\n' + '\n' + ' If "__setattr__()" wants to assign to an instance ' + 'attribute, it\n' + ' should call the base class method with the same name, ' + 'for example,\n' + ' "object.__setattr__(self, name, value)".\n' + '\n' + 'object.__delattr__(self, name)\n' + '\n' + ' Like "__setattr__()" but for attribute deletion instead ' + 'of\n' + ' assignment. This should only be implemented if "del ' + 'obj.name" is\n' + ' meaningful for the object.\n' + '\n' + 'object.__dir__(self)\n' + '\n' + ' Called when "dir()" is called on the object. A sequence ' + 'must be\n' + ' returned. 
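A short illustrative sketch of the "__getattr__()"/"__setattr__()" hooks just described (the Defaulting class is made up for the example); note the delegation to "object.__setattr__()" to avoid recursion, as recommended:

    class Defaulting:
        def __getattr__(self, name):
            # Reached only when normal lookup fails; return a computed value
            # instead of raising AttributeError.
            return 'missing:' + name

        def __setattr__(self, name, value):
            print('setting', name)
            object.__setattr__(self, name, value)   # store in the instance dict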
"dir()" converts the returned sequence to a ' + 'list and\n' + ' sorts it.\n' + '\n' + '\n' + 'Implementing Descriptors\n' + '------------------------\n' + '\n' + 'The following methods only apply when an instance of the ' + 'class\n' + 'containing the method (a so-called *descriptor* class) ' + 'appears in an\n' + '*owner* class (the descriptor must be in either the ' + "owner's class\n" + 'dictionary or in the class dictionary for one of its ' + 'parents). In the\n' + 'examples below, "the attribute" refers to the attribute ' + 'whose name is\n' + 'the key of the property in the owner class\' "__dict__".\n' + '\n' + 'object.__get__(self, instance, owner)\n' + '\n' + ' Called to get the attribute of the owner class (class ' + 'attribute\n' + ' access) or of an instance of that class (instance ' + 'attribute\n' + ' access). *owner* is always the owner class, while ' + '*instance* is the\n' + ' instance that the attribute was accessed through, or ' + '"None" when\n' + ' the attribute is accessed through the *owner*. This ' + 'method should\n' + ' return the (computed) attribute value or raise an ' + '"AttributeError"\n' + ' exception.\n' + '\n' + 'object.__set__(self, instance, value)\n' + '\n' + ' Called to set the attribute on an instance *instance* ' + 'of the owner\n' + ' class to a new value, *value*.\n' + '\n' + 'object.__delete__(self, instance)\n' + '\n' + ' Called to delete the attribute on an instance ' + '*instance* of the\n' + ' owner class.\n' + '\n' + 'The attribute "__objclass__" is interpreted by the ' + '"inspect" module as\n' + 'specifying the class where this object was defined ' + '(setting this\n' + 'appropriately can assist in runtime introspection of ' + 'dynamic class\n' + 'attributes). For callables, it may indicate that an ' + 'instance of the\n' + 'given type (or a subclass) is expected or required as the ' + 'first\n' + 'positional argument (for example, CPython sets this ' + 'attribute for\n' + 'unbound methods that are implemented in C).\n' + '\n' + '\n' + 'Invoking Descriptors\n' + '--------------------\n' + '\n' + 'In general, a descriptor is an object attribute with ' + '"binding\n' + 'behavior", one whose attribute access has been overridden ' + 'by methods\n' + 'in the descriptor protocol: "__get__()", "__set__()", ' + 'and\n' + '"__delete__()". If any of those methods are defined for an ' + 'object, it\n' + 'is said to be a descriptor.\n' + '\n' + 'The default behavior for attribute access is to get, set, ' + 'or delete\n' + "the attribute from an object's dictionary. For instance, " + '"a.x" has a\n' + 'lookup chain starting with "a.__dict__[\'x\']", then\n' + '"type(a).__dict__[\'x\']", and continuing through the base ' + 'classes of\n' + '"type(a)" excluding metaclasses.\n' + '\n' + 'However, if the looked-up value is an object defining one ' + 'of the\n' + 'descriptor methods, then Python may override the default ' + 'behavior and\n' + 'invoke the descriptor method instead. Where this occurs ' + 'in the\n' + 'precedence chain depends on which descriptor methods were ' + 'defined and\n' + 'how they were called.\n' + '\n' + 'The starting point for descriptor invocation is a binding, ' + '"a.x". 
How\n' + 'the arguments are assembled depends on "a":\n' + '\n' + 'Direct Call\n' + ' The simplest and least common call is when user code ' + 'directly\n' + ' invokes a descriptor method: "x.__get__(a)".\n' + '\n' + 'Instance Binding\n' + ' If binding to an object instance, "a.x" is transformed ' + 'into the\n' + ' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n' + '\n' + 'Class Binding\n' + ' If binding to a class, "A.x" is transformed into the ' + 'call:\n' + ' "A.__dict__[\'x\'].__get__(None, A)".\n' + '\n' + 'Super Binding\n' + ' If "a" is an instance of "super", then the binding ' + '"super(B,\n' + ' obj).m()" searches "obj.__class__.__mro__" for the base ' + 'class "A"\n' + ' immediately preceding "B" and then invokes the ' + 'descriptor with the\n' + ' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n' + '\n' + 'For instance bindings, the precedence of descriptor ' + 'invocation depends\n' + 'on the which descriptor methods are defined. A descriptor ' + 'can define\n' + 'any combination of "__get__()", "__set__()" and ' + '"__delete__()". If it\n' + 'does not define "__get__()", then accessing the attribute ' + 'will return\n' + 'the descriptor object itself unless there is a value in ' + "the object's\n" + 'instance dictionary. If the descriptor defines ' + '"__set__()" and/or\n' + '"__delete__()", it is a data descriptor; if it defines ' + 'neither, it is\n' + 'a non-data descriptor. Normally, data descriptors define ' + 'both\n' + '"__get__()" and "__set__()", while non-data descriptors ' + 'have just the\n' + '"__get__()" method. Data descriptors with "__set__()" and ' + '"__get__()"\n' + 'defined always override a redefinition in an instance ' + 'dictionary. In\n' + 'contrast, non-data descriptors can be overridden by ' + 'instances.\n' + '\n' + 'Python methods (including "staticmethod()" and ' + '"classmethod()") are\n' + 'implemented as non-data descriptors. Accordingly, ' + 'instances can\n' + 'redefine and override methods. This allows individual ' + 'instances to\n' + 'acquire behaviors that differ from other instances of the ' + 'same class.\n' + '\n' + 'The "property()" function is implemented as a data ' + 'descriptor.\n' + 'Accordingly, instances cannot override the behavior of a ' + 'property.\n' + '\n' + '\n' + '__slots__\n' + '---------\n' + '\n' + 'By default, instances of classes have a dictionary for ' + 'attribute\n' + 'storage. This wastes space for objects having very few ' + 'instance\n' + 'variables. The space consumption can become acute when ' + 'creating large\n' + 'numbers of instances.\n' + '\n' + 'The default can be overridden by defining *__slots__* in a ' + 'class\n' + 'definition. The *__slots__* declaration takes a sequence ' + 'of instance\n' + 'variables and reserves just enough space in each instance ' + 'to hold a\n' + 'value for each variable. Space is saved because ' + '*__dict__* is not\n' + 'created for each instance.\n' + '\n' + 'object.__slots__\n' + '\n' + ' This class variable can be assigned a string, iterable, ' + 'or sequence\n' + ' of strings with variable names used by instances. 
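A quick sketch of the *__slots__* declaration just introduced (the class name is invented):

    class SlottedPoint:
        __slots__ = ('x', 'y')      # no per-instance __dict__ is created

        def __init__(self, x, y):
            self.x = x
            self.y = y

    # SlottedPoint(1, 2).z = 3 raises AttributeError, since 'z' is not a slot.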
If ' + 'defined in a\n' + ' class, *__slots__* reserves space for the declared ' + 'variables and\n' + ' prevents the automatic creation of *__dict__* and ' + '*__weakref__* for\n' + ' each instance.\n' + '\n' + '\n' + 'Notes on using *__slots__*\n' + '~~~~~~~~~~~~~~~~~~~~~~~~~~\n' + '\n' + '* When inheriting from a class without *__slots__*, the ' + '*__dict__*\n' + ' attribute of that class will always be accessible, so a ' + '*__slots__*\n' + ' definition in the subclass is meaningless.\n' + '\n' + '* Without a *__dict__* variable, instances cannot be ' + 'assigned new\n' + ' variables not listed in the *__slots__* definition. ' + 'Attempts to\n' + ' assign to an unlisted variable name raises ' + '"AttributeError". If\n' + ' dynamic assignment of new variables is desired, then ' + 'add\n' + ' "\'__dict__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* Without a *__weakref__* variable for each instance, ' + 'classes\n' + ' defining *__slots__* do not support weak references to ' + 'its\n' + ' instances. If weak reference support is needed, then ' + 'add\n' + ' "\'__weakref__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* *__slots__* are implemented at the class level by ' + 'creating\n' + ' descriptors (*Implementing Descriptors*) for each ' + 'variable name. As\n' + ' a result, class attributes cannot be used to set default ' + 'values for\n' + ' instance variables defined by *__slots__*; otherwise, ' + 'the class\n' + ' attribute would overwrite the descriptor assignment.\n' + '\n' + '* The action of a *__slots__* declaration is limited to ' + 'the class\n' + ' where it is defined. As a result, subclasses will have ' + 'a *__dict__*\n' + ' unless they also define *__slots__* (which must only ' + 'contain names\n' + ' of any *additional* slots).\n' + '\n' + '* If a class defines a slot also defined in a base class, ' + 'the\n' + ' instance variable defined by the base class slot is ' + 'inaccessible\n' + ' (except by retrieving its descriptor directly from the ' + 'base class).\n' + ' This renders the meaning of the program undefined. In ' + 'the future, a\n' + ' check may be added to prevent this.\n' + '\n' + '* Nonempty *__slots__* does not work for classes derived ' + 'from\n' + ' "variable-length" built-in types such as "int", "bytes" ' + 'and "tuple".\n' + '\n' + '* Any non-string iterable may be assigned to *__slots__*. ' + 'Mappings\n' + ' may also be used; however, in the future, special ' + 'meaning may be\n' + ' assigned to the values corresponding to each key.\n' + '\n' + '* *__class__* assignment works only if both classes have ' + 'the same\n' + ' *__slots__*.\n' + '\n' + '\n' + 'Customizing class creation\n' + '==========================\n' + '\n' + 'By default, classes are constructed using "type()". The ' + 'class body is\n' + 'executed in a new namespace and the class name is bound ' + 'locally to the\n' + 'result of "type(name, bases, namespace)".\n' + '\n' + 'The class creation process can be customised by passing ' + 'the\n' + '"metaclass" keyword argument in the class definition line, ' + 'or by\n' + 'inheriting from an existing class that included such an ' + 'argument. 
In\n' + 'the following example, both "MyClass" and "MySubclass" are ' + 'instances\n' + 'of "Meta":\n' + '\n' + ' class Meta(type):\n' + ' pass\n' + '\n' + ' class MyClass(metaclass=Meta):\n' + ' pass\n' + '\n' + ' class MySubclass(MyClass):\n' + ' pass\n' + '\n' + 'Any other keyword arguments that are specified in the ' + 'class definition\n' + 'are passed through to all metaclass operations described ' + 'below.\n' + '\n' + 'When a class definition is executed, the following steps ' + 'occur:\n' + '\n' + '* the appropriate metaclass is determined\n' + '\n' + '* the class namespace is prepared\n' + '\n' + '* the class body is executed\n' + '\n' + '* the class object is created\n' + '\n' + '\n' + 'Determining the appropriate metaclass\n' + '-------------------------------------\n' + '\n' + 'The appropriate metaclass for a class definition is ' + 'determined as\n' + 'follows:\n' + '\n' + '* if no bases and no explicit metaclass are given, then ' + '"type()" is\n' + ' used\n' + '\n' + '* if an explicit metaclass is given and it is *not* an ' + 'instance of\n' + ' "type()", then it is used directly as the metaclass\n' + '\n' + '* if an instance of "type()" is given as the explicit ' + 'metaclass, or\n' + ' bases are defined, then the most derived metaclass is ' + 'used\n' + '\n' + 'The most derived metaclass is selected from the explicitly ' + 'specified\n' + 'metaclass (if any) and the metaclasses (i.e. "type(cls)") ' + 'of all\n' + 'specified base classes. The most derived metaclass is one ' + 'which is a\n' + 'subtype of *all* of these candidate metaclasses. If none ' + 'of the\n' + 'candidate metaclasses meets that criterion, then the class ' + 'definition\n' + 'will fail with "TypeError".\n' + '\n' + '\n' + 'Preparing the class namespace\n' + '-----------------------------\n' + '\n' + 'Once the appropriate metaclass has been identified, then ' + 'the class\n' + 'namespace is prepared. If the metaclass has a ' + '"__prepare__" attribute,\n' + 'it is called as "namespace = metaclass.__prepare__(name, ' + 'bases,\n' + '**kwds)" (where the additional keyword arguments, if any, ' + 'come from\n' + 'the class definition).\n' + '\n' + 'If the metaclass has no "__prepare__" attribute, then the ' + 'class\n' + 'namespace is initialised as an empty "dict()" instance.\n' + '\n' + 'See also: **PEP 3115** - Metaclasses in Python 3000\n' + '\n' + ' Introduced the "__prepare__" namespace hook\n' + '\n' + '\n' + 'Executing the class body\n' + '------------------------\n' + '\n' + 'The class body is executed (approximately) as "exec(body, ' + 'globals(),\n' + 'namespace)". The key difference from a normal call to ' + '"exec()" is that\n' + 'lexical scoping allows the class body (including any ' + 'methods) to\n' + 'reference names from the current and outer scopes when the ' + 'class\n' + 'definition occurs inside a function.\n' + '\n' + 'However, even when the class definition occurs inside the ' + 'function,\n' + 'methods defined inside the class still cannot see names ' + 'defined at the\n' + 'class scope. 
Class variables must be accessed through the ' + 'first\n' + 'parameter of instance or class methods, and cannot be ' + 'accessed at all\n' + 'from static methods.\n' + '\n' + '\n' + 'Creating the class object\n' + '-------------------------\n' + '\n' + 'Once the class namespace has been populated by executing ' + 'the class\n' + 'body, the class object is created by calling ' + '"metaclass(name, bases,\n' + 'namespace, **kwds)" (the additional keywords passed here ' + 'are the same\n' + 'as those passed to "__prepare__").\n' + '\n' + 'This class object is the one that will be referenced by ' + 'the zero-\n' + 'argument form of "super()". "__class__" is an implicit ' + 'closure\n' + 'reference created by the compiler if any methods in a ' + 'class body refer\n' + 'to either "__class__" or "super". This allows the zero ' + 'argument form\n' + 'of "super()" to correctly identify the class being defined ' + 'based on\n' + 'lexical scoping, while the class or instance that was used ' + 'to make the\n' + 'current call is identified based on the first argument ' + 'passed to the\n' + 'method.\n' + '\n' + 'After the class object is created, it is passed to the ' + 'class\n' + 'decorators included in the class definition (if any) and ' + 'the resulting\n' + 'object is bound in the local namespace as the defined ' + 'class.\n' + '\n' + 'See also: **PEP 3135** - New super\n' + '\n' + ' Describes the implicit "__class__" closure reference\n' + '\n' + '\n' + 'Metaclass example\n' + '-----------------\n' + '\n' + 'The potential uses for metaclasses are boundless. Some ' + 'ideas that have\n' + 'been explored include logging, interface checking, ' + 'automatic\n' + 'delegation, automatic property creation, proxies, ' + 'frameworks, and\n' + 'automatic resource locking/synchronization.\n' + '\n' + 'Here is an example of a metaclass that uses an\n' + '"collections.OrderedDict" to remember the order that class ' + 'variables\n' + 'are defined:\n' + '\n' + ' class OrderedClass(type):\n' + '\n' + ' @classmethod\n' + ' def __prepare__(metacls, name, bases, **kwds):\n' + ' return collections.OrderedDict()\n' + '\n' + ' def __new__(cls, name, bases, namespace, **kwds):\n' + ' result = type.__new__(cls, name, bases, ' + 'dict(namespace))\n' + ' result.members = tuple(namespace)\n' + ' return result\n' + '\n' + ' class A(metaclass=OrderedClass):\n' + ' def one(self): pass\n' + ' def two(self): pass\n' + ' def three(self): pass\n' + ' def four(self): pass\n' + '\n' + ' >>> A.members\n' + " ('__module__', 'one', 'two', 'three', 'four')\n" + '\n' + 'When the class definition for *A* gets executed, the ' + 'process begins\n' + 'with calling the metaclass\'s "__prepare__()" method which ' + 'returns an\n' + 'empty "collections.OrderedDict". That mapping records the ' + 'methods and\n' + 'attributes of *A* as they are defined within the body of ' + 'the class\n' + 'statement. Once those definitions are executed, the ' + 'ordered dictionary\n' + 'is fully populated and the metaclass\'s "__new__()" method ' + 'gets\n' + 'invoked. 
That method builds the new type and it saves the ' + 'ordered\n' + 'dictionary keys in an attribute called "members".\n' + '\n' + '\n' + 'Customizing instance and subclass checks\n' + '========================================\n' + '\n' + 'The following methods are used to override the default ' + 'behavior of the\n' + '"isinstance()" and "issubclass()" built-in functions.\n' + '\n' + 'In particular, the metaclass "abc.ABCMeta" implements ' + 'these methods in\n' + 'order to allow the addition of Abstract Base Classes ' + '(ABCs) as\n' + '"virtual base classes" to any class or type (including ' + 'built-in\n' + 'types), including other ABCs.\n' + '\n' + 'class.__instancecheck__(self, instance)\n' + '\n' + ' Return true if *instance* should be considered a ' + '(direct or\n' + ' indirect) instance of *class*. If defined, called to ' + 'implement\n' + ' "isinstance(instance, class)".\n' + '\n' + 'class.__subclasscheck__(self, subclass)\n' + '\n' + ' Return true if *subclass* should be considered a ' + '(direct or\n' + ' indirect) subclass of *class*. If defined, called to ' + 'implement\n' + ' "issubclass(subclass, class)".\n' + '\n' + 'Note that these methods are looked up on the type ' + '(metaclass) of a\n' + 'class. They cannot be defined as class methods in the ' + 'actual class.\n' + 'This is consistent with the lookup of special methods that ' + 'are called\n' + 'on instances, only in this case the instance is itself a ' + 'class.\n' + '\n' + 'See also: **PEP 3119** - Introducing Abstract Base ' + 'Classes\n' + '\n' + ' Includes the specification for customizing ' + '"isinstance()" and\n' + ' "issubclass()" behavior through "__instancecheck__()" ' + 'and\n' + ' "__subclasscheck__()", with motivation for this ' + 'functionality in\n' + ' the context of adding Abstract Base Classes (see the ' + '"abc"\n' + ' module) to the language.\n' + '\n' + '\n' + 'Emulating callable objects\n' + '==========================\n' + '\n' + 'object.__call__(self[, args...])\n' + '\n' + ' Called when the instance is "called" as a function; if ' + 'this method\n' + ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n' + ' "x.__call__(arg1, arg2, ...)".\n' + '\n' + '\n' + 'Emulating container types\n' + '=========================\n' + '\n' + 'The following methods can be defined to implement ' + 'container objects.\n' + 'Containers usually are sequences (such as lists or tuples) ' + 'or mappings\n' + '(like dictionaries), but can represent other containers as ' + 'well. The\n' + 'first set of methods is used either to emulate a sequence ' + 'or to\n' + 'emulate a mapping; the difference is that for a sequence, ' + 'the\n' + 'allowable keys should be the integers *k* for which "0 <= ' + 'k < N" where\n' + '*N* is the length of the sequence, or slice objects, which ' + 'define a\n' + 'range of items. It is also recommended that mappings ' + 'provide the\n' + 'methods "keys()", "values()", "items()", "get()", ' + '"clear()",\n' + '"setdefault()", "pop()", "popitem()", "copy()", and ' + '"update()"\n' + "behaving similar to those for Python's standard dictionary " + 'objects.\n' + 'The "collections" module provides a "MutableMapping" ' + 'abstract base\n' + 'class to help create those methods from a base set of ' + '"__getitem__()",\n' + '"__setitem__()", "__delitem__()", and "keys()". Mutable ' + 'sequences\n' + 'should provide methods "append()", "count()", "index()", ' + '"extend()",\n' + '"insert()", "pop()", "remove()", "reverse()" and "sort()", ' + 'like Python\n' + 'standard list objects. 
Finally, sequence types should ' + 'implement\n' + 'addition (meaning concatenation) and multiplication ' + '(meaning\n' + 'repetition) by defining the methods "__add__()", ' + '"__radd__()",\n' + '"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" ' + 'described\n' + 'below; they should not define other numerical operators. ' + 'It is\n' + 'recommended that both mappings and sequences implement ' + 'the\n' + '"__contains__()" method to allow efficient use of the "in" ' + 'operator;\n' + 'for mappings, "in" should search the mapping\'s keys; for ' + 'sequences, it\n' + 'should search through the values. It is further ' + 'recommended that both\n' + 'mappings and sequences implement the "__iter__()" method ' + 'to allow\n' + 'efficient iteration through the container; for mappings, ' + '"__iter__()"\n' + 'should be the same as "keys()"; for sequences, it should ' + 'iterate\n' + 'through the values.\n' + '\n' + 'object.__len__(self)\n' + '\n' + ' Called to implement the built-in function "len()". ' + 'Should return\n' + ' the length of the object, an integer ">=" 0. Also, an ' + 'object that\n' + ' doesn\'t define a "__bool__()" method and whose ' + '"__len__()" method\n' + ' returns zero is considered to be false in a Boolean ' + 'context.\n' + '\n' + 'object.__length_hint__(self)\n' + '\n' + ' Called to implement "operator.length_hint()". Should ' + 'return an\n' + ' estimated length for the object (which may be greater ' + 'or less than\n' + ' the actual length). The length must be an integer ">=" ' + '0. This\n' + ' method is purely an optimization and is never required ' + 'for\n' + ' correctness.\n' + '\n' + ' New in version 3.4.\n' + '\n' + 'Note: Slicing is done exclusively with the following three ' + 'methods.\n' + ' A call like\n' + '\n' + ' a[1:2] = b\n' + '\n' + ' is translated to\n' + '\n' + ' a[slice(1, 2, None)] = b\n' + '\n' + ' and so forth. Missing slice items are always filled in ' + 'with "None".\n' + '\n' + 'object.__getitem__(self, key)\n' + '\n' + ' Called to implement evaluation of "self[key]". For ' + 'sequence types,\n' + ' the accepted keys should be integers and slice ' + 'objects. Note that\n' + ' the special interpretation of negative indexes (if the ' + 'class wishes\n' + ' to emulate a sequence type) is up to the ' + '"__getitem__()" method. If\n' + ' *key* is of an inappropriate type, "TypeError" may be ' + 'raised; if of\n' + ' a value outside the set of indexes for the sequence ' + '(after any\n' + ' special interpretation of negative values), ' + '"IndexError" should be\n' + ' raised. For mapping types, if *key* is missing (not in ' + 'the\n' + ' container), "KeyError" should be raised.\n' + '\n' + ' Note: "for" loops expect that an "IndexError" will be ' + 'raised for\n' + ' illegal indexes to allow proper detection of the end ' + 'of the\n' + ' sequence.\n' + '\n' + 'object.__setitem__(self, key, value)\n' + '\n' + ' Called to implement assignment to "self[key]". Same ' + 'note as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support changes to the values for keys, or ' + 'if new keys\n' + ' can be added, or for sequences if elements can be ' + 'replaced. The\n' + ' same exceptions should be raised for improper *key* ' + 'values as for\n' + ' the "__getitem__()" method.\n' + '\n' + 'object.__delitem__(self, key)\n' + '\n' + ' Called to implement deletion of "self[key]". Same note ' + 'as for\n' + ' "__getitem__()". 
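Tying the mapping methods above together, a hedged sketch of a minimal mutable mapping (the LowerDict name is invented; "collections.abc.MutableMapping" supplies "keys()", "items()", "get()" and the rest once the five methods below exist):

    import collections.abc

    class LowerDict(collections.abc.MutableMapping):
        """Mapping that normalizes string keys to lowercase."""
        def __init__(self):
            self._data = {}

        def __getitem__(self, key):
            return self._data[key.lower()]     # KeyError propagates for misses

        def __setitem__(self, key, value):
            self._data[key.lower()] = value

        def __delitem__(self, key):
            del self._data[key.lower()]

        def __iter__(self):
            return iter(self._data)

        def __len__(self):
            return len(self._data)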
This should only be implemented for ' + 'mappings if\n' + ' the objects support removal of keys, or for sequences ' + 'if elements\n' + ' can be removed from the sequence. The same exceptions ' + 'should be\n' + ' raised for improper *key* values as for the ' + '"__getitem__()" method.\n' + '\n' + 'object.__iter__(self)\n' + '\n' + ' This method is called when an iterator is required for ' + 'a container.\n' + ' This method should return a new iterator object that ' + 'can iterate\n' + ' over all the objects in the container. For mappings, ' + 'it should\n' + ' iterate over the keys of the container, and should also ' + 'be made\n' + ' available as the method "keys()".\n' + '\n' + ' Iterator objects also need to implement this method; ' + 'they are\n' + ' required to return themselves. For more information on ' + 'iterator\n' + ' objects, see *Iterator Types*.\n' + '\n' + 'object.__reversed__(self)\n' + '\n' + ' Called (if present) by the "reversed()" built-in to ' + 'implement\n' + ' reverse iteration. It should return a new iterator ' + 'object that\n' + ' iterates over all the objects in the container in ' + 'reverse order.\n' + '\n' + ' If the "__reversed__()" method is not provided, the ' + '"reversed()"\n' + ' built-in will fall back to using the sequence protocol ' + '("__len__()"\n' + ' and "__getitem__()"). Objects that support the ' + 'sequence protocol\n' + ' should only provide "__reversed__()" if they can ' + 'provide an\n' + ' implementation that is more efficient than the one ' + 'provided by\n' + ' "reversed()".\n' + '\n' + 'The membership test operators ("in" and "not in") are ' + 'normally\n' + 'implemented as an iteration through a sequence. However, ' + 'container\n' + 'objects can supply the following special method with a ' + 'more efficient\n' + 'implementation, which also does not require the object be ' + 'a sequence.\n' + '\n' + 'object.__contains__(self, item)\n' + '\n' + ' Called to implement membership test operators. Should ' + 'return true\n' + ' if *item* is in *self*, false otherwise. For mapping ' + 'objects, this\n' + ' should consider the keys of the mapping rather than the ' + 'values or\n' + ' the key-item pairs.\n' + '\n' + ' For objects that don\'t define "__contains__()", the ' + 'membership test\n' + ' first tries iteration via "__iter__()", then the old ' + 'sequence\n' + ' iteration protocol via "__getitem__()", see *this ' + 'section in the\n' + ' language reference*.\n' + '\n' + '\n' + 'Emulating numeric types\n' + '=======================\n' + '\n' + 'The following methods can be defined to emulate numeric ' + 'objects.\n' + 'Methods corresponding to operations that are not supported ' + 'by the\n' + 'particular kind of number implemented (e.g., bitwise ' + 'operations for\n' + 'non-integral numbers) should be left undefined.\n' + '\n' + 'object.__add__(self, other)\n' + 'object.__sub__(self, other)\n' + 'object.__mul__(self, other)\n' + 'object.__truediv__(self, other)\n' + 'object.__floordiv__(self, other)\n' + 'object.__mod__(self, other)\n' + 'object.__divmod__(self, other)\n' + 'object.__pow__(self, other[, modulo])\n' + 'object.__lshift__(self, other)\n' + 'object.__rshift__(self, other)\n' + 'object.__and__(self, other)\n' + 'object.__xor__(self, other)\n' + 'object.__or__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "/", "//", "%", "divmod()", ' + '"pow()",\n' + ' "**", "<<", ">>", "&", "^", "|"). 
For instance, to ' + 'evaluate the\n' + ' expression "x + y", where *x* is an instance of a class ' + 'that has an\n' + ' "__add__()" method, "x.__add__(y)" is called. The ' + '"__divmod__()"\n' + ' method should be the equivalent to using ' + '"__floordiv__()" and\n' + ' "__mod__()"; it should not be related to ' + '"__truediv__()". Note\n' + ' that "__pow__()" should be defined to accept an ' + 'optional third\n' + ' argument if the ternary version of the built-in "pow()" ' + 'function is\n' + ' to be supported.\n' + '\n' + ' If one of those methods does not support the operation ' + 'with the\n' + ' supplied arguments, it should return "NotImplemented".\n' + '\n' + 'object.__radd__(self, other)\n' + 'object.__rsub__(self, other)\n' + 'object.__rmul__(self, other)\n' + 'object.__rtruediv__(self, other)\n' + 'object.__rfloordiv__(self, other)\n' + 'object.__rmod__(self, other)\n' + 'object.__rdivmod__(self, other)\n' + 'object.__rpow__(self, other)\n' + 'object.__rlshift__(self, other)\n' + 'object.__rrshift__(self, other)\n' + 'object.__rand__(self, other)\n' + 'object.__rxor__(self, other)\n' + 'object.__ror__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "/", "//", "%", "divmod()", ' + '"pow()",\n' + ' "**", "<<", ">>", "&", "^", "|") with reflected ' + '(swapped) operands.\n' + ' These functions are only called if the left operand ' + 'does not\n' + ' support the corresponding operation and the operands ' + 'are of\n' + ' different types. [2] For instance, to evaluate the ' + 'expression "x -\n' + ' y", where *y* is an instance of a class that has an ' + '"__rsub__()"\n' + ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' + 'returns\n' + ' *NotImplemented*.\n' + '\n' + ' Note that ternary "pow()" will not try calling ' + '"__rpow__()" (the\n' + ' coercion rules would become too complicated).\n' + '\n' + " Note: If the right operand's type is a subclass of the " + 'left\n' + " operand's type and that subclass provides the " + 'reflected method\n' + ' for the operation, this method will be called before ' + 'the left\n' + " operand's non-reflected method. This behavior allows " + 'subclasses\n' + " to override their ancestors' operations.\n" + '\n' + 'object.__iadd__(self, other)\n' + 'object.__isub__(self, other)\n' + 'object.__imul__(self, other)\n' + 'object.__itruediv__(self, other)\n' + 'object.__ifloordiv__(self, other)\n' + 'object.__imod__(self, other)\n' + 'object.__ipow__(self, other[, modulo])\n' + 'object.__ilshift__(self, other)\n' + 'object.__irshift__(self, other)\n' + 'object.__iand__(self, other)\n' + 'object.__ixor__(self, other)\n' + 'object.__ior__(self, other)\n' + '\n' + ' These methods are called to implement the augmented ' + 'arithmetic\n' + ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", ' + '"**=", "<<=",\n' + ' ">>=", "&=", "^=", "|="). These methods should attempt ' + 'to do the\n' + ' operation in-place (modifying *self*) and return the ' + 'result (which\n' + ' could be, but does not have to be, *self*). If a ' + 'specific method\n' + ' is not defined, the augmented assignment falls back to ' + 'the normal\n' + ' methods. For instance, if *x* is an instance of a ' + 'class with an\n' + ' "__iadd__()" method, "x += y" is equivalent to "x = ' + 'x.__iadd__(y)"\n' + ' . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are ' + 'considered, as\n' + ' with the evaluation of "x + y". 
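A hedged sketch pulling together the plain, reflected and in-place variants described above (the Accumulator class is hypothetical):

    class Accumulator:
        def __init__(self, value=0):
            self.value = value

        def __add__(self, other):
            if not isinstance(other, (int, float)):
                return NotImplemented       # give other.__radd__() a chance
            return Accumulator(self.value + other)

        def __radd__(self, other):
            # Reached for e.g. 3 + Accumulator(4), after int.__add__() fails.
            return self.__add__(other)

        def __iadd__(self, other):
            if not isinstance(other, (int, float)):
                return NotImplemented       # fall back to __add__()/__radd__()
            self.value += other             # modify in place
            return self                     # the result rebinds the target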
In certain situations, ' + 'augmented\n' + ' assignment can result in unexpected errors (see *Why ' + 'does\n' + " a_tuple[i] += ['item'] raise an exception when the " + 'addition\n' + ' works?*), but this behavior is in fact part of the data ' + 'model.\n' + '\n' + 'object.__neg__(self)\n' + 'object.__pos__(self)\n' + 'object.__abs__(self)\n' + 'object.__invert__(self)\n' + '\n' + ' Called to implement the unary arithmetic operations ' + '("-", "+",\n' + ' "abs()" and "~").\n' + '\n' + 'object.__complex__(self)\n' + 'object.__int__(self)\n' + 'object.__float__(self)\n' + 'object.__round__(self[, n])\n' + '\n' + ' Called to implement the built-in functions "complex()", ' + '"int()",\n' + ' "float()" and "round()". Should return a value of the ' + 'appropriate\n' + ' type.\n' + '\n' + 'object.__index__(self)\n' + '\n' + ' Called to implement "operator.index()", and whenever ' + 'Python needs\n' + ' to losslessly convert the numeric object to an integer ' + 'object (such\n' + ' as in slicing, or in the built-in "bin()", "hex()" and ' + '"oct()"\n' + ' functions). Presence of this method indicates that the ' + 'numeric\n' + ' object is an integer type. Must return an integer.\n' + '\n' + ' Note: In order to have a coherent integer type class, ' + 'when\n' + ' "__index__()" is defined "__int__()" should also be ' + 'defined, and\n' + ' both should return the same value.\n' + '\n' + '\n' + 'With Statement Context Managers\n' + '===============================\n' + '\n' + 'A *context manager* is an object that defines the runtime ' + 'context to\n' + 'be established when executing a "with" statement. The ' + 'context manager\n' + 'handles the entry into, and the exit from, the desired ' + 'runtime context\n' + 'for the execution of the block of code. Context managers ' + 'are normally\n' + 'invoked using the "with" statement (described in section ' + '*The with\n' + 'statement*), but can also be used by directly invoking ' + 'their methods.\n' + '\n' + 'Typical uses of context managers include saving and ' + 'restoring various\n' + 'kinds of global state, locking and unlocking resources, ' + 'closing opened\n' + 'files, etc.\n' + '\n' + 'For more information on context managers, see *Context ' + 'Manager Types*.\n' + '\n' + 'object.__enter__(self)\n' + '\n' + ' Enter the runtime context related to this object. The ' + '"with"\n' + " statement will bind this method's return value to the " + 'target(s)\n' + ' specified in the "as" clause of the statement, if any.\n' + '\n' + 'object.__exit__(self, exc_type, exc_value, traceback)\n' + '\n' + ' Exit the runtime context related to this object. The ' + 'parameters\n' + ' describe the exception that caused the context to be ' + 'exited. If the\n' + ' context was exited without an exception, all three ' + 'arguments will\n' + ' be "None".\n' + '\n' + ' If an exception is supplied, and the method wishes to ' + 'suppress the\n' + ' exception (i.e., prevent it from being propagated), it ' + 'should\n' + ' return a true value. 
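A small sketch of the "__enter__()"/"__exit__()" pair discussed above (the timed class is invented; it reports elapsed time and does not suppress exceptions):

    import time

    class timed:
        """Report how long the body of a with-block took."""
        def __enter__(self):
            self.start = time.monotonic()
            return self                     # bound to the "as" target, if any

        def __exit__(self, exc_type, exc_value, traceback):
            print('elapsed:', time.monotonic() - self.start)
            return False                    # never suppress the exception

    # Usage:
    #     with timed():
    #         ... the timed block ...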
Otherwise, the exception will be ' + 'processed\n' + ' normally upon exit from this method.\n' + '\n' + ' Note that "__exit__()" methods should not reraise the ' + 'passed-in\n' + " exception; this is the caller's responsibility.\n" + '\n' + 'See also: **PEP 0343** - The "with" statement\n' + '\n' + ' The specification, background, and examples for the ' + 'Python "with"\n' + ' statement.\n' + '\n' + '\n' + 'Special method lookup\n' + '=====================\n' + '\n' + 'For custom classes, implicit invocations of special ' + 'methods are only\n' + "guaranteed to work correctly if defined on an object's " + 'type, not in\n' + "the object's instance dictionary. That behaviour is the " + 'reason why\n' + 'the following code raises an exception:\n' + '\n' + ' >>> class C:\n' + ' ... pass\n' + ' ...\n' + ' >>> c = C()\n' + ' >>> c.__len__ = lambda: 5\n' + ' >>> len(c)\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " TypeError: object of type 'C' has no len()\n" + '\n' + 'The rationale behind this behaviour lies with a number of ' + 'special\n' + 'methods such as "__hash__()" and "__repr__()" that are ' + 'implemented by\n' + 'all objects, including type objects. If the implicit ' + 'lookup of these\n' + 'methods used the conventional lookup process, they would ' + 'fail when\n' + 'invoked on the type object itself:\n' + '\n' + ' >>> 1 .__hash__() == hash(1)\n' + ' True\n' + ' >>> int.__hash__() == hash(int)\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " TypeError: descriptor '__hash__' of 'int' object needs " + 'an argument\n' + '\n' + 'Incorrectly attempting to invoke an unbound method of a ' + 'class in this\n' + "way is sometimes referred to as 'metaclass confusion', and " + 'is avoided\n' + 'by bypassing the instance when looking up special ' + 'methods:\n' + '\n' + ' >>> type(1).__hash__(1) == hash(1)\n' + ' True\n' + ' >>> type(int).__hash__(int) == hash(int)\n' + ' True\n' + '\n' + 'In addition to bypassing any instance attributes in the ' + 'interest of\n' + 'correctness, implicit special method lookup generally also ' + 'bypasses\n' + 'the "__getattribute__()" method even of the object\'s ' + 'metaclass:\n' + '\n' + ' >>> class Meta(type):\n' + ' ... def __getattribute__(*args):\n' + ' ... print("Metaclass getattribute invoked")\n' + ' ... return type.__getattribute__(*args)\n' + ' ...\n' + ' >>> class C(object, metaclass=Meta):\n' + ' ... def __len__(self):\n' + ' ... return 10\n' + ' ... def __getattribute__(*args):\n' + ' ... print("Class getattribute invoked")\n' + ' ... return object.__getattribute__(*args)\n' + ' ...\n' + ' >>> c = C()\n' + ' >>> c.__len__() # Explicit lookup via ' + 'instance\n' + ' Class getattribute invoked\n' + ' 10\n' + ' >>> type(c).__len__(c) # Explicit lookup via ' + 'type\n' + ' Metaclass getattribute invoked\n' + ' 10\n' + ' >>> len(c) # Implicit lookup\n' + ' 10\n' + '\n' + 'Bypassing the "__getattribute__()" machinery in this ' + 'fashion provides\n' + 'significant scope for speed optimisations within the ' + 'interpreter, at\n' + 'the cost of some flexibility in the handling of special ' + 'methods (the\n' + 'special method *must* be set on the class object itself in ' + 'order to be\n' + 'consistently invoked by the interpreter).\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + "[1] It *is* possible in some cases to change an object's " + 'type,\n' + ' under certain controlled conditions. 
It generally ' + "isn't a good\n" + ' idea though, since it can lead to some very strange ' + 'behaviour if\n' + ' it is handled incorrectly.\n' + '\n' + '[2] For operands of the same type, it is assumed that if ' + 'the non-\n' + ' reflected method (such as "__add__()") fails the ' + 'operation is not\n' + ' supported, which is why the reflected method is not ' + 'called.\n', + 'string-methods': '\n' + 'String Methods\n' + '**************\n' + '\n' + 'Strings implement all of the *common* sequence ' + 'operations, along with\n' + 'the additional methods described below.\n' + '\n' + 'Strings also support two styles of string formatting, ' + 'one providing a\n' + 'large degree of flexibility and customization (see ' + '"str.format()",\n' + '*Format String Syntax* and *String Formatting*) and the ' + 'other based on\n' + 'C "printf" style formatting that handles a narrower ' + 'range of types and\n' + 'is slightly harder to use correctly, but is often faster ' + 'for the cases\n' + 'it can handle (*printf-style String Formatting*).\n' + '\n' + 'The *Text Processing Services* section of the standard ' + 'library covers\n' + 'a number of other modules that provide various text ' + 'related utilities\n' + '(including regular expression support in the "re" ' + 'module).\n' + '\n' + 'str.capitalize()\n' + '\n' + ' Return a copy of the string with its first character ' + 'capitalized\n' + ' and the rest lowercased.\n' + '\n' + 'str.casefold()\n' + '\n' + ' Return a casefolded copy of the string. Casefolded ' + 'strings may be\n' + ' used for caseless matching.\n' + '\n' + ' Casefolding is similar to lowercasing but more ' + 'aggressive because\n' + ' it is intended to remove all case distinctions in a ' + 'string. For\n' + ' example, the German lowercase letter "\'?\'" is ' + 'equivalent to ""ss"".\n' + ' Since it is already lowercase, "lower()" would do ' + 'nothing to "\'?\'";\n' + ' "casefold()" converts it to ""ss"".\n' + '\n' + ' The casefolding algorithm is described in section ' + '3.13 of the\n' + ' Unicode Standard.\n' + '\n' + ' New in version 3.3.\n' + '\n' + 'str.center(width[, fillchar])\n' + '\n' + ' Return centered in a string of length *width*. ' + 'Padding is done\n' + ' using the specified *fillchar* (default is an ASCII ' + 'space). The\n' + ' original string is returned if *width* is less than ' + 'or equal to\n' + ' "len(s)".\n' + '\n' + 'str.count(sub[, start[, end]])\n' + '\n' + ' Return the number of non-overlapping occurrences of ' + 'substring *sub*\n' + ' in the range [*start*, *end*]. Optional arguments ' + '*start* and\n' + ' *end* are interpreted as in slice notation.\n' + '\n' + 'str.encode(encoding="utf-8", errors="strict")\n' + '\n' + ' Return an encoded version of the string as a bytes ' + 'object. Default\n' + ' encoding is "\'utf-8\'". *errors* may be given to set ' + 'a different\n' + ' error handling scheme. The default for *errors* is ' + '"\'strict\'",\n' + ' meaning that encoding errors raise a "UnicodeError". ' + 'Other possible\n' + ' values are "\'ignore\'", "\'replace\'", ' + '"\'xmlcharrefreplace\'",\n' + ' "\'backslashreplace\'" and any other name registered ' + 'via\n' + ' "codecs.register_error()", see section *Codec Base ' + 'Classes*. 
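For the *errors* handlers just listed, a brief illustration in the same interactive style as the surrounding examples (the sample string is arbitrary):

    >>> 'café'.encode('ascii', errors='replace')
    b'caf?'
    >>> 'café'.encode('ascii', errors='xmlcharrefreplace')
    b'caf&#233;'
    >>> 'café'.encode('ascii', errors='ignore')
    b'caf'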
For a\n' + ' list of possible encodings, see section *Standard ' + 'Encodings*.\n' + '\n' + ' Changed in version 3.1: Support for keyword arguments ' + 'added.\n' + '\n' + 'str.endswith(suffix[, start[, end]])\n' + '\n' + ' Return "True" if the string ends with the specified ' + '*suffix*,\n' + ' otherwise return "False". *suffix* can also be a ' + 'tuple of suffixes\n' + ' to look for. With optional *start*, test beginning ' + 'at that\n' + ' position. With optional *end*, stop comparing at ' + 'that position.\n' + '\n' + 'str.expandtabs(tabsize=8)\n' + '\n' + ' Return a copy of the string where all tab characters ' + 'are replaced\n' + ' by one or more spaces, depending on the current ' + 'column and the\n' + ' given tab size. Tab positions occur every *tabsize* ' + 'characters\n' + ' (default is 8, giving tab positions at columns 0, 8, ' + '16 and so on).\n' + ' To expand the string, the current column is set to ' + 'zero and the\n' + ' string is examined character by character. If the ' + 'character is a\n' + ' tab ("\\t"), one or more space characters are ' + 'inserted in the result\n' + ' until the current column is equal to the next tab ' + 'position. (The\n' + ' tab character itself is not copied.) If the ' + 'character is a newline\n' + ' ("\\n") or return ("\\r"), it is copied and the ' + 'current column is\n' + ' reset to zero. Any other character is copied ' + 'unchanged and the\n' + ' current column is incremented by one regardless of ' + 'how the\n' + ' character is represented when printed.\n' + '\n' + " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n" + " '01 012 0123 01234'\n" + " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n" + " '01 012 0123 01234'\n" + '\n' + 'str.find(sub[, start[, end]])\n' + '\n' + ' Return the lowest index in the string where substring ' + '*sub* is\n' + ' found, such that *sub* is contained in the slice ' + '"s[start:end]".\n' + ' Optional arguments *start* and *end* are interpreted ' + 'as in slice\n' + ' notation. Return "-1" if *sub* is not found.\n' + '\n' + ' Note: The "find()" method should be used only if you ' + 'need to know\n' + ' the position of *sub*. To check if *sub* is a ' + 'substring or not,\n' + ' use the "in" operator:\n' + '\n' + " >>> 'Py' in 'Python'\n" + ' True\n' + '\n' + 'str.format(*args, **kwargs)\n' + '\n' + ' Perform a string formatting operation. The string on ' + 'which this\n' + ' method is called can contain literal text or ' + 'replacement fields\n' + ' delimited by braces "{}". Each replacement field ' + 'contains either\n' + ' the numeric index of a positional argument, or the ' + 'name of a\n' + ' keyword argument. Returns a copy of the string where ' + 'each\n' + ' replacement field is replaced with the string value ' + 'of the\n' + ' corresponding argument.\n' + '\n' + ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n' + " 'The sum of 1 + 2 is 3'\n" + '\n' + ' See *Format String Syntax* for a description of the ' + 'various\n' + ' formatting options that can be specified in format ' + 'strings.\n' + '\n' + 'str.format_map(mapping)\n' + '\n' + ' Similar to "str.format(**mapping)", except that ' + '"mapping" is used\n' + ' directly and not copied to a "dict". This is useful ' + 'if for example\n' + ' "mapping" is a dict subclass:\n' + '\n' + ' >>> class Default(dict):\n' + ' ... def __missing__(self, key):\n' + ' ... 
return key\n' + ' ...\n' + " >>> '{name} was born in " + "{country}'.format_map(Default(name='Guido'))\n" + " 'Guido was born in country'\n" + '\n' + ' New in version 3.2.\n' + '\n' + 'str.index(sub[, start[, end]])\n' + '\n' + ' Like "find()", but raise "ValueError" when the ' + 'substring is not\n' + ' found.\n' + '\n' + 'str.isalnum()\n' + '\n' + ' Return true if all characters in the string are ' + 'alphanumeric and\n' + ' there is at least one character, false otherwise. A ' + 'character "c"\n' + ' is alphanumeric if one of the following returns ' + '"True":\n' + ' "c.isalpha()", "c.isdecimal()", "c.isdigit()", or ' + '"c.isnumeric()".\n' + '\n' + 'str.isalpha()\n' + '\n' + ' Return true if all characters in the string are ' + 'alphabetic and\n' + ' there is at least one character, false otherwise. ' + 'Alphabetic\n' + ' characters are those characters defined in the ' + 'Unicode character\n' + ' database as "Letter", i.e., those with general ' + 'category property\n' + ' being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note ' + 'that this is\n' + ' different from the "Alphabetic" property defined in ' + 'the Unicode\n' + ' Standard.\n' + '\n' + 'str.isdecimal()\n' + '\n' + ' Return true if all characters in the string are ' + 'decimal characters\n' + ' and there is at least one character, false otherwise. ' + 'Decimal\n' + ' characters are those from general category "Nd". This ' + 'category\n' + ' includes digit characters, and all characters that ' + 'can be used to\n' + ' form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC ' + 'DIGIT ZERO.\n' + '\n' + 'str.isdigit()\n' + '\n' + ' Return true if all characters in the string are ' + 'digits and there is\n' + ' at least one character, false otherwise. Digits ' + 'include decimal\n' + ' characters and digits that need special handling, ' + 'such as the\n' + ' compatibility superscript digits. Formally, a digit ' + 'is a character\n' + ' that has the property value Numeric_Type=Digit or\n' + ' Numeric_Type=Decimal.\n' + '\n' + 'str.isidentifier()\n' + '\n' + ' Return true if the string is a valid identifier ' + 'according to the\n' + ' language definition, section *Identifiers and ' + 'keywords*.\n' + '\n' + ' Use "keyword.iskeyword()" to test for reserved ' + 'identifiers such as\n' + ' "def" and "class".\n' + '\n' + 'str.islower()\n' + '\n' + ' Return true if all cased characters [4] in the string ' + 'are lowercase\n' + ' and there is at least one cased character, false ' + 'otherwise.\n' + '\n' + 'str.isnumeric()\n' + '\n' + ' Return true if all characters in the string are ' + 'numeric characters,\n' + ' and there is at least one character, false otherwise. ' + 'Numeric\n' + ' characters include digit characters, and all ' + 'characters that have\n' + ' the Unicode numeric value property, e.g. U+2155, ' + 'VULGAR FRACTION\n' + ' ONE FIFTH. Formally, numeric characters are those ' + 'with the\n' + ' property value Numeric_Type=Digit, ' + 'Numeric_Type=Decimal or\n' + ' Numeric_Type=Numeric.\n' + '\n' + 'str.isprintable()\n' + '\n' + ' Return true if all characters in the string are ' + 'printable or the\n' + ' string is empty, false otherwise. Nonprintable ' + 'characters are\n' + ' those characters defined in the Unicode character ' + 'database as\n' + ' "Other" or "Separator", excepting the ASCII space ' + '(0x20) which is\n' + ' considered printable. (Note that printable ' + 'characters in this\n' + ' context are those which should not be escaped when ' + '"repr()" is\n' + ' invoked on a string. 
It has no bearing on the ' + 'handling of strings\n' + ' written to "sys.stdout" or "sys.stderr".)\n' + '\n' + 'str.isspace()\n' + '\n' + ' Return true if there are only whitespace characters ' + 'in the string\n' + ' and there is at least one character, false ' + 'otherwise. Whitespace\n' + ' characters are those characters defined in the ' + 'Unicode character\n' + ' database as "Other" or "Separator" and those with ' + 'bidirectional\n' + ' property being one of "WS", "B", or "S".\n' + '\n' + 'str.istitle()\n' + '\n' + ' Return true if the string is a titlecased string and ' + 'there is at\n' + ' least one character, for example uppercase characters ' + 'may only\n' + ' follow uncased characters and lowercase characters ' + 'only cased ones.\n' + ' Return false otherwise.\n' + '\n' + 'str.isupper()\n' + '\n' + ' Return true if all cased characters [4] in the string ' + 'are uppercase\n' + ' and there is at least one cased character, false ' + 'otherwise.\n' + '\n' + 'str.join(iterable)\n' + '\n' + ' Return a string which is the concatenation of the ' + 'strings in the\n' + ' *iterable* *iterable*. A "TypeError" will be raised ' + 'if there are\n' + ' any non-string values in *iterable*, including ' + '"bytes" objects.\n' + ' The separator between elements is the string ' + 'providing this method.\n' + '\n' + 'str.ljust(width[, fillchar])\n' + '\n' + ' Return the string left justified in a string of ' + 'length *width*.\n' + ' Padding is done using the specified *fillchar* ' + '(default is an ASCII\n' + ' space). The original string is returned if *width* is ' + 'less than or\n' + ' equal to "len(s)".\n' + '\n' + 'str.lower()\n' + '\n' + ' Return a copy of the string with all the cased ' + 'characters [4]\n' + ' converted to lowercase.\n' + '\n' + ' The lowercasing algorithm used is described in ' + 'section 3.13 of the\n' + ' Unicode Standard.\n' + '\n' + 'str.lstrip([chars])\n' + '\n' + ' Return a copy of the string with leading characters ' + 'removed. The\n' + ' *chars* argument is a string specifying the set of ' + 'characters to be\n' + ' removed. If omitted or "None", the *chars* argument ' + 'defaults to\n' + ' removing whitespace. The *chars* argument is not a ' + 'prefix; rather,\n' + ' all combinations of its values are stripped:\n' + '\n' + " >>> ' spacious '.lstrip()\n" + " 'spacious '\n" + " >>> 'www.example.com'.lstrip('cmowz.')\n" + " 'example.com'\n" + '\n' + 'static str.maketrans(x[, y[, z]])\n' + '\n' + ' This static method returns a translation table usable ' + 'for\n' + ' "str.translate()".\n' + '\n' + ' If there is only one argument, it must be a ' + 'dictionary mapping\n' + ' Unicode ordinals (integers) or characters (strings of ' + 'length 1) to\n' + ' Unicode ordinals, strings (of arbitrary lengths) or ' + 'None.\n' + ' Character keys will then be converted to ordinals.\n' + '\n' + ' If there are two arguments, they must be strings of ' + 'equal length,\n' + ' and in the resulting dictionary, each character in x ' + 'will be mapped\n' + ' to the character at the same position in y. If there ' + 'is a third\n' + ' argument, it must be a string, whose characters will ' + 'be mapped to\n' + ' None in the result.\n' + '\n' + 'str.partition(sep)\n' + '\n' + ' Split the string at the first occurrence of *sep*, ' + 'and return a\n' + ' 3-tuple containing the part before the separator, the ' + 'separator\n' + ' itself, and the part after the separator. 
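An illustrative pairing of the "str.maketrans()" table described above with "str.translate()" (covered further below); the sample mapping is arbitrary:

    >>> table = str.maketrans('aeiou', '*****', 'xyz')
    >>> 'lazy syntax'.translate(table)
    'l* snt*'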
If the ' + 'separator is not\n' + ' found, return a 3-tuple containing the string itself, ' + 'followed by\n' + ' two empty strings.\n' + '\n' + 'str.replace(old, new[, count])\n' + '\n' + ' Return a copy of the string with all occurrences of ' + 'substring *old*\n' + ' replaced by *new*. If the optional argument *count* ' + 'is given, only\n' + ' the first *count* occurrences are replaced.\n' + '\n' + 'str.rfind(sub[, start[, end]])\n' + '\n' + ' Return the highest index in the string where ' + 'substring *sub* is\n' + ' found, such that *sub* is contained within ' + '"s[start:end]".\n' + ' Optional arguments *start* and *end* are interpreted ' + 'as in slice\n' + ' notation. Return "-1" on failure.\n' + '\n' + 'str.rindex(sub[, start[, end]])\n' + '\n' + ' Like "rfind()" but raises "ValueError" when the ' + 'substring *sub* is\n' + ' not found.\n' + '\n' + 'str.rjust(width[, fillchar])\n' + '\n' + ' Return the string right justified in a string of ' + 'length *width*.\n' + ' Padding is done using the specified *fillchar* ' + '(default is an ASCII\n' + ' space). The original string is returned if *width* is ' + 'less than or\n' + ' equal to "len(s)".\n' + '\n' + 'str.rpartition(sep)\n' + '\n' + ' Split the string at the last occurrence of *sep*, and ' + 'return a\n' + ' 3-tuple containing the part before the separator, the ' + 'separator\n' + ' itself, and the part after the separator. If the ' + 'separator is not\n' + ' found, return a 3-tuple containing two empty strings, ' + 'followed by\n' + ' the string itself.\n' + '\n' + 'str.rsplit(sep=None, maxsplit=-1)\n' + '\n' + ' Return a list of the words in the string, using *sep* ' + 'as the\n' + ' delimiter string. If *maxsplit* is given, at most ' + '*maxsplit* splits\n' + ' are done, the *rightmost* ones. If *sep* is not ' + 'specified or\n' + ' "None", any whitespace string is a separator. Except ' + 'for splitting\n' + ' from the right, "rsplit()" behaves like "split()" ' + 'which is\n' + ' described in detail below.\n' + '\n' + 'str.rstrip([chars])\n' + '\n' + ' Return a copy of the string with trailing characters ' + 'removed. The\n' + ' *chars* argument is a string specifying the set of ' + 'characters to be\n' + ' removed. If omitted or "None", the *chars* argument ' + 'defaults to\n' + ' removing whitespace. The *chars* argument is not a ' + 'suffix; rather,\n' + ' all combinations of its values are stripped:\n' + '\n' + " >>> ' spacious '.rstrip()\n" + " ' spacious'\n" + " >>> 'mississippi'.rstrip('ipz')\n" + " 'mississ'\n" + '\n' + 'str.split(sep=None, maxsplit=-1)\n' + '\n' + ' Return a list of the words in the string, using *sep* ' + 'as the\n' + ' delimiter string. If *maxsplit* is given, at most ' + '*maxsplit*\n' + ' splits are done (thus, the list will have at most ' + '"maxsplit+1"\n' + ' elements). If *maxsplit* is not specified or "-1", ' + 'then there is\n' + ' no limit on the number of splits (all possible splits ' + 'are made).\n' + '\n' + ' If *sep* is given, consecutive delimiters are not ' + 'grouped together\n' + ' and are deemed to delimit empty strings (for ' + 'example,\n' + ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', ' + '\'2\']"). The *sep* argument\n' + ' may consist of multiple characters (for example,\n' + ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', ' + '\'3\']"). 
Splitting an\n' + ' empty string with a specified separator returns ' + '"[\'\']".\n' + '\n' + ' For example:\n' + '\n' + " >>> '1,2,3'.split(',')\n" + " ['1', '2', '3']\n" + " >>> '1,2,3'.split(',', maxsplit=1)\n" + " ['1', '2 3']\n" + " >>> '1,2,,3,'.split(',')\n" + " ['1', '2', '', '3', '']\n" + '\n' + ' If *sep* is not specified or is "None", a different ' + 'splitting\n' + ' algorithm is applied: runs of consecutive whitespace ' + 'are regarded\n' + ' as a single separator, and the result will contain no ' + 'empty strings\n' + ' at the start or end if the string has leading or ' + 'trailing\n' + ' whitespace. Consequently, splitting an empty string ' + 'or a string\n' + ' consisting of just whitespace with a "None" separator ' + 'returns "[]".\n' + '\n' + ' For example:\n' + '\n' + " >>> '1 2 3'.split()\n" + " ['1', '2', '3']\n" + " >>> '1 2 3'.split(maxsplit=1)\n" + " ['1', '2 3']\n" + " >>> ' 1 2 3 '.split()\n" + " ['1', '2', '3']\n" + '\n' + 'str.splitlines([keepends])\n' + '\n' + ' Return a list of the lines in the string, breaking at ' + 'line\n' + ' boundaries. This method uses the *universal newlines* ' + 'approach to\n' + ' splitting lines. Line breaks are not included in the ' + 'resulting list\n' + ' unless *keepends* is given and true.\n' + '\n' + ' For example:\n' + '\n' + " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n" + " ['ab c', '', 'de fg', 'kl']``\n" + " >>> 'ab c\\n\\nde " + "fg\\rkl\\r\\n'.splitlines(keepends=True)\n" + " ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n" + '\n' + ' Unlike "split()" when a delimiter string *sep* is ' + 'given, this\n' + ' method returns an empty list for the empty string, ' + 'and a terminal\n' + ' line break does not result in an extra line:\n' + '\n' + ' >>> "".splitlines()\n' + ' []\n' + ' >>> "One line\\n".splitlines()\n' + " ['One line']\n" + '\n' + ' For comparison, "split(\'\\n\')" gives:\n' + '\n' + " >>> ''.split('\\n')\n" + " ['']\n" + " >>> 'Two lines\\n'.split('\\n')\n" + " ['Two lines', '']\n" + '\n' + 'str.startswith(prefix[, start[, end]])\n' + '\n' + ' Return "True" if string starts with the *prefix*, ' + 'otherwise return\n' + ' "False". *prefix* can also be a tuple of prefixes to ' + 'look for.\n' + ' With optional *start*, test string beginning at that ' + 'position.\n' + ' With optional *end*, stop comparing string at that ' + 'position.\n' + '\n' + 'str.strip([chars])\n' + '\n' + ' Return a copy of the string with the leading and ' + 'trailing\n' + ' characters removed. The *chars* argument is a string ' + 'specifying the\n' + ' set of characters to be removed. If omitted or ' + '"None", the *chars*\n' + ' argument defaults to removing whitespace. The *chars* ' + 'argument is\n' + ' not a prefix or suffix; rather, all combinations of ' + 'its values are\n' + ' stripped:\n' + '\n' + " >>> ' spacious '.strip()\n" + " 'spacious'\n" + " >>> 'www.example.com'.strip('cmowz.')\n" + " 'example'\n" + '\n' + 'str.swapcase()\n' + '\n' + ' Return a copy of the string with uppercase characters ' + 'converted to\n' + ' lowercase and vice versa. 
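For example, "startswith()" accepts a tuple of prefixes, and "swapcase()" flips the case of each cased character (a small illustrative sketch):

    >>> 'archive.tar.gz'.startswith(('http://', 'ftp://'))
    False
    >>> 'Hello World'.swapcase()
    'hELLO wORLD'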
Note that it is not ' + 'necessarily true that\n' + ' "s.swapcase().swapcase() == s".\n' + '\n' + 'str.title()\n' + '\n' + ' Return a titlecased version of the string where words ' + 'start with an\n' + ' uppercase character and the remaining characters are ' + 'lowercase.\n' + '\n' + ' For example:\n' + '\n' + " >>> 'Hello world'.title()\n" + " 'Hello World'\n" + '\n' + ' The algorithm uses a simple language-independent ' + 'definition of a\n' + ' word as groups of consecutive letters. The ' + 'definition works in\n' + ' many contexts but it means that apostrophes in ' + 'contractions and\n' + ' possessives form word boundaries, which may not be ' + 'the desired\n' + ' result:\n' + '\n' + ' >>> "they\'re bill\'s friends from the ' + 'UK".title()\n' + ' "They\'Re Bill\'S Friends From The Uk"\n' + '\n' + ' A workaround for apostrophes can be constructed using ' + 'regular\n' + ' expressions:\n' + '\n' + ' >>> import re\n' + ' >>> def titlecase(s):\n' + ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n' + ' ... lambda mo: ' + 'mo.group(0)[0].upper() +\n' + ' ... ' + 'mo.group(0)[1:].lower(),\n' + ' ... s)\n' + ' ...\n' + ' >>> titlecase("they\'re bill\'s friends.")\n' + ' "They\'re Bill\'s Friends."\n' + '\n' + 'str.translate(map)\n' + '\n' + ' Return a copy of the *s* where all characters have ' + 'been mapped\n' + ' through the *map* which must be a dictionary of ' + 'Unicode ordinals\n' + ' (integers) to Unicode ordinals, strings or "None". ' + 'Unmapped\n' + ' characters are left untouched. Characters mapped to ' + '"None" are\n' + ' deleted.\n' + '\n' + ' You can use "str.maketrans()" to create a translation ' + 'map from\n' + ' character-to-character mappings in different ' + 'formats.\n' + '\n' + ' Note: An even more flexible approach is to create a ' + 'custom\n' + ' character mapping codec using the "codecs" module ' + '(see\n' + ' "encodings.cp1251" for an example).\n' + '\n' + 'str.upper()\n' + '\n' + ' Return a copy of the string with all the cased ' + 'characters [4]\n' + ' converted to uppercase. Note that ' + '"str.upper().isupper()" might be\n' + ' "False" if "s" contains uncased characters or if the ' + 'Unicode\n' + ' category of the resulting character(s) is not "Lu" ' + '(Letter,\n' + ' uppercase), but e.g. "Lt" (Letter, titlecase).\n' + '\n' + ' The uppercasing algorithm used is described in ' + 'section 3.13 of the\n' + ' Unicode Standard.\n' + '\n' + 'str.zfill(width)\n' + '\n' + ' Return a copy of the string left filled with ASCII ' + '"\'0\'" digits to\n' + ' make a string of length *width*. A leading sign ' + 'prefix ("\'+\'"/"\'-\'"\n' + ' is handled by inserting the padding *after* the sign ' + 'character\n' + ' rather than before. 
The original string is returned ' + 'if *width* is\n' + ' less than or equal to "len(s)".\n' + '\n' + ' For example:\n' + '\n' + ' >>> "42".zfill(5)\n' + " '00042'\n" + ' >>> "-42".zfill(5)\n' + " '-0042'\n", + 'strings': '\n' + 'String and Bytes literals\n' + '*************************\n' + '\n' + 'String literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' stringliteral ::= [stringprefix](shortstring | ' + 'longstring)\n' + ' stringprefix ::= "r" | "u" | "R" | "U"\n' + ' shortstring ::= "\'" shortstringitem* "\'" | \'"\' ' + 'shortstringitem* \'"\'\n' + ' longstring ::= "\'\'\'" longstringitem* "\'\'\'" | ' + '\'"""\' longstringitem* \'"""\'\n' + ' shortstringitem ::= shortstringchar | stringescapeseq\n' + ' longstringitem ::= longstringchar | stringescapeseq\n' + ' shortstringchar ::= \n' + ' longstringchar ::= \n' + ' stringescapeseq ::= "\\" \n' + '\n' + ' bytesliteral ::= bytesprefix(shortbytes | longbytes)\n' + ' bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | ' + '"rb" | "rB" | "Rb" | "RB"\n' + ' shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' ' + 'shortbytesitem* \'"\'\n' + ' longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | ' + '\'"""\' longbytesitem* \'"""\'\n' + ' shortbytesitem ::= shortbyteschar | bytesescapeseq\n' + ' longbytesitem ::= longbyteschar | bytesescapeseq\n' + ' shortbyteschar ::= \n' + ' longbyteschar ::= \n' + ' bytesescapeseq ::= "\\" \n' + '\n' + 'One syntactic restriction not indicated by these productions is ' + 'that\n' + 'whitespace is not allowed between the "stringprefix" or ' + '"bytesprefix"\n' + 'and the rest of the literal. The source character set is ' + 'defined by\n' + 'the encoding declaration; it is UTF-8 if no encoding ' + 'declaration is\n' + 'given in the source file; see section *Encoding declarations*.\n' + '\n' + 'In plain English: Both types of literals can be enclosed in ' + 'matching\n' + 'single quotes ("\'") or double quotes ("""). They can also be ' + 'enclosed\n' + 'in matching groups of three single or double quotes (these are\n' + 'generally referred to as *triple-quoted strings*). The ' + 'backslash\n' + '("\\") character is used to escape characters that otherwise ' + 'have a\n' + 'special meaning, such as newline, backslash itself, or the ' + 'quote\n' + 'character.\n' + '\n' + 'Bytes literals are always prefixed with "\'b\'" or "\'B\'"; ' + 'they produce\n' + 'an instance of the "bytes" type instead of the "str" type. ' + 'They may\n' + 'only contain ASCII characters; bytes with a numeric value of ' + '128 or\n' + 'greater must be expressed with escapes.\n' + '\n' + 'As of Python 3.3 it is possible again to prefix unicode strings ' + 'with a\n' + '"u" prefix to simplify maintenance of dual 2.x and 3.x ' + 'codebases.\n' + '\n' + 'Both string and bytes literals may optionally be prefixed with ' + 'a\n' + 'letter "\'r\'" or "\'R\'"; such strings are called *raw ' + 'strings* and treat\n' + 'backslashes as literal characters. As a result, in string ' + 'literals,\n' + '"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated ' + 'specially.\n' + "Given that Python 2.x's raw unicode literals behave differently " + 'than\n' + 'Python 3.x\'s the "\'ur\'" syntax is not supported.\n' + '\n' + ' New in version 3.3: The "\'rb\'" prefix of raw bytes ' + 'literals has\n' + ' been added as a synonym of "\'br\'".\n' + '\n' + ' New in version 3.3: Support for the unicode legacy literal\n' + ' ("u\'value\'") was reintroduced to simplify the maintenance ' + 'of dual\n' + ' Python 2.x and 3.x codebases. 
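As a quick illustration of the prefixes discussed above (assuming Python 3.3+ semantics): "\u" is not treated specially inside raw strings, and the legacy "u" prefix is accepted again:

    >>> r'\u0041' == '\\u0041'
    True
    >>> u'text' == 'text'
    True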
See **PEP 414** for more ' + 'information.\n' + '\n' + 'In triple-quoted strings, unescaped newlines and quotes are ' + 'allowed\n' + '(and are retained), except that three unescaped quotes in a ' + 'row\n' + 'terminate the string. (A "quote" is the character used to open ' + 'the\n' + 'string, i.e. either "\'" or """.)\n' + '\n' + 'Unless an "\'r\'" or "\'R\'" prefix is present, escape ' + 'sequences in\n' + 'strings are interpreted according to rules similar to those ' + 'used by\n' + 'Standard C. The recognized escape sequences are:\n' + '\n' + '+-------------------+-----------------------------------+---------+\n' + '| Escape Sequence | Meaning | ' + 'Notes |\n' + '+===================+===================================+=========+\n' + '| "\\newline" | Backslash and newline ignored ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\\\" | Backslash ("\\") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\\'" | Single quote ("\'") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\"" | Double quote (""") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\a" | ASCII Bell (BEL) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\b" | ASCII Backspace (BS) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\f" | ASCII Formfeed (FF) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\n" | ASCII Linefeed (LF) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\r" | ASCII Carriage Return (CR) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\t" | ASCII Horizontal Tab (TAB) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\v" | ASCII Vertical Tab (VT) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\ooo" | Character with octal value *ooo* | ' + '(1,3) |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\xhh" | Character with hex value *hh* | ' + '(2,3) |\n' + '+-------------------+-----------------------------------+---------+\n' + '\n' + 'Escape sequences only recognized in string literals are:\n' + '\n' + '+-------------------+-----------------------------------+---------+\n' + '| Escape Sequence | Meaning | ' + 'Notes |\n' + '+===================+===================================+=========+\n' + '| "\\N{name}" | Character named *name* in the | ' + '(4) |\n' + '| | Unicode database ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\uxxxx" | Character with 16-bit hex value | ' + '(5) |\n' + '| | *xxxx* ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\Uxxxxxxxx" | Character with 32-bit hex value | ' + '(6) |\n' + '| | *xxxxxxxx* ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '\n' + 'Notes:\n' + '\n' + '1. As in Standard C, up to three octal digits are accepted.\n' + '\n' + '2. Unlike in Standard C, exactly two hex digits are required.\n' + '\n' + '3. In a bytes literal, hexadecimal and octal escapes denote ' + 'the\n' + ' byte with the given value. In a string literal, these ' + 'escapes\n' + ' denote a Unicode character with the given value.\n' + '\n' + '4. 
Changed in version 3.3: Support for name aliases [1] has ' + 'been\n' + ' added.\n' + '\n' + '5. Individual code units which form parts of a surrogate pair ' + 'can\n' + ' be encoded using this escape sequence. Exactly four hex ' + 'digits are\n' + ' required.\n' + '\n' + '6. Any Unicode character can be encoded this way. Exactly ' + 'eight\n' + ' hex digits are required.\n' + '\n' + 'Unlike Standard C, all unrecognized escape sequences are left ' + 'in the\n' + 'string unchanged, i.e., *the backslash is left in the string*. ' + '(This\n' + 'behavior is useful when debugging: if an escape sequence is ' + 'mistyped,\n' + 'the resulting output is more easily recognized as broken.) It ' + 'is also\n' + 'important to note that the escape sequences only recognized in ' + 'string\n' + 'literals fall into the category of unrecognized escapes for ' + 'bytes\n' + 'literals.\n' + '\n' + 'Even in a raw string, string quotes can be escaped with a ' + 'backslash,\n' + 'but the backslash remains in the string; for example, "r"\\""" ' + 'is a\n' + 'valid string literal consisting of two characters: a backslash ' + 'and a\n' + 'double quote; "r"\\"" is not a valid string literal (even a raw ' + 'string\n' + 'cannot end in an odd number of backslashes). Specifically, *a ' + 'raw\n' + 'string cannot end in a single backslash* (since the backslash ' + 'would\n' + 'escape the following quote character). Note also that a ' + 'single\n' + 'backslash followed by a newline is interpreted as those two ' + 'characters\n' + 'as part of the string, *not* as a line continuation.\n', + 'subscriptions': '\n' + 'Subscriptions\n' + '*************\n' + '\n' + 'A subscription selects an item of a sequence (string, ' + 'tuple or list)\n' + 'or mapping (dictionary) object:\n' + '\n' + ' subscription ::= primary "[" expression_list "]"\n' + '\n' + 'The primary must evaluate to an object that supports ' + 'subscription\n' + '(lists or dictionaries for example). User-defined ' + 'objects can support\n' + 'subscription by defining a "__getitem__()" method.\n' + '\n' + 'For built-in objects, there are two types of objects that ' + 'support\n' + 'subscription:\n' + '\n' + 'If the primary is a mapping, the expression list must ' + 'evaluate to an\n' + 'object whose value is one of the keys of the mapping, and ' + 'the\n' + 'subscription selects the value in the mapping that ' + 'corresponds to that\n' + 'key. (The expression list is a tuple except if it has ' + 'exactly one\n' + 'item.)\n' + '\n' + 'If the primary is a sequence, the expression (list) must ' + 'evaluate to\n' + 'an integer or a slice (as discussed in the following ' + 'section).\n' + '\n' + 'The formal syntax makes no special provision for negative ' + 'indices in\n' + 'sequences; however, built-in sequences all provide a ' + '"__getitem__()"\n' + 'method that interprets negative indices by adding the ' + 'length of the\n' + 'sequence to the index (so that "x[-1]" selects the last ' + 'item of "x").\n' + 'The resulting value must be a nonnegative integer less ' + 'than the number\n' + 'of items in the sequence, and the subscription selects ' + 'the item whose\n' + 'index is that value (counting from zero). Since the ' + 'support for\n' + "negative indices and slicing occurs in the object's " + '"__getitem__()"\n' + 'method, subclasses overriding this method will need to ' + 'explicitly add\n' + 'that support.\n' + '\n' + "A string's items are characters. 
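For example, a user-defined type can support subscription by defining "__getitem__()", and built-in sequences additionally interpret negative indices (the class name "Squares" is invented for this sketch):

    >>> class Squares:
    ...     def __getitem__(self, index):
    ...         return index * index
    ...
    >>> Squares()[4]
    16
    >>> 'abc'[-1]
    'c'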
A character is not a " + 'separate data\n' + 'type but a string of exactly one character.\n', + 'truth': '\n' + 'Truth Value Testing\n' + '*******************\n' + '\n' + 'Any object can be tested for truth value, for use in an "if" or\n' + '"while" condition or as operand of the Boolean operations below. ' + 'The\n' + 'following values are considered false:\n' + '\n' + '* "None"\n' + '\n' + '* "False"\n' + '\n' + '* zero of any numeric type, for example, "0", "0.0", "0j".\n' + '\n' + '* any empty sequence, for example, "\'\'", "()", "[]".\n' + '\n' + '* any empty mapping, for example, "{}".\n' + '\n' + '* instances of user-defined classes, if the class defines a\n' + ' "__bool__()" or "__len__()" method, when that method returns ' + 'the\n' + ' integer zero or "bool" value "False". [1]\n' + '\n' + 'All other values are considered true --- so objects of many types ' + 'are\n' + 'always true.\n' + '\n' + 'Operations and built-in functions that have a Boolean result ' + 'always\n' + 'return "0" or "False" for false and "1" or "True" for true, ' + 'unless\n' + 'otherwise stated. (Important exception: the Boolean operations ' + '"or"\n' + 'and "and" always return one of their operands.)\n', + 'try': '\n' + 'The "try" statement\n' + '*******************\n' + '\n' + 'The "try" statement specifies exception handlers and/or cleanup ' + 'code\n' + 'for a group of statements:\n' + '\n' + ' try_stmt ::= try1_stmt | try2_stmt\n' + ' try1_stmt ::= "try" ":" suite\n' + ' ("except" [expression ["as" identifier]] ":" ' + 'suite)+\n' + ' ["else" ":" suite]\n' + ' ["finally" ":" suite]\n' + ' try2_stmt ::= "try" ":" suite\n' + ' "finally" ":" suite\n' + '\n' + 'The "except" clause(s) specify one or more exception handlers. When ' + 'no\n' + 'exception occurs in the "try" clause, no exception handler is\n' + 'executed. When an exception occurs in the "try" suite, a search for ' + 'an\n' + 'exception handler is started. This search inspects the except ' + 'clauses\n' + 'in turn until one is found that matches the exception. An ' + 'expression-\n' + 'less except clause, if present, must be last; it matches any\n' + 'exception. For an except clause with an expression, that ' + 'expression\n' + 'is evaluated, and the clause matches the exception if the ' + 'resulting\n' + 'object is "compatible" with the exception. An object is ' + 'compatible\n' + 'with an exception if it is the class or a base class of the ' + 'exception\n' + 'object or a tuple containing an item compatible with the ' + 'exception.\n' + '\n' + 'If no except clause matches the exception, the search for an ' + 'exception\n' + 'handler continues in the surrounding code and on the invocation ' + 'stack.\n' + '[1]\n' + '\n' + 'If the evaluation of an expression in the header of an except ' + 'clause\n' + 'raises an exception, the original search for a handler is canceled ' + 'and\n' + 'a search starts for the new exception in the surrounding code and ' + 'on\n' + 'the call stack (it is treated as if the entire "try" statement ' + 'raised\n' + 'the exception).\n' + '\n' + 'When a matching except clause is found, the exception is assigned ' + 'to\n' + 'the target specified after the "as" keyword in that except clause, ' + 'if\n' + "present, and the except clause's suite is executed. All except\n" + 'clauses must have an executable block. 
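For example, empty containers are false, and an "except" clause naming a base class matches instances of its subclasses (the helper "handled()" is invented for this sketch):

    >>> bool([]), bool({}), bool(0.0)
    (False, False, False)
    >>> def handled(exc_type):
    ...     try:
    ...         raise exc_type()
    ...     except ArithmeticError:
    ...         return True
    ...
    >>> handled(ZeroDivisionError)
    True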
When the end of this block ' + 'is\n' + 'reached, execution continues normally after the entire try ' + 'statement.\n' + '(This means that if two nested handlers exist for the same ' + 'exception,\n' + 'and the exception occurs in the try clause of the inner handler, ' + 'the\n' + 'outer handler will not handle the exception.)\n' + '\n' + 'When an exception has been assigned using "as target", it is ' + 'cleared\n' + 'at the end of the except clause. This is as if\n' + '\n' + ' except E as N:\n' + ' foo\n' + '\n' + 'was translated to\n' + '\n' + ' except E as N:\n' + ' try:\n' + ' foo\n' + ' finally:\n' + ' del N\n' + '\n' + 'This means the exception must be assigned to a different name to ' + 'be\n' + 'able to refer to it after the except clause. Exceptions are ' + 'cleared\n' + 'because with the traceback attached to them, they form a reference\n' + 'cycle with the stack frame, keeping all locals in that frame alive\n' + 'until the next garbage collection occurs.\n' + '\n' + "Before an except clause's suite is executed, details about the\n" + 'exception are stored in the "sys" module and can be accessed via\n' + '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of ' + 'the\n' + 'exception class, the exception instance and a traceback object ' + '(see\n' + 'section *The standard type hierarchy*) identifying the point in ' + 'the\n' + 'program where the exception occurred. "sys.exc_info()" values are\n' + 'restored to their previous values (before the call) when returning\n' + 'from a function that handled an exception.\n' + '\n' + 'The optional "else" clause is executed if and when control flows ' + 'off\n' + 'the end of the "try" clause. [2] Exceptions in the "else" clause ' + 'are\n' + 'not handled by the preceding "except" clauses.\n' + '\n' + 'If "finally" is present, it specifies a \'cleanup\' handler. The ' + '"try"\n' + 'clause is executed, including any "except" and "else" clauses. If ' + 'an\n' + 'exception occurs in any of the clauses and is not handled, the\n' + 'exception is temporarily saved. The "finally" clause is executed. ' + 'If\n' + 'there is a saved exception it is re-raised at the end of the ' + '"finally"\n' + 'clause. If the "finally" clause raises another exception, the ' + 'saved\n' + 'exception is set as the context of the new exception. If the ' + '"finally"\n' + 'clause executes a "return" or "break" statement, the saved ' + 'exception\n' + 'is discarded:\n' + '\n' + ' >>> def f():\n' + ' ... try:\n' + ' ... 1/0\n' + ' ... finally:\n' + ' ... return 42\n' + ' ...\n' + ' >>> f()\n' + ' 42\n' + '\n' + 'The exception information is not available to the program during\n' + 'execution of the "finally" clause.\n' + '\n' + 'When a "return", "break" or "continue" statement is executed in ' + 'the\n' + '"try" suite of a "try"..."finally" statement, the "finally" clause ' + 'is\n' + 'also executed \'on the way out.\' A "continue" statement is illegal ' + 'in\n' + 'the "finally" clause. (The reason is a problem with the current\n' + 'implementation --- this restriction may be lifted in the future).\n' + '\n' + 'The return value of a function is determined by the last "return"\n' + 'statement executed. Since the "finally" clause always executes, a\n' + '"return" statement executed in the "finally" clause will always be ' + 'the\n' + 'last one executed:\n' + '\n' + ' >>> def foo():\n' + ' ... try:\n' + " ... return 'try'\n" + ' ... finally:\n' + " ... 
return 'finally'\n" + ' ...\n' + ' >>> foo()\n' + " 'finally'\n" + '\n' + 'Additional information on exceptions can be found in section\n' + '*Exceptions*, and information on using the "raise" statement to\n' + 'generate exceptions may be found in section *The raise statement*.\n', + 'types': '\n' + 'The standard type hierarchy\n' + '***************************\n' + '\n' + 'Below is a list of the types that are built into Python. ' + 'Extension\n' + 'modules (written in C, Java, or other languages, depending on ' + 'the\n' + 'implementation) can define additional types. Future versions of\n' + 'Python may add types to the type hierarchy (e.g., rational ' + 'numbers,\n' + 'efficiently stored arrays of integers, etc.), although such ' + 'additions\n' + 'will often be provided via the standard library instead.\n' + '\n' + 'Some of the type descriptions below contain a paragraph listing\n' + "'special attributes.' These are attributes that provide access " + 'to the\n' + 'implementation and are not intended for general use. Their ' + 'definition\n' + 'may change in the future.\n' + '\n' + 'None\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the built-in name ' + '"None". It\n' + ' is used to signify the absence of a value in many situations, ' + 'e.g.,\n' + " it is returned from functions that don't explicitly return\n" + ' anything. Its truth value is false.\n' + '\n' + 'NotImplemented\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the built-in name\n' + ' "NotImplemented". Numeric methods and rich comparison methods ' + 'may\n' + ' return this value if they do not implement the operation for ' + 'the\n' + ' operands provided. (The interpreter will then try the ' + 'reflected\n' + ' operation, or some other fallback, depending on the ' + 'operator.) Its\n' + ' truth value is true.\n' + '\n' + 'Ellipsis\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the literal "..." or ' + 'the\n' + ' built-in name "Ellipsis". Its truth value is true.\n' + '\n' + '"numbers.Number"\n' + ' These are created by numeric literals and returned as results ' + 'by\n' + ' arithmetic operators and arithmetic built-in functions. ' + 'Numeric\n' + ' objects are immutable; once created their value never ' + 'changes.\n' + ' Python numbers are of course strongly related to mathematical\n' + ' numbers, but subject to the limitations of numerical ' + 'representation\n' + ' in computers.\n' + '\n' + ' Python distinguishes between integers, floating point numbers, ' + 'and\n' + ' complex numbers:\n' + '\n' + ' "numbers.Integral"\n' + ' These represent elements from the mathematical set of ' + 'integers\n' + ' (positive and negative).\n' + '\n' + ' There are two types of integers:\n' + '\n' + ' Integers ("int")\n' + '\n' + ' These represent numbers in an unlimited range, subject ' + 'to\n' + ' available (virtual) memory only. For the purpose of ' + 'shift\n' + ' and mask operations, a binary representation is assumed, ' + 'and\n' + " negative numbers are represented in a variant of 2's\n" + ' complement which gives the illusion of an infinite ' + 'string of\n' + ' sign bits extending to the left.\n' + '\n' + ' Booleans ("bool")\n' + ' These represent the truth values False and True. The ' + 'two\n' + ' objects representing the values "False" and "True" are ' + 'the\n' + ' only Boolean objects. 
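For example, the Boolean type behaving as an integer subtype can be observed directly (illustrative only):

    >>> issubclass(bool, int)
    True
    >>> True + True
    2
    >>> str(False), str(True)
    ('False', 'True')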
The Boolean type is a subtype of ' + 'the\n' + ' integer type, and Boolean values behave like the values ' + '0 and\n' + ' 1, respectively, in almost all contexts, the exception ' + 'being\n' + ' that when converted to a string, the strings ""False"" ' + 'or\n' + ' ""True"" are returned, respectively.\n' + '\n' + ' The rules for integer representation are intended to give ' + 'the\n' + ' most meaningful interpretation of shift and mask ' + 'operations\n' + ' involving negative integers.\n' + '\n' + ' "numbers.Real" ("float")\n' + ' These represent machine-level double precision floating ' + 'point\n' + ' numbers. You are at the mercy of the underlying machine\n' + ' architecture (and C or Java implementation) for the ' + 'accepted\n' + ' range and handling of overflow. Python does not support ' + 'single-\n' + ' precision floating point numbers; the savings in processor ' + 'and\n' + ' memory usage that are usually the reason for using these ' + 'is\n' + ' dwarfed by the overhead of using objects in Python, so ' + 'there is\n' + ' no reason to complicate the language with two kinds of ' + 'floating\n' + ' point numbers.\n' + '\n' + ' "numbers.Complex" ("complex")\n' + ' These represent complex numbers as a pair of machine-level\n' + ' double precision floating point numbers. The same caveats ' + 'apply\n' + ' as for floating point numbers. The real and imaginary parts ' + 'of a\n' + ' complex number "z" can be retrieved through the read-only\n' + ' attributes "z.real" and "z.imag".\n' + '\n' + 'Sequences\n' + ' These represent finite ordered sets indexed by non-negative\n' + ' numbers. The built-in function "len()" returns the number of ' + 'items\n' + ' of a sequence. When the length of a sequence is *n*, the index ' + 'set\n' + ' contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence ' + '*a* is\n' + ' selected by "a[i]".\n' + '\n' + ' Sequences also support slicing: "a[i:j]" selects all items ' + 'with\n' + ' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n' + ' expression, a slice is a sequence of the same type. This ' + 'implies\n' + ' that the index set is renumbered so that it starts at 0.\n' + '\n' + ' Some sequences also support "extended slicing" with a third ' + '"step"\n' + ' parameter: "a[i:j:k]" selects all items of *a* with index *x* ' + 'where\n' + ' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n' + '\n' + ' Sequences are distinguished according to their mutability:\n' + '\n' + ' Immutable sequences\n' + ' An object of an immutable sequence type cannot change once ' + 'it is\n' + ' created. (If the object contains references to other ' + 'objects,\n' + ' these other objects may be mutable and may be changed; ' + 'however,\n' + ' the collection of objects directly referenced by an ' + 'immutable\n' + ' object cannot change.)\n' + '\n' + ' The following types are immutable sequences:\n' + '\n' + ' Strings\n' + ' A string is a sequence of values that represent Unicode ' + 'code\n' + ' points. All the code points in the range "U+0000 - ' + 'U+10FFFF"\n' + " can be represented in a string. Python doesn't have a " + '"char"\n' + ' type; instead, every code point in the string is ' + 'represented\n' + ' as a string object with length "1". The built-in ' + 'function\n' + ' "ord()" converts a code point from its string form to ' + 'an\n' + ' integer in the range "0 - 10FFFF"; "chr()" converts an\n' + ' integer in the range "0 - 10FFFF" to the corresponding ' + 'length\n' + ' "1" string object. 
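For example, "ord()" and "chr()" convert between a length-1 string and its code point (illustrative only):

    >>> ord('a')
    97
    >>> chr(97)
    'a'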
"str.encode()" can be used to convert ' + 'a\n' + ' "str" to "bytes" using the given text encoding, and\n' + ' "bytes.decode()" can be used to achieve the opposite.\n' + '\n' + ' Tuples\n' + ' The items of a tuple are arbitrary Python objects. ' + 'Tuples of\n' + ' two or more items are formed by comma-separated lists ' + 'of\n' + " expressions. A tuple of one item (a 'singleton') can " + 'be\n' + ' formed by affixing a comma to an expression (an ' + 'expression by\n' + ' itself does not create a tuple, since parentheses must ' + 'be\n' + ' usable for grouping of expressions). An empty tuple can ' + 'be\n' + ' formed by an empty pair of parentheses.\n' + '\n' + ' Bytes\n' + ' A bytes object is an immutable array. The items are ' + '8-bit\n' + ' bytes, represented by integers in the range 0 <= x < ' + '256.\n' + ' Bytes literals (like "b\'abc\'") and the built-in ' + 'function\n' + ' "bytes()" can be used to construct bytes objects. ' + 'Also,\n' + ' bytes objects can be decoded to strings via the ' + '"decode()"\n' + ' method.\n' + '\n' + ' Mutable sequences\n' + ' Mutable sequences can be changed after they are created. ' + 'The\n' + ' subscription and slicing notations can be used as the ' + 'target of\n' + ' assignment and "del" (delete) statements.\n' + '\n' + ' There are currently two intrinsic mutable sequence types:\n' + '\n' + ' Lists\n' + ' The items of a list are arbitrary Python objects. Lists ' + 'are\n' + ' formed by placing a comma-separated list of expressions ' + 'in\n' + ' square brackets. (Note that there are no special cases ' + 'needed\n' + ' to form lists of length 0 or 1.)\n' + '\n' + ' Byte Arrays\n' + ' A bytearray object is a mutable array. They are created ' + 'by\n' + ' the built-in "bytearray()" constructor. Aside from ' + 'being\n' + ' mutable (and hence unhashable), byte arrays otherwise ' + 'provide\n' + ' the same interface and functionality as immutable bytes\n' + ' objects.\n' + '\n' + ' The extension module "array" provides an additional example ' + 'of a\n' + ' mutable sequence type, as does the "collections" module.\n' + '\n' + 'Set types\n' + ' These represent unordered, finite sets of unique, immutable\n' + ' objects. As such, they cannot be indexed by any subscript. ' + 'However,\n' + ' they can be iterated over, and the built-in function "len()"\n' + ' returns the number of items in a set. Common uses for sets are ' + 'fast\n' + ' membership testing, removing duplicates from a sequence, and\n' + ' computing mathematical operations such as intersection, ' + 'union,\n' + ' difference, and symmetric difference.\n' + '\n' + ' For set elements, the same immutability rules apply as for\n' + ' dictionary keys. Note that numeric types obey the normal rules ' + 'for\n' + ' numeric comparison: if two numbers compare equal (e.g., "1" ' + 'and\n' + ' "1.0"), only one of them can be contained in a set.\n' + '\n' + ' There are currently two intrinsic set types:\n' + '\n' + ' Sets\n' + ' These represent a mutable set. They are created by the ' + 'built-in\n' + ' "set()" constructor and can be modified afterwards by ' + 'several\n' + ' methods, such as "add()".\n' + '\n' + ' Frozen sets\n' + ' These represent an immutable set. They are created by the\n' + ' built-in "frozenset()" constructor. As a frozenset is ' + 'immutable\n' + ' and *hashable*, it can be used again as an element of ' + 'another\n' + ' set, or as a dictionary key.\n' + '\n' + 'Mappings\n' + ' These represent finite sets of objects indexed by arbitrary ' + 'index\n' + ' sets. 
The subscript notation "a[k]" selects the item indexed ' + 'by "k"\n' + ' from the mapping "a"; this can be used in expressions and as ' + 'the\n' + ' target of assignments or "del" statements. The built-in ' + 'function\n' + ' "len()" returns the number of items in a mapping.\n' + '\n' + ' There is currently a single intrinsic mapping type:\n' + '\n' + ' Dictionaries\n' + ' These represent finite sets of objects indexed by nearly\n' + ' arbitrary values. The only types of values not acceptable ' + 'as\n' + ' keys are values containing lists or dictionaries or other\n' + ' mutable types that are compared by value rather than by ' + 'object\n' + ' identity, the reason being that the efficient ' + 'implementation of\n' + " dictionaries requires a key's hash value to remain " + 'constant.\n' + ' Numeric types used for keys obey the normal rules for ' + 'numeric\n' + ' comparison: if two numbers compare equal (e.g., "1" and ' + '"1.0")\n' + ' then they can be used interchangeably to index the same\n' + ' dictionary entry.\n' + '\n' + ' Dictionaries are mutable; they can be created by the ' + '"{...}"\n' + ' notation (see section *Dictionary displays*).\n' + '\n' + ' The extension modules "dbm.ndbm" and "dbm.gnu" provide\n' + ' additional examples of mapping types, as does the ' + '"collections"\n' + ' module.\n' + '\n' + 'Callable types\n' + ' These are the types to which the function call operation (see\n' + ' section *Calls*) can be applied:\n' + '\n' + ' User-defined functions\n' + ' A user-defined function object is created by a function\n' + ' definition (see section *Function definitions*). It should ' + 'be\n' + ' called with an argument list containing the same number of ' + 'items\n' + " as the function's formal parameter list.\n" + '\n' + ' Special attributes:\n' + '\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | Attribute | ' + 'Meaning | |\n' + ' ' + '+===========================+=================================+=============+\n' + ' | "__doc__" | The function\'s ' + 'documentation | Writable |\n' + ' | | string, or "None" ' + 'if | |\n' + ' | | ' + 'unavailable | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__name__" | The function\'s ' + 'name | Writable |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__qualname__" | The function\'s *qualified ' + 'name* | Writable |\n' + ' | | New in version ' + '3.3. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__module__" | The name of the module ' + 'the | Writable |\n' + ' | | function was defined in, ' + 'or | |\n' + ' | | "None" if ' + 'unavailable. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__defaults__" | A tuple containing ' + 'default | Writable |\n' + ' | | argument values for ' + 'those | |\n' + ' | | arguments that have ' + 'defaults, | |\n' + ' | | or "None" if no arguments ' + 'have | |\n' + ' | | a default ' + 'value | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__code__" | The code object ' + 'representing | Writable |\n' + ' | | the compiled function ' + 'body. 
| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__globals__" | A reference to the ' + 'dictionary | Read-only |\n' + ' | | that holds the ' + "function's | |\n" + ' | | global variables --- the ' + 'global | |\n' + ' | | namespace of the module ' + 'in | |\n' + ' | | which the function was ' + 'defined. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__dict__" | The namespace ' + 'supporting | Writable |\n' + ' | | arbitrary function ' + 'attributes. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__closure__" | "None" or a tuple of cells ' + 'that | Read-only |\n' + ' | | contain bindings for ' + 'the | |\n' + " | | function's free " + 'variables. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__annotations__" | A dict containing ' + 'annotations | Writable |\n' + ' | | of parameters. The keys of ' + 'the | |\n' + ' | | dict are the parameter ' + 'names, | |\n' + ' | | and "\'return\'" for the ' + 'return | |\n' + ' | | annotation, if ' + 'provided. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__kwdefaults__" | A dict containing defaults ' + 'for | Writable |\n' + ' | | keyword-only ' + 'parameters. | |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + '\n' + ' Most of the attributes labelled "Writable" check the type ' + 'of the\n' + ' assigned value.\n' + '\n' + ' Function objects also support getting and setting ' + 'arbitrary\n' + ' attributes, which can be used, for example, to attach ' + 'metadata\n' + ' to functions. Regular attribute dot-notation is used to ' + 'get and\n' + ' set such attributes. *Note that the current implementation ' + 'only\n' + ' supports function attributes on user-defined functions. ' + 'Function\n' + ' attributes on built-in functions may be supported in the\n' + ' future.*\n' + '\n' + " Additional information about a function's definition can " + 'be\n' + ' retrieved from its code object; see the description of ' + 'internal\n' + ' types below.\n' + '\n' + ' Instance methods\n' + ' An instance method object combines a class, a class ' + 'instance and\n' + ' any callable object (normally a user-defined function).\n' + '\n' + ' Special read-only attributes: "__self__" is the class ' + 'instance\n' + ' object, "__func__" is the function object; "__doc__" is ' + 'the\n' + ' method\'s documentation (same as "__func__.__doc__"); ' + '"__name__"\n' + ' is the method name (same as "__func__.__name__"); ' + '"__module__"\n' + ' is the name of the module the method was defined in, or ' + '"None"\n' + ' if unavailable.\n' + '\n' + ' Methods also support accessing (but not setting) the ' + 'arbitrary\n' + ' function attributes on the underlying function object.\n' + '\n' + ' User-defined method objects may be created when getting an\n' + ' attribute of a class (perhaps via an instance of that ' + 'class), if\n' + ' that attribute is a user-defined function object or a ' + 'class\n' + ' method object.\n' + '\n' + ' When an instance method object is created by retrieving a ' + 'user-\n' + ' defined function object from a class via one of its ' + 'instances,\n' + ' its "__self__" attribute is the instance, and the method ' + 'object\n' + ' is said to be bound. 
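For example, retrieving a function through an instance yields a bound method whose "__self__" is the instance and whose "__func__" is the original function (the class "C" here is an invented sketch):

    >>> class C:
    ...     def f(self):
    ...         return 'spam'
    ...
    >>> x = C()
    >>> x.f.__self__ is x
    True
    >>> x.f.__func__ is C.f
    True
    >>> x.f() == C.f(x)
    True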
The new method\'s "__func__" ' + 'attribute is\n' + ' the original function object.\n' + '\n' + ' When a user-defined method object is created by retrieving\n' + ' another method object from a class or instance, the ' + 'behaviour is\n' + ' the same as for a function object, except that the ' + '"__func__"\n' + ' attribute of the new instance is not the original method ' + 'object\n' + ' but its "__func__" attribute.\n' + '\n' + ' When an instance method object is created by retrieving a ' + 'class\n' + ' method object from a class or instance, its "__self__" ' + 'attribute\n' + ' is the class itself, and its "__func__" attribute is the\n' + ' function object underlying the class method.\n' + '\n' + ' When an instance method object is called, the underlying\n' + ' function ("__func__") is called, inserting the class ' + 'instance\n' + ' ("__self__") in front of the argument list. For instance, ' + 'when\n' + ' "C" is a class which contains a definition for a function ' + '"f()",\n' + ' and "x" is an instance of "C", calling "x.f(1)" is ' + 'equivalent to\n' + ' calling "C.f(x, 1)".\n' + '\n' + ' When an instance method object is derived from a class ' + 'method\n' + ' object, the "class instance" stored in "__self__" will ' + 'actually\n' + ' be the class itself, so that calling either "x.f(1)" or ' + '"C.f(1)"\n' + ' is equivalent to calling "f(C,1)" where "f" is the ' + 'underlying\n' + ' function.\n' + '\n' + ' Note that the transformation from function object to ' + 'instance\n' + ' method object happens each time the attribute is retrieved ' + 'from\n' + ' the instance. In some cases, a fruitful optimization is ' + 'to\n' + ' assign the attribute to a local variable and call that ' + 'local\n' + ' variable. Also notice that this transformation only happens ' + 'for\n' + ' user-defined functions; other callable objects (and all ' + 'non-\n' + ' callable objects) are retrieved without transformation. It ' + 'is\n' + ' also important to note that user-defined functions which ' + 'are\n' + ' attributes of a class instance are not converted to bound\n' + ' methods; this *only* happens when the function is an ' + 'attribute\n' + ' of the class.\n' + '\n' + ' Generator functions\n' + ' A function or method which uses the "yield" statement (see\n' + ' section *The yield statement*) is called a *generator ' + 'function*.\n' + ' Such a function, when called, always returns an iterator ' + 'object\n' + ' which can be used to execute the body of the function: ' + 'calling\n' + ' the iterator\'s "iterator.__next__()" method will cause ' + 'the\n' + ' function to execute until it provides a value using the ' + '"yield"\n' + ' statement. When the function executes a "return" statement ' + 'or\n' + ' falls off the end, a "StopIteration" exception is raised ' + 'and the\n' + ' iterator will have reached the end of the set of values to ' + 'be\n' + ' returned.\n' + '\n' + ' Built-in functions\n' + ' A built-in function object is a wrapper around a C ' + 'function.\n' + ' Examples of built-in functions are "len()" and ' + '"math.sin()"\n' + ' ("math" is a standard built-in module). The number and type ' + 'of\n' + ' the arguments are determined by the C function. 
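For example, calling a generator function returns an iterator that runs the function body lazily (the name "countdown" is invented for this sketch):

    >>> def countdown(n):
    ...     while n:
    ...         yield n
    ...         n -= 1
    ...
    >>> gen = countdown(3)
    >>> next(gen)
    3
    >>> list(gen)
    [2, 1]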
Special ' + 'read-\n' + ' only attributes: "__doc__" is the function\'s ' + 'documentation\n' + ' string, or "None" if unavailable; "__name__" is the ' + "function's\n" + ' name; "__self__" is set to "None" (but see the next item);\n' + ' "__module__" is the name of the module the function was ' + 'defined\n' + ' in or "None" if unavailable.\n' + '\n' + ' Built-in methods\n' + ' This is really a different disguise of a built-in function, ' + 'this\n' + ' time containing an object passed to the C function as an\n' + ' implicit extra argument. An example of a built-in method ' + 'is\n' + ' "alist.append()", assuming *alist* is a list object. In ' + 'this\n' + ' case, the special read-only attribute "__self__" is set to ' + 'the\n' + ' object denoted by *alist*.\n' + '\n' + ' Classes\n' + ' Classes are callable. These objects normally act as ' + 'factories\n' + ' for new instances of themselves, but variations are ' + 'possible for\n' + ' class types that override "__new__()". The arguments of ' + 'the\n' + ' call are passed to "__new__()" and, in the typical case, ' + 'to\n' + ' "__init__()" to initialize the new instance.\n' + '\n' + ' Class Instances\n' + ' Instances of arbitrary classes can be made callable by ' + 'defining\n' + ' a "__call__()" method in their class.\n' + '\n' + 'Modules\n' + ' Modules are a basic organizational unit of Python code, and ' + 'are\n' + ' created by the *import system* as invoked either by the ' + '"import"\n' + ' statement (see "import"), or by calling functions such as\n' + ' "importlib.import_module()" and built-in "__import__()". A ' + 'module\n' + ' object has a namespace implemented by a dictionary object ' + '(this is\n' + ' the dictionary referenced by the "__globals__" attribute of\n' + ' functions defined in the module). Attribute references are\n' + ' translated to lookups in this dictionary, e.g., "m.x" is ' + 'equivalent\n' + ' to "m.__dict__["x"]". A module object does not contain the ' + 'code\n' + " object used to initialize the module (since it isn't needed " + 'once\n' + ' the initialization is done).\n' + '\n' + " Attribute assignment updates the module's namespace " + 'dictionary,\n' + ' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n' + '\n' + ' Special read-only attribute: "__dict__" is the module\'s ' + 'namespace\n' + ' as a dictionary object.\n' + '\n' + ' **CPython implementation detail:** Because of the way CPython\n' + ' clears module dictionaries, the module dictionary will be ' + 'cleared\n' + ' when the module falls out of scope even if the dictionary ' + 'still has\n' + ' live references. To avoid this, copy the dictionary or keep ' + 'the\n' + ' module around while using its dictionary directly.\n' + '\n' + ' Predefined (writable) attributes: "__name__" is the module\'s ' + 'name;\n' + ' "__doc__" is the module\'s documentation string, or "None" if\n' + ' unavailable; "__file__" is the pathname of the file from which ' + 'the\n' + ' module was loaded, if it was loaded from a file. The ' + '"__file__"\n' + ' attribute may be missing for certain types of modules, such as ' + 'C\n' + ' modules that are statically linked into the interpreter; for\n' + ' extension modules loaded dynamically from a shared library, it ' + 'is\n' + ' the pathname of the shared library file.\n' + '\n' + 'Custom classes\n' + ' Custom class types are typically created by class definitions ' + '(see\n' + ' section *Class definitions*). A class has a namespace ' + 'implemented\n' + ' by a dictionary object. 
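For example, a built-in method carries the object it was retrieved from in "__self__", and any instance becomes callable once its class defines "__call__()" (the class "Greeter" is invented for this sketch):

    >>> items = []
    >>> items.append.__self__ is items
    True
    >>> class Greeter:
    ...     def __call__(self, name):
    ...         return 'hello, ' + name
    ...
    >>> Greeter()('world')
    'hello, world'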
Class attribute references are ' + 'translated\n' + ' to lookups in this dictionary, e.g., "C.x" is translated to\n' + ' "C.__dict__["x"]" (although there are a number of hooks which ' + 'allow\n' + ' for other means of locating attributes). When the attribute ' + 'name is\n' + ' not found there, the attribute search continues in the base\n' + ' classes. This search of the base classes uses the C3 method\n' + ' resolution order which behaves correctly even in the presence ' + 'of\n' + " 'diamond' inheritance structures where there are multiple\n" + ' inheritance paths leading back to a common ancestor. ' + 'Additional\n' + ' details on the C3 MRO used by Python can be found in the\n' + ' documentation accompanying the 2.3 release at\n' + ' http://www.python.org/download/releases/2.3/mro/.\n' + '\n' + ' When a class attribute reference (for class "C", say) would ' + 'yield a\n' + ' class method object, it is transformed into an instance ' + 'method\n' + ' object whose "__self__" attributes is "C". When it would ' + 'yield a\n' + ' static method object, it is transformed into the object ' + 'wrapped by\n' + ' the static method object. See section *Implementing ' + 'Descriptors*\n' + ' for another way in which attributes retrieved from a class ' + 'may\n' + ' differ from those actually contained in its "__dict__".\n' + '\n' + " Class attribute assignments update the class's dictionary, " + 'never\n' + ' the dictionary of a base class.\n' + '\n' + ' A class object can be called (see above) to yield a class ' + 'instance\n' + ' (see below).\n' + '\n' + ' Special attributes: "__name__" is the class name; "__module__" ' + 'is\n' + ' the module name in which the class was defined; "__dict__" is ' + 'the\n' + ' dictionary containing the class\'s namespace; "__bases__" is a ' + 'tuple\n' + ' (possibly empty or a singleton) containing the base classes, ' + 'in the\n' + ' order of their occurrence in the base class list; "__doc__" is ' + 'the\n' + " class's documentation string, or None if undefined.\n" + '\n' + 'Class instances\n' + ' A class instance is created by calling a class object (see ' + 'above).\n' + ' A class instance has a namespace implemented as a dictionary ' + 'which\n' + ' is the first place in which attribute references are ' + 'searched.\n' + " When an attribute is not found there, and the instance's class " + 'has\n' + ' an attribute by that name, the search continues with the ' + 'class\n' + ' attributes. If a class attribute is found that is a ' + 'user-defined\n' + ' function object, it is transformed into an instance method ' + 'object\n' + ' whose "__self__" attribute is the instance. Static method ' + 'and\n' + ' class method objects are also transformed; see above under\n' + ' "Classes". See section *Implementing Descriptors* for another ' + 'way\n' + ' in which attributes of a class retrieved via its instances ' + 'may\n' + " differ from the objects actually stored in the class's " + '"__dict__".\n' + " If no class attribute is found, and the object's class has a\n" + ' "__getattr__()" method, that is called to satisfy the lookup.\n' + '\n' + " Attribute assignments and deletions update the instance's\n" + " dictionary, never a class's dictionary. If the class has a\n" + ' "__setattr__()" or "__delattr__()" method, this is called ' + 'instead\n' + ' of updating the instance dictionary directly.\n' + '\n' + ' Class instances can pretend to be numbers, sequences, or ' + 'mappings\n' + ' if they have methods with certain special names. 
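For example, "__getattr__()" is only consulted after the normal instance and class lookup has failed (the class "Defaulting" is invented for this sketch):

    >>> class Defaulting:
    ...     def __getattr__(self, name):
    ...         return 'no attribute named ' + name
    ...
    >>> Defaulting().missing
    'no attribute named missing'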
See section\n' + ' *Special method names*.\n' + '\n' + ' Special attributes: "__dict__" is the attribute dictionary;\n' + ' "__class__" is the instance\'s class.\n' + '\n' + 'I/O objects (also known as file objects)\n' + ' A *file object* represents an open file. Various shortcuts ' + 'are\n' + ' available to create file objects: the "open()" built-in ' + 'function,\n' + ' and also "os.popen()", "os.fdopen()", and the "makefile()" ' + 'method\n' + ' of socket objects (and perhaps by other functions or methods\n' + ' provided by extension modules).\n' + '\n' + ' The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n' + ' initialized to file objects corresponding to the ' + "interpreter's\n" + ' standard input, output and error streams; they are all open in ' + 'text\n' + ' mode and therefore follow the interface defined by the\n' + ' "io.TextIOBase" abstract class.\n' + '\n' + 'Internal types\n' + ' A few types used internally by the interpreter are exposed to ' + 'the\n' + ' user. Their definitions may change with future versions of ' + 'the\n' + ' interpreter, but they are mentioned here for completeness.\n' + '\n' + ' Code objects\n' + ' Code objects represent *byte-compiled* executable Python ' + 'code,\n' + ' or *bytecode*. The difference between a code object and a\n' + ' function object is that the function object contains an ' + 'explicit\n' + " reference to the function's globals (the module in which it " + 'was\n' + ' defined), while a code object contains no context; also ' + 'the\n' + ' default argument values are stored in the function object, ' + 'not\n' + ' in the code object (because they represent values ' + 'calculated at\n' + ' run-time). Unlike function objects, code objects are ' + 'immutable\n' + ' and contain no references (directly or indirectly) to ' + 'mutable\n' + ' objects.\n' + '\n' + ' Special read-only attributes: "co_name" gives the function ' + 'name;\n' + ' "co_argcount" is the number of positional arguments ' + '(including\n' + ' arguments with default values); "co_nlocals" is the number ' + 'of\n' + ' local variables used by the function (including ' + 'arguments);\n' + ' "co_varnames" is a tuple containing the names of the local\n' + ' variables (starting with the argument names); "co_cellvars" ' + 'is a\n' + ' tuple containing the names of local variables that are\n' + ' referenced by nested functions; "co_freevars" is a tuple\n' + ' containing the names of free variables; "co_code" is a ' + 'string\n' + ' representing the sequence of bytecode instructions; ' + '"co_consts"\n' + ' is a tuple containing the literals used by the bytecode;\n' + ' "co_names" is a tuple containing the names used by the ' + 'bytecode;\n' + ' "co_filename" is the filename from which the code was ' + 'compiled;\n' + ' "co_firstlineno" is the first line number of the function;\n' + ' "co_lnotab" is a string encoding the mapping from bytecode\n' + ' offsets to line numbers (for details see the source code of ' + 'the\n' + ' interpreter); "co_stacksize" is the required stack size\n' + ' (including local variables); "co_flags" is an integer ' + 'encoding a\n' + ' number of flags for the interpreter.\n' + '\n' + ' The following flag bits are defined for "co_flags": bit ' + '"0x04"\n' + ' is set if the function uses the "*arguments" syntax to ' + 'accept an\n' + ' arbitrary number of positional arguments; bit "0x08" is set ' + 'if\n' + ' the function uses the "**keywords" syntax to accept ' + 'arbitrary\n' + ' keyword arguments; bit "0x20" is set if the function is a\n' + ' 
generator.\n' + '\n' + ' Future feature declarations ("from __future__ import ' + 'division")\n' + ' also use bits in "co_flags" to indicate whether a code ' + 'object\n' + ' was compiled with a particular feature enabled: bit ' + '"0x2000" is\n' + ' set if the function was compiled with future division ' + 'enabled;\n' + ' bits "0x10" and "0x1000" were used in earlier versions of\n' + ' Python.\n' + '\n' + ' Other bits in "co_flags" are reserved for internal use.\n' + '\n' + ' If a code object represents a function, the first item in\n' + ' "co_consts" is the documentation string of the function, ' + 'or\n' + ' "None" if undefined.\n' + '\n' + ' Frame objects\n' + ' Frame objects represent execution frames. They may occur ' + 'in\n' + ' traceback objects (see below).\n' + '\n' + ' Special read-only attributes: "f_back" is to the previous ' + 'stack\n' + ' frame (towards the caller), or "None" if this is the ' + 'bottom\n' + ' stack frame; "f_code" is the code object being executed in ' + 'this\n' + ' frame; "f_locals" is the dictionary used to look up local\n' + ' variables; "f_globals" is used for global variables;\n' + ' "f_builtins" is used for built-in (intrinsic) names; ' + '"f_lasti"\n' + ' gives the precise instruction (this is an index into the\n' + ' bytecode string of the code object).\n' + '\n' + ' Special writable attributes: "f_trace", if not "None", is ' + 'a\n' + ' function called at the start of each source code line (this ' + 'is\n' + ' used by the debugger); "f_lineno" is the current line ' + 'number of\n' + ' the frame --- writing to this from within a trace function ' + 'jumps\n' + ' to the given line (only for the bottom-most frame). A ' + 'debugger\n' + ' can implement a Jump command (aka Set Next Statement) by ' + 'writing\n' + ' to f_lineno.\n' + '\n' + ' Frame objects support one method:\n' + '\n' + ' frame.clear()\n' + '\n' + ' This method clears all references to local variables ' + 'held by\n' + ' the frame. Also, if the frame belonged to a generator, ' + 'the\n' + ' generator is finalized. This helps break reference ' + 'cycles\n' + ' involving frame objects (for example when catching an\n' + ' exception and storing its traceback for later use).\n' + '\n' + ' "RuntimeError" is raised if the frame is currently ' + 'executing.\n' + '\n' + ' New in version 3.4.\n' + '\n' + ' Traceback objects\n' + ' Traceback objects represent a stack trace of an exception. ' + 'A\n' + ' traceback object is created when an exception occurs. When ' + 'the\n' + ' search for an exception handler unwinds the execution ' + 'stack, at\n' + ' each unwound level a traceback object is inserted in front ' + 'of\n' + ' the current traceback. When an exception handler is ' + 'entered,\n' + ' the stack trace is made available to the program. (See ' + 'section\n' + ' *The try statement*.) It is accessible as the third item of ' + 'the\n' + ' tuple returned by "sys.exc_info()". 
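For example, inside a handler "sys.exc_info()" returns the exception class, the instance, and the traceback object (illustrative only):

    >>> import sys
    >>> try:
    ...     1 / 0
    ... except ZeroDivisionError:
    ...     exc_type, exc_value, tb = sys.exc_info()
    ...
    >>> exc_type.__name__
    'ZeroDivisionError'
    >>> type(tb).__name__
    'traceback'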
When the program ' + 'contains no\n' + ' suitable handler, the stack trace is written (nicely ' + 'formatted)\n' + ' to the standard error stream; if the interpreter is ' + 'interactive,\n' + ' it is also made available to the user as ' + '"sys.last_traceback".\n' + '\n' + ' Special read-only attributes: "tb_next" is the next level ' + 'in the\n' + ' stack trace (towards the frame where the exception ' + 'occurred), or\n' + ' "None" if there is no next level; "tb_frame" points to the\n' + ' execution frame of the current level; "tb_lineno" gives the ' + 'line\n' + ' number where the exception occurred; "tb_lasti" indicates ' + 'the\n' + ' precise instruction. The line number and last instruction ' + 'in\n' + ' the traceback may differ from the line number of its frame\n' + ' object if the exception occurred in a "try" statement with ' + 'no\n' + ' matching except clause or with a finally clause.\n' + '\n' + ' Slice objects\n' + ' Slice objects are used to represent slices for ' + '"__getitem__()"\n' + ' methods. They are also created by the built-in "slice()"\n' + ' function.\n' + '\n' + ' Special read-only attributes: "start" is the lower bound; ' + '"stop"\n' + ' is the upper bound; "step" is the step value; each is ' + '"None" if\n' + ' omitted. These attributes can have any type.\n' + '\n' + ' Slice objects support one method:\n' + '\n' + ' slice.indices(self, length)\n' + '\n' + ' This method takes a single integer argument *length* ' + 'and\n' + ' computes information about the slice that the slice ' + 'object\n' + ' would describe if applied to a sequence of *length* ' + 'items.\n' + ' It returns a tuple of three integers; respectively these ' + 'are\n' + ' the *start* and *stop* indices and the *step* or stride\n' + ' length of the slice. Missing or out-of-bounds indices ' + 'are\n' + ' handled in a manner consistent with regular slices.\n' + '\n' + ' Static method objects\n' + ' Static method objects provide a way of defeating the\n' + ' transformation of function objects to method objects ' + 'described\n' + ' above. A static method object is a wrapper around any ' + 'other\n' + ' object, usually a user-defined method object. When a ' + 'static\n' + ' method object is retrieved from a class or a class ' + 'instance, the\n' + ' object actually returned is the wrapped object, which is ' + 'not\n' + ' subject to any further transformation. Static method ' + 'objects are\n' + ' not themselves callable, although the objects they wrap ' + 'usually\n' + ' are. Static method objects are created by the built-in\n' + ' "staticmethod()" constructor.\n' + '\n' + ' Class method objects\n' + ' A class method object, like a static method object, is a ' + 'wrapper\n' + ' around another object that alters the way in which that ' + 'object\n' + ' is retrieved from classes and class instances. The ' + 'behaviour of\n' + ' class method objects upon such retrieval is described ' + 'above,\n' + ' under "User-defined methods". Class method objects are ' + 'created\n' + ' by the built-in "classmethod()" constructor.\n', + 'typesfunctions': '\n' + 'Functions\n' + '*********\n' + '\n' + 'Function objects are created by function definitions. ' + 'The only\n' + 'operation on a function object is to call it: ' + '"func(argument-list)".\n' + '\n' + 'There are really two flavors of function objects: ' + 'built-in functions\n' + 'and user-defined functions. 
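A minimal sketch of the static and class method retrieval described above (the class and method names are invented for the example):

    >>> class C:
    ...     @staticmethod
    ...     def s():
    ...         return 'static'
    ...     @classmethod
    ...     def c(cls):
    ...         return cls.__name__
    ...
    >>> C.s(), C().s(), C.c(), C().c()
    ('static', 'static', 'C', 'C')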
Both support the same ' + 'operation (to call\n' + 'the function), but the implementation is different, ' + 'hence the\n' + 'different object types.\n' + '\n' + 'See *Function definitions* for more information.\n', + 'typesmapping': '\n' + 'Mapping Types --- "dict"\n' + '************************\n' + '\n' + 'A *mapping* object maps *hashable* values to arbitrary ' + 'objects.\n' + 'Mappings are mutable objects. There is currently only one ' + 'standard\n' + 'mapping type, the *dictionary*. (For other containers see ' + 'the built-\n' + 'in "list", "set", and "tuple" classes, and the ' + '"collections" module.)\n' + '\n' + "A dictionary's keys are *almost* arbitrary values. Values " + 'that are\n' + 'not *hashable*, that is, values containing lists, ' + 'dictionaries or\n' + 'other mutable types (that are compared by value rather ' + 'than by object\n' + 'identity) may not be used as keys. Numeric types used for ' + 'keys obey\n' + 'the normal rules for numeric comparison: if two numbers ' + 'compare equal\n' + '(such as "1" and "1.0") then they can be used ' + 'interchangeably to index\n' + 'the same dictionary entry. (Note however, that since ' + 'computers store\n' + 'floating-point numbers as approximations it is usually ' + 'unwise to use\n' + 'them as dictionary keys.)\n' + '\n' + 'Dictionaries can be created by placing a comma-separated ' + 'list of "key:\n' + 'value" pairs within braces, for example: "{\'jack\': 4098, ' + "'sjoerd':\n" + '4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the ' + '"dict"\n' + 'constructor.\n' + '\n' + 'class class dict(**kwarg)\n' + 'class class dict(mapping, **kwarg)\n' + 'class class dict(iterable, **kwarg)\n' + '\n' + ' Return a new dictionary initialized from an optional ' + 'positional\n' + ' argument and a possibly empty set of keyword ' + 'arguments.\n' + '\n' + ' If no positional argument is given, an empty dictionary ' + 'is created.\n' + ' If a positional argument is given and it is a mapping ' + 'object, a\n' + ' dictionary is created with the same key-value pairs as ' + 'the mapping\n' + ' object. Otherwise, the positional argument must be an ' + '*iterable*\n' + ' object. Each item in the iterable must itself be an ' + 'iterable with\n' + ' exactly two objects. The first object of each item ' + 'becomes a key\n' + ' in the new dictionary, and the second object the ' + 'corresponding\n' + ' value. If a key occurs more than once, the last value ' + 'for that key\n' + ' becomes the corresponding value in the new dictionary.\n' + '\n' + ' If keyword arguments are given, the keyword arguments ' + 'and their\n' + ' values are added to the dictionary created from the ' + 'positional\n' + ' argument. If a key being added is already present, the ' + 'value from\n' + ' the keyword argument replaces the value from the ' + 'positional\n' + ' argument.\n' + '\n' + ' To illustrate, the following examples all return a ' + 'dictionary equal\n' + ' to "{"one": 1, "two": 2, "three": 3}":\n' + '\n' + ' >>> a = dict(one=1, two=2, three=3)\n' + " >>> b = {'one': 1, 'two': 2, 'three': 3}\n" + " >>> c = dict(zip(['one', 'two', 'three'], [1, 2, " + '3]))\n' + " >>> d = dict([('two', 2), ('one', 1), ('three', " + '3)])\n' + " >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n" + ' >>> a == b == c == d == e\n' + ' True\n' + '\n' + ' Providing keyword arguments as in the first example ' + 'only works for\n' + ' keys that are valid Python identifiers. 
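As a small supplementary sketch of the key rules above (the keys and values are invented for the example):

    >>> d = {1: 'int key'}
    >>> d[1.0]            # 1 and 1.0 compare equal, so they index the same entry
    'int key'
    >>> dict([('two words', 2), (3.0, 'three')]) == {'two words': 2, 3.0: 'three'}
    True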
Otherwise, any ' + 'valid keys\n' + ' can be used.\n' + '\n' + ' These are the operations that dictionaries support (and ' + 'therefore,\n' + ' custom mapping types should support too):\n' + '\n' + ' len(d)\n' + '\n' + ' Return the number of items in the dictionary *d*.\n' + '\n' + ' d[key]\n' + '\n' + ' Return the item of *d* with key *key*. Raises a ' + '"KeyError" if\n' + ' *key* is not in the map.\n' + '\n' + ' If a subclass of dict defines a method ' + '"__missing__()", if the\n' + ' key *key* is not present, the "d[key]" operation ' + 'calls that\n' + ' method with the key *key* as argument. The "d[key]" ' + 'operation\n' + ' then returns or raises whatever is returned or ' + 'raised by the\n' + ' "__missing__(key)" call if the key is not present. ' + 'No other\n' + ' operations or methods invoke "__missing__()". If ' + '"__missing__()"\n' + ' is not defined, "KeyError" is raised. ' + '"__missing__()" must be a\n' + ' method; it cannot be an instance variable:\n' + '\n' + ' >>> class Counter(dict):\n' + ' ... def __missing__(self, key):\n' + ' ... return 0\n' + ' >>> c = Counter()\n' + " >>> c['red']\n" + ' 0\n' + " >>> c['red'] += 1\n" + " >>> c['red']\n" + ' 1\n' + '\n' + ' See "collections.Counter" for a complete ' + 'implementation\n' + ' including other methods helpful for accumulating and ' + 'managing\n' + ' tallies.\n' + '\n' + ' d[key] = value\n' + '\n' + ' Set "d[key]" to *value*.\n' + '\n' + ' del d[key]\n' + '\n' + ' Remove "d[key]" from *d*. Raises a "KeyError" if ' + '*key* is not\n' + ' in the map.\n' + '\n' + ' key in d\n' + '\n' + ' Return "True" if *d* has a key *key*, else "False".\n' + '\n' + ' key not in d\n' + '\n' + ' Equivalent to "not key in d".\n' + '\n' + ' iter(d)\n' + '\n' + ' Return an iterator over the keys of the dictionary. ' + 'This is a\n' + ' shortcut for "iter(d.keys())".\n' + '\n' + ' clear()\n' + '\n' + ' Remove all items from the dictionary.\n' + '\n' + ' copy()\n' + '\n' + ' Return a shallow copy of the dictionary.\n' + '\n' + ' classmethod fromkeys(seq[, value])\n' + '\n' + ' Create a new dictionary with keys from *seq* and ' + 'values set to\n' + ' *value*.\n' + '\n' + ' "fromkeys()" is a class method that returns a new ' + 'dictionary.\n' + ' *value* defaults to "None".\n' + '\n' + ' get(key[, default])\n' + '\n' + ' Return the value for *key* if *key* is in the ' + 'dictionary, else\n' + ' *default*. If *default* is not given, it defaults to ' + '"None", so\n' + ' that this method never raises a "KeyError".\n' + '\n' + ' items()\n' + '\n' + ' Return a new view of the dictionary\'s items ("(key, ' + 'value)"\n' + ' pairs). See the *documentation of view objects*.\n' + '\n' + ' keys()\n' + '\n' + " Return a new view of the dictionary's keys. See " + 'the\n' + ' *documentation of view objects*.\n' + '\n' + ' pop(key[, default])\n' + '\n' + ' If *key* is in the dictionary, remove it and return ' + 'its value,\n' + ' else return *default*. If *default* is not given ' + 'and *key* is\n' + ' not in the dictionary, a "KeyError" is raised.\n' + '\n' + ' popitem()\n' + '\n' + ' Remove and return an arbitrary "(key, value)" pair ' + 'from the\n' + ' dictionary.\n' + '\n' + ' "popitem()" is useful to destructively iterate over ' + 'a\n' + ' dictionary, as often used in set algorithms. If the ' + 'dictionary\n' + ' is empty, calling "popitem()" raises a "KeyError".\n' + '\n' + ' setdefault(key[, default])\n' + '\n' + ' If *key* is in the dictionary, return its value. If ' + 'not, insert\n' + ' *key* with a value of *default* and return ' + '*default*. 
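A short illustrative sketch of "setdefault()" and "get()" (keys and values invented for the example):

    >>> d = {}
    >>> d.setdefault('spam', []).append(1)
    >>> d.setdefault('spam', []).append(2)   # the existing list is reused
    >>> d['spam']
    [1, 2]
    >>> d.get('missing', 0)                  # no KeyError, the default is returned
    0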
*default*\n' + ' defaults to "None".\n' + '\n' + ' update([other])\n' + '\n' + ' Update the dictionary with the key/value pairs from ' + '*other*,\n' + ' overwriting existing keys. Return "None".\n' + '\n' + ' "update()" accepts either another dictionary object ' + 'or an\n' + ' iterable of key/value pairs (as tuples or other ' + 'iterables of\n' + ' length two). If keyword arguments are specified, ' + 'the dictionary\n' + ' is then updated with those key/value pairs: ' + '"d.update(red=1,\n' + ' blue=2)".\n' + '\n' + ' values()\n' + '\n' + " Return a new view of the dictionary's values. See " + 'the\n' + ' *documentation of view objects*.\n' + '\n' + 'See also: "types.MappingProxyType" can be used to create a ' + 'read-only\n' + ' view of a "dict".\n' + '\n' + '\n' + 'Dictionary view objects\n' + '=======================\n' + '\n' + 'The objects returned by "dict.keys()", "dict.values()" ' + 'and\n' + '"dict.items()" are *view objects*. They provide a dynamic ' + 'view on the\n' + "dictionary's entries, which means that when the dictionary " + 'changes,\n' + 'the view reflects these changes.\n' + '\n' + 'Dictionary views can be iterated over to yield their ' + 'respective data,\n' + 'and support membership tests:\n' + '\n' + 'len(dictview)\n' + '\n' + ' Return the number of entries in the dictionary.\n' + '\n' + 'iter(dictview)\n' + '\n' + ' Return an iterator over the keys, values or items ' + '(represented as\n' + ' tuples of "(key, value)") in the dictionary.\n' + '\n' + ' Keys and values are iterated over in an arbitrary order ' + 'which is\n' + ' non-random, varies across Python implementations, and ' + 'depends on\n' + " the dictionary's history of insertions and deletions. " + 'If keys,\n' + ' values and items views are iterated over with no ' + 'intervening\n' + ' modifications to the dictionary, the order of items ' + 'will directly\n' + ' correspond. This allows the creation of "(value, key)" ' + 'pairs using\n' + ' "zip()": "pairs = zip(d.values(), d.keys())". Another ' + 'way to\n' + ' create the same list is "pairs = [(v, k) for (k, v) in ' + 'd.items()]".\n' + '\n' + ' Iterating views while adding or deleting entries in the ' + 'dictionary\n' + ' may raise a "RuntimeError" or fail to iterate over all ' + 'entries.\n' + '\n' + 'x in dictview\n' + '\n' + ' Return "True" if *x* is in the underlying dictionary\'s ' + 'keys, values\n' + ' or items (in the latter case, *x* should be a "(key, ' + 'value)"\n' + ' tuple).\n' + '\n' + 'Keys views are set-like since their entries are unique and ' + 'hashable.\n' + 'If all values are hashable, so that "(key, value)" pairs ' + 'are unique\n' + 'and hashable, then the items view is also set-like. ' + '(Values views are\n' + 'not treated as set-like since the entries are generally ' + 'not unique.)\n' + 'For set-like views, all of the operations defined for the ' + 'abstract\n' + 'base class "collections.abc.Set" are available (for ' + 'example, "==",\n' + '"<", or "^").\n' + '\n' + 'An example of dictionary view usage:\n' + '\n' + " >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, " + "'spam': 500}\n" + ' >>> keys = dishes.keys()\n' + ' >>> values = dishes.values()\n' + '\n' + ' >>> # iteration\n' + ' >>> n = 0\n' + ' >>> for val in values:\n' + ' ... 
n += val\n' + ' >>> print(n)\n' + ' 504\n' + '\n' + ' >>> # keys and values are iterated over in the same ' + 'order\n' + ' >>> list(keys)\n' + " ['eggs', 'bacon', 'sausage', 'spam']\n" + ' >>> list(values)\n' + ' [2, 1, 1, 500]\n' + '\n' + ' >>> # view objects are dynamic and reflect dict ' + 'changes\n' + " >>> del dishes['eggs']\n" + " >>> del dishes['sausage']\n" + ' >>> list(keys)\n' + " ['spam', 'bacon']\n" + '\n' + ' >>> # set operations\n' + " >>> keys & {'eggs', 'bacon', 'salad'}\n" + " {'bacon'}\n" + " >>> keys ^ {'sausage', 'juice'}\n" + " {'juice', 'sausage', 'bacon', 'spam'}\n", + 'typesmethods': '\n' + 'Methods\n' + '*******\n' + '\n' + 'Methods are functions that are called using the attribute ' + 'notation.\n' + 'There are two flavors: built-in methods (such as ' + '"append()" on lists)\n' + 'and class instance methods. Built-in methods are ' + 'described with the\n' + 'types that support them.\n' + '\n' + 'If you access a method (a function defined in a class ' + 'namespace)\n' + 'through an instance, you get a special object: a *bound ' + 'method* (also\n' + 'called *instance method*) object. When called, it will add ' + 'the "self"\n' + 'argument to the argument list. Bound methods have two ' + 'special read-\n' + 'only attributes: "m.__self__" is the object on which the ' + 'method\n' + 'operates, and "m.__func__" is the function implementing ' + 'the method.\n' + 'Calling "m(arg-1, arg-2, ..., arg-n)" is completely ' + 'equivalent to\n' + 'calling "m.__func__(m.__self__, arg-1, arg-2, ..., ' + 'arg-n)".\n' + '\n' + 'Like function objects, bound method objects support ' + 'getting arbitrary\n' + 'attributes. However, since method attributes are actually ' + 'stored on\n' + 'the underlying function object ("meth.__func__"), setting ' + 'method\n' + 'attributes on bound methods is disallowed. Attempting to ' + 'set an\n' + 'attribute on a method results in an "AttributeError" being ' + 'raised. In\n' + 'order to set a method attribute, you need to explicitly ' + 'set it on the\n' + 'underlying function object:\n' + '\n' + ' >>> class C:\n' + ' ... def method(self):\n' + ' ... pass\n' + ' ...\n' + ' >>> c = C()\n' + " >>> c.method.whoami = 'my name is method' # can't set " + 'on the method\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " AttributeError: 'method' object has no attribute " + "'whoami'\n" + " >>> c.method.__func__.whoami = 'my name is method'\n" + ' >>> c.method.whoami\n' + " 'my name is method'\n" + '\n' + 'See *The standard type hierarchy* for more information.\n', + 'typesmodules': '\n' + 'Modules\n' + '*******\n' + '\n' + 'The only special operation on a module is attribute ' + 'access: "m.name",\n' + 'where *m* is a module and *name* accesses a name defined ' + "in *m*'s\n" + 'symbol table. Module attributes can be assigned to. (Note ' + 'that the\n' + '"import" statement is not, strictly speaking, an operation ' + 'on a module\n' + 'object; "import foo" does not require a module object ' + 'named *foo* to\n' + 'exist, rather it requires an (external) *definition* for a ' + 'module\n' + 'named *foo* somewhere.)\n' + '\n' + 'A special attribute of every module is "__dict__". This is ' + 'the\n' + "dictionary containing the module's symbol table. 
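To illustrate the bound method attributes described above, a minimal sketch (the class name is invented for the example):

    >>> class C:
    ...     def method(self):
    ...         return 'hi'
    ...
    >>> m = C().method
    >>> m.__func__ is C.method
    True
    >>> m() == m.__func__(m.__self__)        # the documented equivalence
    True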
Modifying " + 'this\n' + "dictionary will actually change the module's symbol table, " + 'but direct\n' + 'assignment to the "__dict__" attribute is not possible ' + '(you can write\n' + '"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", ' + "but you can't\n" + 'write "m.__dict__ = {}"). Modifying "__dict__" directly ' + 'is not\n' + 'recommended.\n' + '\n' + 'Modules built into the interpreter are written like this: ' + '"". If loaded from a file, they are ' + 'written as\n' + '"".\n', + 'typesseq': '\n' + 'Sequence Types --- "list", "tuple", "range"\n' + '*******************************************\n' + '\n' + 'There are three basic sequence types: lists, tuples, and ' + 'range\n' + 'objects. Additional sequence types tailored for processing of ' + '*binary\n' + 'data* and *text strings* are described in dedicated sections.\n' + '\n' + '\n' + 'Common Sequence Operations\n' + '==========================\n' + '\n' + 'The operations in the following table are supported by most ' + 'sequence\n' + 'types, both mutable and immutable. The ' + '"collections.abc.Sequence" ABC\n' + 'is provided to make it easier to correctly implement these ' + 'operations\n' + 'on custom sequence types.\n' + '\n' + 'This table lists the sequence operations sorted in ascending ' + 'priority\n' + '(operations in the same box have the same priority). In the ' + 'table,\n' + '*s* and *t* are sequences of the same type, *n*, *i*, *j* and ' + '*k* are\n' + 'integers and *x* is an arbitrary object that meets any type ' + 'and value\n' + 'restrictions imposed by *s*.\n' + '\n' + 'The "in" and "not in" operations have the same priorities as ' + 'the\n' + 'comparison operations. The "+" (concatenation) and "*" ' + '(repetition)\n' + 'operations have the same priority as the corresponding ' + 'numeric\n' + 'operations.\n' + '\n' + '+----------------------------+----------------------------------+------------+\n' + '| Operation | ' + 'Result | Notes |\n' + '+============================+==================================+============+\n' + '| "x in s" | "True" if an item of *s* ' + 'is | (1) |\n' + '| | equal to *x*, else ' + '"False" | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "x not in s" | "False" if an item of *s* ' + 'is | (1) |\n' + '| | equal to *x*, else ' + '"True" | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s + t" | the concatenation of *s* and ' + '*t* | (6)(7) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s * n" or "n * s" | *n* shallow copies of ' + '*s* | (2)(7) |\n' + '| | ' + 'concatenated | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i]" | *i*th item of *s*, origin ' + '0 | (3) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i:j]" | slice of *s* from *i* to ' + '*j* | (3)(4) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i:j:k]" | slice of *s* from *i* to ' + '*j* | (3)(5) |\n' + '| | with step ' + '*k* | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "len(s)" | length of ' + '*s* | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "min(s)" | smallest item of ' + '*s* | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "max(s)" | largest item of ' + '*s* | |\n' 
+ '+----------------------------+----------------------------------+------------+\n' + '| "s.index(x[, i[, j]])" | index of the first occurrence ' + 'of | (8) |\n' + '| | *x* in *s* (at or after ' + 'index | |\n' + '| | *i* and before index ' + '*j*) | |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s.count(x)" | total number of occurrences ' + 'of | |\n' + '| | *x* in ' + '*s* | |\n' + '+----------------------------+----------------------------------+------------+\n' + '\n' + 'Sequences of the same type also support comparisons. In ' + 'particular,\n' + 'tuples and lists are compared lexicographically by comparing\n' + 'corresponding elements. This means that to compare equal, ' + 'every\n' + 'element must compare equal and the two sequences must be of ' + 'the same\n' + 'type and have the same length. (For full details see ' + '*Comparisons* in\n' + 'the language reference.)\n' + '\n' + 'Notes:\n' + '\n' + '1. While the "in" and "not in" operations are used only for ' + 'simple\n' + ' containment testing in the general case, some specialised ' + 'sequences\n' + ' (such as "str", "bytes" and "bytearray") also use them for\n' + ' subsequence testing:\n' + '\n' + ' >>> "gg" in "eggs"\n' + ' True\n' + '\n' + '2. Values of *n* less than "0" are treated as "0" (which ' + 'yields an\n' + ' empty sequence of the same type as *s*). Note also that ' + 'the copies\n' + ' are shallow; nested structures are not copied. This often ' + 'haunts\n' + ' new Python programmers; consider:\n' + '\n' + ' >>> lists = [[]] * 3\n' + ' >>> lists\n' + ' [[], [], []]\n' + ' >>> lists[0].append(3)\n' + ' >>> lists\n' + ' [[3], [3], [3]]\n' + '\n' + ' What has happened is that "[[]]" is a one-element list ' + 'containing\n' + ' an empty list, so all three elements of "[[]] * 3" are ' + '(pointers\n' + ' to) this single empty list. Modifying any of the elements ' + 'of\n' + ' "lists" modifies this single list. You can create a list ' + 'of\n' + ' different lists this way:\n' + '\n' + ' >>> lists = [[] for i in range(3)]\n' + ' >>> lists[0].append(3)\n' + ' >>> lists[1].append(5)\n' + ' >>> lists[2].append(7)\n' + ' >>> lists\n' + ' [[3], [5], [7]]\n' + '\n' + '3. If *i* or *j* is negative, the index is relative to the end ' + 'of\n' + ' the string: "len(s) + i" or "len(s) + j" is substituted. ' + 'But note\n' + ' that "-0" is still "0".\n' + '\n' + '4. The slice of *s* from *i* to *j* is defined as the sequence ' + 'of\n' + ' items with index *k* such that "i <= k < j". If *i* or *j* ' + 'is\n' + ' greater than "len(s)", use "len(s)". If *i* is omitted or ' + '"None",\n' + ' use "0". If *j* is omitted or "None", use "len(s)". If ' + '*i* is\n' + ' greater than or equal to *j*, the slice is empty.\n' + '\n' + '5. The slice of *s* from *i* to *j* with step *k* is defined ' + 'as the\n' + ' sequence of items with index "x = i + n*k" such that "0 <= ' + 'n <\n' + ' (j-i)/k". In other words, the indices are "i", "i+k", ' + '"i+2*k",\n' + ' "i+3*k" and so on, stopping when *j* is reached (but never\n' + ' including *j*). If *i* or *j* is greater than "len(s)", ' + 'use\n' + ' "len(s)". If *i* or *j* are omitted or "None", they become ' + '"end"\n' + ' values (which end depends on the sign of *k*). Note, *k* ' + 'cannot be\n' + ' zero. If *k* is "None", it is treated like "1".\n' + '\n' + '6. Concatenating immutable sequences always results in a new\n' + ' object. 
This means that building up a sequence by repeated\n' + ' concatenation will have a quadratic runtime cost in the ' + 'total\n' + ' sequence length. To get a linear runtime cost, you must ' + 'switch to\n' + ' one of the alternatives below:\n' + '\n' + ' * if concatenating "str" objects, you can build a list and ' + 'use\n' + ' "str.join()" at the end or else write to a "io.StringIO" ' + 'instance\n' + ' and retrieve its value when complete\n' + '\n' + ' * if concatenating "bytes" objects, you can similarly use\n' + ' "bytes.join()" or "io.BytesIO", or you can do in-place\n' + ' concatenation with a "bytearray" object. "bytearray" ' + 'objects are\n' + ' mutable and have an efficient overallocation mechanism\n' + '\n' + ' * if concatenating "tuple" objects, extend a "list" ' + 'instead\n' + '\n' + ' * for other types, investigate the relevant class ' + 'documentation\n' + '\n' + '7. Some sequence types (such as "range") only support item\n' + " sequences that follow specific patterns, and hence don't " + 'support\n' + ' sequence concatenation or repetition.\n' + '\n' + '8. "index" raises "ValueError" when *x* is not found in *s*. ' + 'When\n' + ' supported, the additional arguments to the index method ' + 'allow\n' + ' efficient searching of subsections of the sequence. Passing ' + 'the\n' + ' extra arguments is roughly equivalent to using ' + '"s[i:j].index(x)",\n' + ' only without copying any data and with the returned index ' + 'being\n' + ' relative to the start of the sequence rather than the start ' + 'of the\n' + ' slice.\n' + '\n' + '\n' + 'Immutable Sequence Types\n' + '========================\n' + '\n' + 'The only operation that immutable sequence types generally ' + 'implement\n' + 'that is not also implemented by mutable sequence types is ' + 'support for\n' + 'the "hash()" built-in.\n' + '\n' + 'This support allows immutable sequences, such as "tuple" ' + 'instances, to\n' + 'be used as "dict" keys and stored in "set" and "frozenset" ' + 'instances.\n' + '\n' + 'Attempting to hash an immutable sequence that contains ' + 'unhashable\n' + 'values will result in "TypeError".\n' + '\n' + '\n' + 'Mutable Sequence Types\n' + '======================\n' + '\n' + 'The operations in the following table are defined on mutable ' + 'sequence\n' + 'types. 
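A brief sketch of the immutable-sequence hashing behaviour described above (the values are invented for the example):

    >>> d = {(1, 2): 'point'}                # a tuple of hashable values is a valid key
    >>> (1, 2) in d
    True
    >>> {(1, [2]): 'oops'}                   # an unhashable value inside the tuple
    Traceback (most recent call last):
      ...
    TypeError: unhashable type: 'list'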
The "collections.abc.MutableSequence" ABC is provided ' + 'to make\n' + 'it easier to correctly implement these operations on custom ' + 'sequence\n' + 'types.\n' + '\n' + 'In the table *s* is an instance of a mutable sequence type, ' + '*t* is any\n' + 'iterable object and *x* is an arbitrary object that meets any ' + 'type and\n' + 'value restrictions imposed by *s* (for example, "bytearray" ' + 'only\n' + 'accepts integers that meet the value restriction "0 <= x <= ' + '255").\n' + '\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| Operation | ' + 'Result | Notes |\n' + '+================================+==================================+=======================+\n' + '| "s[i] = x" | item *i* of *s* is replaced ' + 'by | |\n' + '| | ' + '*x* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j] = t" | slice of *s* from *i* to ' + '*j* is | |\n' + '| | replaced by the contents of ' + 'the | |\n' + '| | iterable ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j]" | same as "s[i:j] = ' + '[]" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j:k] = t" | the elements of "s[i:j:k]" ' + 'are | (1) |\n' + '| | replaced by those of ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j:k]" | removes the elements ' + 'of | |\n' + '| | "s[i:j:k]" from the ' + 'list | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.append(x)" | appends *x* to the end of ' + 'the | |\n' + '| | sequence (same ' + 'as | |\n' + '| | "s[len(s):len(s)] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.clear()" | removes all items from "s" ' + '(same | (5) |\n' + '| | as "del ' + 's[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.copy()" | creates a shallow copy of ' + '"s" | (5) |\n' + '| | (same as ' + '"s[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.extend(t)" | extends *s* with the ' + 'contents of | |\n' + '| | *t* (same as ' + '"s[len(s):len(s)] = | |\n' + '| | ' + 't") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.insert(i, x)" | inserts *x* into *s* at ' + 'the | |\n' + '| | index given by *i* (same ' + 'as | |\n' + '| | "s[i:i] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.pop([i])" | retrieves the item at *i* ' + 'and | (2) |\n' + '| | also removes it from ' + '*s* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.remove(x)" | remove the first item from ' + '*s* | (3) |\n' + '| | where "s[i] == ' + 'x" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.reverse()" | reverses the items of *s* ' + 'in | (4) |\n' + '| | ' + 'place | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '\n' + 'Notes:\n' + '\n' + '1. 
*t* must have the same length as the slice it is ' + 'replacing.\n' + '\n' + '2. The optional argument *i* defaults to "-1", so that by ' + 'default\n' + ' the last item is removed and returned.\n' + '\n' + '3. "remove" raises "ValueError" when *x* is not found in *s*.\n' + '\n' + '4. The "reverse()" method modifies the sequence in place for\n' + ' economy of space when reversing a large sequence. To ' + 'remind users\n' + ' that it operates by side effect, it does not return the ' + 'reversed\n' + ' sequence.\n' + '\n' + '5. "clear()" and "copy()" are included for consistency with ' + 'the\n' + " interfaces of mutable containers that don't support " + 'slicing\n' + ' operations (such as "dict" and "set")\n' + '\n' + ' New in version 3.3: "clear()" and "copy()" methods.\n' + '\n' + '\n' + 'Lists\n' + '=====\n' + '\n' + 'Lists are mutable sequences, typically used to store ' + 'collections of\n' + 'homogeneous items (where the precise degree of similarity will ' + 'vary by\n' + 'application).\n' + '\n' + 'class class list([iterable])\n' + '\n' + ' Lists may be constructed in several ways:\n' + '\n' + ' * Using a pair of square brackets to denote the empty list: ' + '"[]"\n' + '\n' + ' * Using square brackets, separating items with commas: ' + '"[a]",\n' + ' "[a, b, c]"\n' + '\n' + ' * Using a list comprehension: "[x for x in iterable]"\n' + '\n' + ' * Using the type constructor: "list()" or "list(iterable)"\n' + '\n' + ' The constructor builds a list whose items are the same and ' + 'in the\n' + " same order as *iterable*'s items. *iterable* may be either " + 'a\n' + ' sequence, a container that supports iteration, or an ' + 'iterator\n' + ' object. If *iterable* is already a list, a copy is made ' + 'and\n' + ' returned, similar to "iterable[:]". For example, ' + '"list(\'abc\')"\n' + ' returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" ' + 'returns "[1, 2,\n' + ' 3]". If no argument is given, the constructor creates a new ' + 'empty\n' + ' list, "[]".\n' + '\n' + ' Many other operations also produce lists, including the ' + '"sorted()"\n' + ' built-in.\n' + '\n' + ' Lists implement all of the *common* and *mutable* sequence\n' + ' operations. Lists also provide the following additional ' + 'method:\n' + '\n' + ' sort(*, key=None, reverse=None)\n' + '\n' + ' This method sorts the list in place, using only "<" ' + 'comparisons\n' + ' between items. Exceptions are not suppressed - if any ' + 'comparison\n' + ' operations fail, the entire sort operation will fail ' + '(and the\n' + ' list will likely be left in a partially modified ' + 'state).\n' + '\n' + ' "sort()" accepts two arguments that can only be passed ' + 'by\n' + ' keyword (*keyword-only arguments*):\n' + '\n' + ' *key* specifies a function of one argument that is used ' + 'to\n' + ' extract a comparison key from each list element (for ' + 'example,\n' + ' "key=str.lower"). The key corresponding to each item in ' + 'the list\n' + ' is calculated once and then used for the entire sorting ' + 'process.\n' + ' The default value of "None" means that list items are ' + 'sorted\n' + ' directly without calculating a separate key value.\n' + '\n' + ' The "functools.cmp_to_key()" utility is available to ' + 'convert a\n' + ' 2.x style *cmp* function to a *key* function.\n' + '\n' + ' *reverse* is a boolean value. 
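An illustrative sketch of "sort()" with the *key* and *reverse* arguments (the word list is invented for the example):

    >>> words = ['banana', 'Apple', 'cherry']
    >>> words.sort(key=str.lower)            # case-insensitive, in place, returns None
    >>> words
    ['Apple', 'banana', 'cherry']
    >>> words.sort(key=len, reverse=True)    # stable: equal-length items keep their order
    >>> words
    ['banana', 'cherry', 'Apple']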
If set to "True", then ' + 'the list\n' + ' elements are sorted as if each comparison were ' + 'reversed.\n' + '\n' + ' This method modifies the sequence in place for economy ' + 'of space\n' + ' when sorting a large sequence. To remind users that it ' + 'operates\n' + ' by side effect, it does not return the sorted sequence ' + '(use\n' + ' "sorted()" to explicitly request a new sorted list ' + 'instance).\n' + '\n' + ' The "sort()" method is guaranteed to be stable. A sort ' + 'is\n' + ' stable if it guarantees not to change the relative order ' + 'of\n' + ' elements that compare equal --- this is helpful for ' + 'sorting in\n' + ' multiple passes (for example, sort by department, then ' + 'by salary\n' + ' grade).\n' + '\n' + ' **CPython implementation detail:** While a list is being ' + 'sorted,\n' + ' the effect of attempting to mutate, or even inspect, the ' + 'list is\n' + ' undefined. The C implementation of Python makes the ' + 'list appear\n' + ' empty for the duration, and raises "ValueError" if it ' + 'can detect\n' + ' that the list has been mutated during a sort.\n' + '\n' + '\n' + 'Tuples\n' + '======\n' + '\n' + 'Tuples are immutable sequences, typically used to store ' + 'collections of\n' + 'heterogeneous data (such as the 2-tuples produced by the ' + '"enumerate()"\n' + 'built-in). Tuples are also used for cases where an immutable ' + 'sequence\n' + 'of homogeneous data is needed (such as allowing storage in a ' + '"set" or\n' + '"dict" instance).\n' + '\n' + 'class class tuple([iterable])\n' + '\n' + ' Tuples may be constructed in a number of ways:\n' + '\n' + ' * Using a pair of parentheses to denote the empty tuple: ' + '"()"\n' + '\n' + ' * Using a trailing comma for a singleton tuple: "a," or ' + '"(a,)"\n' + '\n' + ' * Separating items with commas: "a, b, c" or "(a, b, c)"\n' + '\n' + ' * Using the "tuple()" built-in: "tuple()" or ' + '"tuple(iterable)"\n' + '\n' + ' The constructor builds a tuple whose items are the same and ' + 'in the\n' + " same order as *iterable*'s items. *iterable* may be either " + 'a\n' + ' sequence, a container that supports iteration, or an ' + 'iterator\n' + ' object. If *iterable* is already a tuple, it is returned\n' + ' unchanged. For example, "tuple(\'abc\')" returns "(\'a\', ' + '\'b\', \'c\')"\n' + ' and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no ' + 'argument is\n' + ' given, the constructor creates a new empty tuple, "()".\n' + '\n' + ' Note that it is actually the comma which makes a tuple, not ' + 'the\n' + ' parentheses. 
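A small sketch of the comma rule and the call ambiguity mentioned above (names invented for the example):

    >>> t = 1,                               # the comma makes the tuple
    >>> t
    (1,)
    >>> f = lambda *args: len(args)
    >>> f(1, 2, 3), f((1, 2, 3))             # three arguments vs. one 3-tuple argument
    (3, 1)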
The parentheses are optional, except in the ' + 'empty\n' + ' tuple case, or when they are needed to avoid syntactic ' + 'ambiguity.\n' + ' For example, "f(a, b, c)" is a function call with three ' + 'arguments,\n' + ' while "f((a, b, c))" is a function call with a 3-tuple as ' + 'the sole\n' + ' argument.\n' + '\n' + ' Tuples implement all of the *common* sequence operations.\n' + '\n' + 'For heterogeneous collections of data where access by name is ' + 'clearer\n' + 'than access by index, "collections.namedtuple()" may be a ' + 'more\n' + 'appropriate choice than a simple tuple object.\n' + '\n' + '\n' + 'Ranges\n' + '======\n' + '\n' + 'The "range" type represents an immutable sequence of numbers ' + 'and is\n' + 'commonly used for looping a specific number of times in "for" ' + 'loops.\n' + '\n' + 'class class range(stop)\n' + 'class class range(start, stop[, step])\n' + '\n' + ' The arguments to the range constructor must be integers ' + '(either\n' + ' built-in "int" or any object that implements the ' + '"__index__"\n' + ' special method). If the *step* argument is omitted, it ' + 'defaults to\n' + ' "1". If the *start* argument is omitted, it defaults to ' + '"0". If\n' + ' *step* is zero, "ValueError" is raised.\n' + '\n' + ' For a positive *step*, the contents of a range "r" are ' + 'determined\n' + ' by the formula "r[i] = start + step*i" where "i >= 0" and ' + '"r[i] <\n' + ' stop".\n' + '\n' + ' For a negative *step*, the contents of the range are still\n' + ' determined by the formula "r[i] = start + step*i", but the\n' + ' constraints are "i >= 0" and "r[i] > stop".\n' + '\n' + ' A range object will be empty if "r[0]" does not meet the ' + 'value\n' + ' constraint. Ranges do support negative indices, but these ' + 'are\n' + ' interpreted as indexing from the end of the sequence ' + 'determined by\n' + ' the positive indices.\n' + '\n' + ' Ranges containing absolute values larger than "sys.maxsize" ' + 'are\n' + ' permitted but some features (such as "len()") may raise\n' + ' "OverflowError".\n' + '\n' + ' Range examples:\n' + '\n' + ' >>> list(range(10))\n' + ' [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n' + ' >>> list(range(1, 11))\n' + ' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n' + ' >>> list(range(0, 30, 5))\n' + ' [0, 5, 10, 15, 20, 25]\n' + ' >>> list(range(0, 10, 3))\n' + ' [0, 3, 6, 9]\n' + ' >>> list(range(0, -10, -1))\n' + ' [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n' + ' >>> list(range(0))\n' + ' []\n' + ' >>> list(range(1, 0))\n' + ' []\n' + '\n' + ' Ranges implement all of the *common* sequence operations ' + 'except\n' + ' concatenation and repetition (due to the fact that range ' + 'objects\n' + ' can only represent sequences that follow a strict pattern ' + 'and\n' + ' repetition and concatenation will usually violate that ' + 'pattern).\n' + '\n' + 'The advantage of the "range" type over a regular "list" or ' + '"tuple" is\n' + 'that a "range" object will always take the same (small) amount ' + 'of\n' + 'memory, no matter the size of the range it represents (as it ' + 'only\n' + 'stores the "start", "stop" and "step" values, calculating ' + 'individual\n' + 'items and subranges as needed).\n' + '\n' + 'Range objects implement the "collections.abc.Sequence" ABC, ' + 'and\n' + 'provide features such as containment tests, element index ' + 'lookup,\n' + 'slicing and support for negative indices (see *Sequence Types ' + '---\n' + 'list, tuple, range*):\n' + '\n' + '>>> r = range(0, 20, 2)\n' + '>>> r\n' + 'range(0, 20, 2)\n' + '>>> 11 in r\n' + 'False\n' + '>>> 10 in r\n' + 'True\n' 
+ '>>> r.index(10)\n' + '5\n' + '>>> r[5]\n' + '10\n' + '>>> r[:5]\n' + 'range(0, 10, 2)\n' + '>>> r[-1]\n' + '18\n' + '\n' + 'Testing range objects for equality with "==" and "!=" compares ' + 'them as\n' + 'sequences. That is, two range objects are considered equal if ' + 'they\n' + 'represent the same sequence of values. (Note that two range ' + 'objects\n' + 'that compare equal might have different "start", "stop" and ' + '"step"\n' + 'attributes, for example "range(0) == range(2, 1, 3)" or ' + '"range(0, 3,\n' + '2) == range(0, 4, 2)".)\n' + '\n' + 'Changed in version 3.2: Implement the Sequence ABC. Support ' + 'slicing\n' + 'and negative indices. Test "int" objects for membership in ' + 'constant\n' + 'time instead of iterating through all items.\n' + '\n' + "Changed in version 3.3: Define '==' and '!=' to compare range " + 'objects\n' + 'based on the sequence of values they define (instead of ' + 'comparing\n' + 'based on object identity).\n' + '\n' + 'New in version 3.3: The "start", "stop" and "step" ' + 'attributes.\n', + 'typesseq-mutable': '\n' + 'Mutable Sequence Types\n' + '**********************\n' + '\n' + 'The operations in the following table are defined on ' + 'mutable sequence\n' + 'types. The "collections.abc.MutableSequence" ABC is ' + 'provided to make\n' + 'it easier to correctly implement these operations on ' + 'custom sequence\n' + 'types.\n' + '\n' + 'In the table *s* is an instance of a mutable sequence ' + 'type, *t* is any\n' + 'iterable object and *x* is an arbitrary object that ' + 'meets any type and\n' + 'value restrictions imposed by *s* (for example, ' + '"bytearray" only\n' + 'accepts integers that meet the value restriction "0 <= ' + 'x <= 255").\n' + '\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| Operation | ' + 'Result | ' + 'Notes |\n' + '+================================+==================================+=======================+\n' + '| "s[i] = x" | item *i* of *s* is ' + 'replaced by | |\n' + '| | ' + '*x* ' + '| |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j] = t" | slice of *s* from ' + '*i* to *j* is | |\n' + '| | replaced by the ' + 'contents of the | |\n' + '| | iterable ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j]" | same as "s[i:j] = ' + '[]" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j:k] = t" | the elements of ' + '"s[i:j:k]" are | (1) |\n' + '| | replaced by those ' + 'of *t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j:k]" | removes the ' + 'elements of | |\n' + '| | "s[i:j:k]" from the ' + 'list | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.append(x)" | appends *x* to the ' + 'end of the | |\n' + '| | sequence (same ' + 'as | |\n' + '| | "s[len(s):len(s)] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.clear()" | removes all items ' + 'from "s" (same | (5) |\n' + '| | as "del ' + 's[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.copy()" | creates a shallow ' + 'copy of "s" | (5) |\n' + '| | (same as ' + '"s[:]") | |\n' + 
'+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.extend(t)" | extends *s* with ' + 'the contents of | |\n' + '| | *t* (same as ' + '"s[len(s):len(s)] = | |\n' + '| | ' + 't") ' + '| |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.insert(i, x)" | inserts *x* into ' + '*s* at the | |\n' + '| | index given by *i* ' + '(same as | |\n' + '| | "s[i:i] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.pop([i])" | retrieves the item ' + 'at *i* and | (2) |\n' + '| | also removes it ' + 'from *s* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.remove(x)" | remove the first ' + 'item from *s* | (3) |\n' + '| | where "s[i] == ' + 'x" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.reverse()" | reverses the items ' + 'of *s* in | (4) |\n' + '| | ' + 'place ' + '| |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '\n' + 'Notes:\n' + '\n' + '1. *t* must have the same length as the slice it is ' + 'replacing.\n' + '\n' + '2. The optional argument *i* defaults to "-1", so that ' + 'by default\n' + ' the last item is removed and returned.\n' + '\n' + '3. "remove" raises "ValueError" when *x* is not found ' + 'in *s*.\n' + '\n' + '4. The "reverse()" method modifies the sequence in ' + 'place for\n' + ' economy of space when reversing a large sequence. ' + 'To remind users\n' + ' that it operates by side effect, it does not return ' + 'the reversed\n' + ' sequence.\n' + '\n' + '5. "clear()" and "copy()" are included for consistency ' + 'with the\n' + " interfaces of mutable containers that don't support " + 'slicing\n' + ' operations (such as "dict" and "set")\n' + '\n' + ' New in version 3.3: "clear()" and "copy()" ' + 'methods.\n', + 'unary': '\n' + 'Unary arithmetic and bitwise operations\n' + '***************************************\n' + '\n' + 'All unary arithmetic and bitwise operations have the same ' + 'priority:\n' + '\n' + ' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n' + '\n' + 'The unary "-" (minus) operator yields the negation of its ' + 'numeric\n' + 'argument.\n' + '\n' + 'The unary "+" (plus) operator yields its numeric argument ' + 'unchanged.\n' + '\n' + 'The unary "~" (invert) operator yields the bitwise inversion of ' + 'its\n' + 'integer argument. The bitwise inversion of "x" is defined as\n' + '"-(x+1)". It only applies to integral numbers.\n' + '\n' + 'In all three cases, if the argument does not have the proper ' + 'type, a\n' + '"TypeError" exception is raised.\n', + 'while': '\n' + 'The "while" statement\n' + '*********************\n' + '\n' + 'The "while" statement is used for repeated execution as long as ' + 'an\n' + 'expression is true:\n' + '\n' + ' while_stmt ::= "while" expression ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'This repeatedly tests the expression and, if it is true, executes ' + 'the\n' + 'first suite; if the expression is false (which may be the first ' + 'time\n' + 'it is tested) the suite of the "else" clause, if present, is ' + 'executed\n' + 'and the loop terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause\'s suite. 
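A minimal sketch of the "while"..."else" interaction described above (the values are invented for the example):

    >>> n = 0
    >>> while n < 3:
    ...     n += 1                           # no break, so the "else" suite runs
    ... else:
    ...     print('loop finished, n =', n)
    ...
    loop finished, n = 3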
A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and goes ' + 'back\n' + 'to testing the expression.\n', + 'with': '\n' + 'The "with" statement\n' + '********************\n' + '\n' + 'The "with" statement is used to wrap the execution of a block ' + 'with\n' + 'methods defined by a context manager (see section *With Statement\n' + 'Context Managers*). This allows common ' + '"try"..."except"..."finally"\n' + 'usage patterns to be encapsulated for convenient reuse.\n' + '\n' + ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n' + ' with_item ::= expression ["as" target]\n' + '\n' + 'The execution of the "with" statement with one "item" proceeds as\n' + 'follows:\n' + '\n' + '1. The context expression (the expression given in the ' + '"with_item")\n' + ' is evaluated to obtain a context manager.\n' + '\n' + '2. The context manager\'s "__exit__()" is loaded for later use.\n' + '\n' + '3. The context manager\'s "__enter__()" method is invoked.\n' + '\n' + '4. If a target was included in the "with" statement, the return\n' + ' value from "__enter__()" is assigned to it.\n' + '\n' + ' Note: The "with" statement guarantees that if the ' + '"__enter__()"\n' + ' method returns without an error, then "__exit__()" will ' + 'always be\n' + ' called. Thus, if an error occurs during the assignment to ' + 'the\n' + ' target list, it will be treated the same as an error ' + 'occurring\n' + ' within the suite would be. See step 6 below.\n' + '\n' + '5. The suite is executed.\n' + '\n' + '6. The context manager\'s "__exit__()" method is invoked. If an\n' + ' exception caused the suite to be exited, its type, value, and\n' + ' traceback are passed as arguments to "__exit__()". Otherwise, ' + 'three\n' + ' "None" arguments are supplied.\n' + '\n' + ' If the suite was exited due to an exception, and the return ' + 'value\n' + ' from the "__exit__()" method was false, the exception is ' + 'reraised.\n' + ' If the return value was true, the exception is suppressed, and\n' + ' execution continues with the statement following the "with"\n' + ' statement.\n' + '\n' + ' If the suite was exited for any reason other than an exception, ' + 'the\n' + ' return value from "__exit__()" is ignored, and execution ' + 'proceeds\n' + ' at the normal location for the kind of exit that was taken.\n' + '\n' + 'With more than one item, the context managers are processed as if\n' + 'multiple "with" statements were nested:\n' + '\n' + ' with A() as a, B() as b:\n' + ' suite\n' + '\n' + 'is equivalent to\n' + '\n' + ' with A() as a:\n' + ' with B() as b:\n' + ' suite\n' + '\n' + 'Changed in version 3.1: Support for multiple context expressions.\n' + '\n' + 'See also: **PEP 0343** - The "with" statement\n' + '\n' + ' The specification, background, and examples for the Python ' + '"with"\n' + ' statement.\n', + 'yield': '\n' + 'The "yield" statement\n' + '*********************\n' + '\n' + ' yield_stmt ::= yield_expression\n' + '\n' + 'A "yield" statement is semantically equivalent to a *yield\n' + 'expression*. The yield statement can be used to omit the ' + 'parentheses\n' + 'that would otherwise be required in the equivalent yield ' + 'expression\n' + 'statement. 
For example, the yield statements\n' + '\n' + ' yield \n' + ' yield from \n' + '\n' + 'are equivalent to the yield expression statements\n' + '\n' + ' (yield )\n' + ' (yield from )\n' + '\n' + 'Yield expressions and statements are only used when defining a\n' + '*generator* function, and are only used in the body of the ' + 'generator\n' + 'function. Using yield in a function definition is sufficient to ' + 'cause\n' + 'that definition to create a generator function instead of a ' + 'normal\n' + 'function.\n' + '\n' + 'For full details of "yield" semantics, refer to the *Yield\n' + 'expressions* section.\n'} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 00:59:01 2014 From: python-checkins at python.org (larry.hastings) Date: Mon, 22 Sep 2014 22:59:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge=2E?= Message-ID: <20140922225900.112410.21484@mail.hg.python.org> https://hg.python.org/cpython/rev/ad45c2707006 changeset: 92530:ad45c2707006 parent: 92527:f64e90680acd parent: 92529:81f2d5071da3 user: Larry Hastings date: Mon Sep 22 23:58:41 2014 +0100 summary: Null merge. files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 02:17:30 2014 From: python-checkins at python.org (larry.hastings) Date: Tue, 23 Sep 2014 00:17:30 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_New_PEP=3A_PEP_478=2C_Release?= =?utf-8?q?_Schedule_For_Python_3=2E5=2E?= Message-ID: <20140923001725.44096.68995@mail.hg.python.org> https://hg.python.org/peps/rev/0f9e46e5e65d changeset: 5558:0f9e46e5e65d user: Larry Hastings date: Tue Sep 23 01:17:13 2014 +0100 summary: New PEP: PEP 478, Release Schedule For Python 3.5. files: pep-0478.txt | 81 ++++++++++++++++++++++++++++++++++++++++ 1 files changed, 81 insertions(+), 0 deletions(-) diff --git a/pep-0478.txt b/pep-0478.txt new file mode 100644 --- /dev/null +++ b/pep-0478.txt @@ -0,0 +1,81 @@ +PEP: 478 +Title: Python 3.5 Release Schedule +Version: $Revision$ +Last-Modified: $Date$ +Author: Larry Hastings +Status: Active +Type: Informational +Content-Type: text/x-rst +Created: 22-Sep-2014 +Python-Version: 3.5 + + +Abstract +======== + +This document describes the development and release schedule for +Python 3.5. The schedule primarily concerns itself with PEP-sized +items. + +.. Small features may be added up to the first beta + release. Bugs may be fixed until the final release, + which is planned for September 2015. + + +Release Manager and Crew +======================== + +- 3.5 Release Manager: Larry Hastings +- Windows installers: Martin v. Löwis +- Mac installers: Ned Deily / Ronald Oussoren +- Documentation: Georg Brandl + + +Release Schedule +================ + +The releases: + +- 3.5.0 alpha 1: February 1, 2015 +- 3.5.0 alpha 2: March 8, 2015 +- 3.5.0 alpha 3: March 28, 2015 +- 3.5.0 alpha 4: April 19, 2015 +- 3.5.0 beta 1: May 24, 2015 +- 3.5.0 beta 2: July 5, 2015 +- 3.5.0 beta 3: July 26, 2015 +- 3.5.0 candidate 1: August 9, 2015 +- 3.5.0 candidate 2: August 23, 2015 +- 3.5.0 candidate 3: September 6, 2015 +- 3.5.0 final: September 13, 2015 + +(Beta 1 is also "feature freeze"--no new features beyond this point.) 
+ + + Features for 3.5 +================ + +Proposed changes for 3.5: + +* PEP 431, improved support for time zone databases +* PEP 441, improved Python zip application support +* PEP 447, support for __locallookup__ metaclass method +* PEP 448, additional unpacking generalizations +* PEP 455, key transforming dictionary + + +Copyright +========= + +This document has been placed in the public domain. + + + +.. + Local Variables: + mode: indented-text + indent-tabs-mode: nil + sentence-end-double-space: t + fill-column: 70 + coding: utf-8 + End: -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 23 04:44:50 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 23 Sep 2014 02:44:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI0NTkp?= Message-ID: <20140923024436.50562.90722@mail.hg.python.org> https://hg.python.org/cpython/rev/6dcc96fa3970 changeset: 92532:6dcc96fa3970 parent: 92530:ad45c2707006 parent: 92531:8eb4eec8626c user: Benjamin Peterson date: Mon Sep 22 22:44:21 2014 -0400 summary: merge 3.4 (#22459) files: Doc/library/stdtypes.rst | 4 ++-- Misc/ACKS | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1830,7 +1830,7 @@ >>> '1,2,3'.split(',') ['1', '2', '3'] >>> '1,2,3'.split(',', maxsplit=1) - ['1', '2 3'] + ['1', '2,3'] >>> '1,2,,3,'.split(',') ['1', '2', '', '3', ''] @@ -2695,7 +2695,7 @@ >>> b'1,2,3'.split(b',') [b'1', b'2', b'3'] >>> b'1,2,3'.split(b',', maxsplit=1) - [b'1', b'2 3'] + [b'1', b'2,3'] >>> b'1,2,,3,'.split(b',') [b'1', b'2', b'', b'3', b''] diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -298,6 +298,7 @@ Joaquin Cuenca Abela John Cugini Tom Culliton +Raúl Cumplido Antonio Cuni Brian Curtin Lisandro Dalcin -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 04:44:50 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 23 Sep 2014 02:44:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_fix_error_in_s?= =?utf-8?q?plit=28=29_examples_=28closes_=2322459=29?= Message-ID: <20140923024436.44096.7776@mail.hg.python.org> https://hg.python.org/cpython/rev/8eb4eec8626c changeset: 92531:8eb4eec8626c branch: 3.4 parent: 92529:81f2d5071da3 user: Benjamin Peterson date: Mon Sep 22 22:43:50 2014 -0400 summary: fix error in split() examples (closes #22459) Patch by Raúl Cumplido. 
files: Doc/library/stdtypes.rst | 4 ++-- Misc/ACKS | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1830,7 +1830,7 @@ >>> '1,2,3'.split(',') ['1', '2', '3'] >>> '1,2,3'.split(',', maxsplit=1) - ['1', '2 3'] + ['1', '2,3'] >>> '1,2,,3,'.split(',') ['1', '2', '', '3', ''] @@ -2695,7 +2695,7 @@ >>> b'1,2,3'.split(b',') [b'1', b'2', b'3'] >>> b'1,2,3'.split(b',', maxsplit=1) - [b'1', b'2 3'] + [b'1', b'2,3'] >>> b'1,2,,3,'.split(b',') [b'1', b'2', b'', b'3', b''] diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -296,6 +296,7 @@ Joaquin Cuenca Abela John Cugini Tom Culliton +Raúl Cumplido Antonio Cuni Brian Curtin Lisandro Dalcin -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Tue Sep 23 09:50:42 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 23 Sep 2014 09:50:42 +0200 Subject: [Python-checkins] Daily reference leaks (ad45c2707006): sum=1 Message-ID: results for ad45c2707006 on branch "default" -------------------------------------------- test_collections leaked [-2, 0, 0] references, sum=-2 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 0] references, sum=0 test_site leaked [2, -2, 0] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogGwY1I5', '-x'] From python-checkins at python.org Tue Sep 23 11:44:47 2014 From: python-checkins at python.org (nick.coghlan) Date: Tue, 23 Sep 2014 09:44:47 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Some_PEP_status_updates_based?= =?utf-8?q?_on_my_current_priorities?= Message-ID: <20140923094441.50558.89047@mail.hg.python.org> https://hg.python.org/peps/rev/e2394256edf2 changeset: 5559:e2394256edf2 user: Nick Coghlan date: Tue Sep 23 19:44:33 2014 +1000 summary: Some PEP status updates based on my current priorities files: pep-0432.txt | 27 +-------------------------- pep-0462.txt | 9 ++++++++- pep-0474.txt | 9 ++++++++- 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/pep-0432.txt b/pep-0432.txt --- a/pep-0432.txt +++ b/pep-0432.txt @@ -3,7 +3,7 @@ Version: $Revision$ Last-Modified: $Date$ Author: Nick Coghlan -Status: Deferred +Status: Draft Type: Standards Track Content-Type: text/x-rst Created: 28-Dec-2012 @@ -25,31 +25,6 @@ implementation is developed. -PEP Deferral -============ - -Python 3.4 is nearing its first alpha, and already includes a couple of -significant low level changes in PEP 445 (memory allocator customisation) -and PEP 442 (safe object finalization). As a result of the latter PEP, -the shutdown procedure of CPython has also been changed to be more heavily -reliant on the cyclic garbage collector, significantly reducing the -number of modules that will experience the "module globals set to None" -behaviour that is used to deliberate break cycles and attempt to releases -more external resources cleanly. - -Furthermore, I am heavily involved in the current round of updates to the -Python packaging ecosystem (as both the lead author of PEP 426 and -BDFL-delegate for several other PEPs), leaving little to spare to work on -this proposal. The other developers I would trust to lead this effort are -also working on other things. 
- -So, due to those practical resource constraints, the proximity of Python -3.4 deadlines, and recognition that making too many significant changes to -the low level CPython infrastructure in one release is likely to be unwise, -further work on this PEP has been deferred to the Python 3.5 development -cycle. - - Proposal ======== diff --git a/pep-0462.txt b/pep-0462.txt --- a/pep-0462.txt +++ b/pep-0462.txt @@ -3,7 +3,7 @@ Version: $Revision$ Last-Modified: $Date$ Author: Nick Coghlan -Status: Draft +Status: Deferred Type: Process Content-Type: text/x-rst Created: 23-Jan-2014 @@ -22,6 +22,13 @@ their changes incorporated. +PEP Deferral +============ + +This PEP is deferred largely because I don't currently have time to work on +it. If anyone would like to take it over, let me know. + + Rationale for changes to the core development workflow ====================================================== diff --git a/pep-0474.txt b/pep-0474.txt --- a/pep-0474.txt +++ b/pep-0474.txt @@ -3,7 +3,7 @@ Version: $Revision$ Last-Modified: $Date$ Author: Nick Coghlan -Status: Draft +Status: Deferred Type: Process Content-Type: text/x-rst Created: 19-Jul-2014 @@ -23,6 +23,13 @@ for CPython itself (see PEP 462 in relation to that). +PEP Deferral +============ + +This PEP is deferred largely because I don't currently have time to work on +it. If anyone would like to take it over, let me know. + + Proposal ======== -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 23 19:03:05 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 17:03:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fixed_reference_leak_in_the_=22backslashreplace=22_error?= =?utf-8?q?_handler=2E?= Message-ID: <20140923170237.44092.90372@mail.hg.python.org> https://hg.python.org/cpython/rev/e9e790ea071c changeset: 92535:e9e790ea071c parent: 92532:6dcc96fa3970 parent: 92534:80e0fc462005 user: Serhiy Storchaka date: Tue Sep 23 19:59:34 2014 +0300 summary: Fixed reference leak in the "backslashreplace" error handler. files: Python/codecs.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Python/codecs.c b/Python/codecs.c --- a/Python/codecs.c +++ b/Python/codecs.c @@ -890,8 +890,10 @@ ressize += 1+1+2; } res = PyUnicode_New(ressize, 127); - if (res==NULL) + if (res == NULL) { + Py_DECREF(object); return NULL; + } for (i = start, outp = PyUnicode_1BYTE_DATA(res); i < end; ++i) { c = PyUnicode_READ_CHAR(object, i); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 19:03:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 17:03:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fixed_referenc?= =?utf-8?q?e_leak_in_the_=22backslashreplace=22_error_handler=2E?= Message-ID: <20140923170237.44100.54521@mail.hg.python.org> https://hg.python.org/cpython/rev/80e0fc462005 changeset: 92534:80e0fc462005 branch: 3.4 parent: 92531:8eb4eec8626c user: Serhiy Storchaka date: Tue Sep 23 19:59:09 2014 +0300 summary: Fixed reference leak in the "backslashreplace" error handler. 
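For context on the handler patched above: "backslashreplace" substitutes characters that cannot be encoded with backslashed escape sequences, and the fix only adds the missing Py_DECREF on the error exit of its C implementation. A small illustration of the handler's normal behaviour (encode-only in 3.4; this snippet is not part of the patch):

    # The "backslashreplace" error handler replaces unencodable characters
    # with \xNN / \uNNNN escapes instead of raising UnicodeEncodeError.
    text = 'caf\u00e9 \u2014 na\u00efve'
    print(text.encode('ascii', 'backslashreplace'))
    # -> b'caf\\xe9 \\u2014 na\\xefve'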
files: Python/codecs.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Python/codecs.c b/Python/codecs.c --- a/Python/codecs.c +++ b/Python/codecs.c @@ -890,8 +890,10 @@ ressize += 1+1+2; } res = PyUnicode_New(ressize, 127); - if (res==NULL) + if (res == NULL) { + Py_DECREF(object); return NULL; + } for (i = start, outp = PyUnicode_1BYTE_DATA(res); i < end; ++i) { c = PyUnicode_READ_CHAR(object, i); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 19:03:06 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 17:03:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Fixed_referenc?= =?utf-8?q?e_leak_in_the_=22backslashreplace=22_error_handler=2E?= Message-ID: <20140923170235.44090.20035@mail.hg.python.org> https://hg.python.org/cpython/rev/49e293fb36fe changeset: 92533:49e293fb36fe branch: 2.7 parent: 92528:66e6807442c9 user: Serhiy Storchaka date: Tue Sep 23 19:58:57 2014 +0300 summary: Fixed reference leak in the "backslashreplace" error handler. files: Python/codecs.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Python/codecs.c b/Python/codecs.c --- a/Python/codecs.c +++ b/Python/codecs.c @@ -696,8 +696,10 @@ ressize += 1+1+2; } res = PyUnicode_FromUnicode(NULL, ressize); - if (res==NULL) + if (res == NULL) { + Py_DECREF(object); return NULL; + } for (p = startp+start, outp = PyUnicode_AS_UNICODE(res); p < startp+end; ++p) { Py_UNICODE c = *p; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 20:40:47 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 18:40:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxODY2?= =?utf-8?q?=3A_ZipFile=2Eclose=28=29_no_longer_writes_ZIP64_central_direct?= =?utf-8?q?ory?= Message-ID: <20140923184045.50560.41019@mail.hg.python.org> https://hg.python.org/cpython/rev/8f25d118ce38 changeset: 92537:8f25d118ce38 branch: 3.4 parent: 92534:80e0fc462005 user: Serhiy Storchaka date: Tue Sep 23 21:34:24 2014 +0300 summary: Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. 
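With the change summarized above, a ZipFile opened with allowZip64=False refuses to grow past the ZIP64 thresholds instead of silently emitting ZIP64 records on close(). A minimal sketch of that behaviour on a patched interpreter, lowering the internal file-count constant the same way the new tests do so the demo stays small; the archive name is arbitrary and ZIP_FILECOUNT_LIMIT is not public API:

    import zipfile

    zipfile.ZIP_FILECOUNT_LIMIT = 9              # test-style override, not public API
    with zipfile.ZipFile('demo.zip', 'w', allowZip64=False) as zf:   # arbitrary file name
        for i in range(9):
            zf.writestr('foo%08d' % i, '%d' % i)
        try:
            zf.writestr('one-too-many', b'')     # the 10th entry exceeds the lowered limit
        except zipfile.LargeZipFile as exc:
            print('refused:', exc)               # "Files count would require ZIP64 extensions"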
files: Lib/test/test_zipfile64.py | 45 ++++++++++++++++++++++--- Lib/zipfile.py | 38 +++++++++++++-------- Misc/NEWS | 3 + 3 files changed, 66 insertions(+), 20 deletions(-) diff --git a/Lib/test/test_zipfile64.py b/Lib/test/test_zipfile64.py --- a/Lib/test/test_zipfile64.py +++ b/Lib/test/test_zipfile64.py @@ -18,7 +18,7 @@ from io import StringIO from tempfile import TemporaryFile -from test.support import TESTFN, run_unittest, requires_zlib +from test.support import TESTFN, requires_zlib TESTFN2 = TESTFN + "2" @@ -92,7 +92,7 @@ def testMoreThan64kFiles(self): # This test checks that more than 64k files can be added to an archive, # and that the resulting archive can be read properly by ZipFile - zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=True) zipf.debug = 100 numfiles = (1 << 16) * 3//2 for i in range(numfiles): @@ -105,14 +105,47 @@ for i in range(numfiles): content = zipf2.read("foo%08d" % i).decode('ascii') self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + + def testMoreThan64kFilesAppend(self): + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf.debug = 100 + numfiles = (1 << 16) - 1 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) zipf.close() + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = (1 << 16) * 3//2 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, mode="r") + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): support.unlink(TESTFN) support.unlink(TESTFN2) -def test_main(): - run_unittest(TestsWithSourceFile, OtherTests) - if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -50,7 +50,7 @@ ZIP64_LIMIT = (1 << 31) - 1 -ZIP_FILECOUNT_LIMIT = 1 << 16 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 ZIP_MAX_COMMENT = (1 << 16) - 1 # constants for Zip file compression methods @@ -1304,13 +1304,17 @@ raise RuntimeError( "Attempt to write ZIP archive that was already closed") _check_compression(zinfo.compress_type) - if zinfo.file_size > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile("Filesize would require ZIP64 extensions") - if zinfo.header_offset > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile( - "Zipfile size would require ZIP64 extensions") + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise 
LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") def write(self, filename, arcname=None, compress_type=None): """Put the bytes from filename into the archive under the name @@ -1464,10 +1468,8 @@ try: if self.mode in ("w", "a") and self._didModify: # write ending records - count = 0 pos1 = self.fp.tell() for zinfo in self.filelist: # write central directory - count = count + 1 dt = zinfo.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) @@ -1531,13 +1533,21 @@ pos2 = self.fp.tell() # Write end-of-zip-archive record - centDirCount = count + centDirCount = len(self.filelist) centDirSize = pos2 - pos1 centDirOffset = pos1 - if (centDirCount >= ZIP_FILECOUNT_LIMIT or - centDirOffset > ZIP64_LIMIT or - centDirSize > ZIP64_LIMIT): + requires_zip64 = None + if centDirCount > ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") zip64endrec = struct.pack( structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,9 @@ Library ------- +- Issue #21866: ZipFile.close() no longer writes ZIP64 central directory + records if allowZip64 is false. + - Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. Removed trailing spaces in debugging output. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 20:40:48 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 18:40:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIxODY2?= =?utf-8?q?=3A_ZipFile=2Eclose=28=29_no_longer_writes_ZIP64_central_direct?= =?utf-8?q?ory?= Message-ID: <20140923184045.112208.43083@mail.hg.python.org> https://hg.python.org/cpython/rev/8a010ca89094 changeset: 92536:8a010ca89094 branch: 2.7 parent: 92533:49e293fb36fe user: Serhiy Storchaka date: Tue Sep 23 21:33:52 2014 +0300 summary: Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. 
files: Lib/test/test_zipfile64.py | 37 +++++++++++++++++++++++++- Lib/zipfile.py | 37 ++++++++++++++++--------- Misc/NEWS | 3 ++ 3 files changed, 63 insertions(+), 14 deletions(-) diff --git a/Lib/test/test_zipfile64.py b/Lib/test/test_zipfile64.py --- a/Lib/test/test_zipfile64.py +++ b/Lib/test/test_zipfile64.py @@ -99,7 +99,7 @@ def testMoreThan64kFiles(self): # This test checks that more than 64k files can be added to an archive, # and that the resulting archive can be read properly by ZipFile - zipf = zipfile.ZipFile(TESTFN, mode="w") + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=True) zipf.debug = 100 numfiles = (1 << 16) * 3/2 for i in xrange(numfiles): @@ -111,8 +111,43 @@ self.assertEqual(len(zipf2.namelist()), numfiles) for i in xrange(numfiles): self.assertEqual(zipf2.read("foo%08d" % i), "%d" % (i**3 % 57)) + zipf2.close() + + def testMoreThan64kFilesAppend(self): + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf.debug = 100 + numfiles = (1 << 16) - 1 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) zipf.close() + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = (1 << 16) * 3//2 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, mode="r") + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + self.assertEqual(zipf2.read("foo%08d" % i), "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): test_support.unlink(TESTFN) test_support.unlink(TESTFN2) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -30,7 +30,7 @@ error = BadZipfile # The exception raised by this module ZIP64_LIMIT = (1 << 31) - 1 -ZIP_FILECOUNT_LIMIT = 1 << 16 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 ZIP_MAX_COMMENT = (1 << 16) - 1 # constants for Zip file compression methods @@ -1101,12 +1101,17 @@ if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED): raise RuntimeError, \ "That compression method is not supported" - if zinfo.file_size > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile("Filesize would require ZIP64 extensions") - if zinfo.header_offset > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile("Zipfile size would require ZIP64 extensions") + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") def write(self, filename, arcname=None, compress_type=None): """Put the bytes from filename into the archive under the name @@ -1256,10 +1261,8 @@ try: if self.mode in ("w", "a") and self._didModify: # write ending records - count = 0 pos1 = 
self.fp.tell() for zinfo in self.filelist: # write central directory - count = count + 1 dt = zinfo.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) @@ -1320,13 +1323,21 @@ pos2 = self.fp.tell() # Write end-of-zip-archive record - centDirCount = count + centDirCount = len(self.filelist) centDirSize = pos2 - pos1 centDirOffset = pos1 - if (centDirCount >= ZIP_FILECOUNT_LIMIT or - centDirOffset > ZIP64_LIMIT or - centDirSize > ZIP64_LIMIT): + requires_zip64 = None + if centDirCount > ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") zip64endrec = struct.pack( structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #21866: ZipFile.close() no longer writes ZIP64 central directory + records if allowZip64 is false. + - Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re module. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 20:40:47 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 18:40:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321866=3A_ZipFile=2Eclose=28=29_no_longer_writes?= =?utf-8?q?_ZIP64_central_directory?= Message-ID: <20140923184046.112574.17194@mail.hg.python.org> https://hg.python.org/cpython/rev/d361d2176121 changeset: 92538:d361d2176121 parent: 92535:e9e790ea071c parent: 92537:8f25d118ce38 user: Serhiy Storchaka date: Tue Sep 23 21:35:57 2014 +0300 summary: Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. 
files: Lib/test/test_zipfile64.py | 45 ++++++++++++++++++++++--- Lib/zipfile.py | 38 +++++++++++++-------- Misc/NEWS | 3 + 3 files changed, 66 insertions(+), 20 deletions(-) diff --git a/Lib/test/test_zipfile64.py b/Lib/test/test_zipfile64.py --- a/Lib/test/test_zipfile64.py +++ b/Lib/test/test_zipfile64.py @@ -18,7 +18,7 @@ from io import StringIO from tempfile import TemporaryFile -from test.support import TESTFN, run_unittest, requires_zlib +from test.support import TESTFN, requires_zlib TESTFN2 = TESTFN + "2" @@ -92,7 +92,7 @@ def testMoreThan64kFiles(self): # This test checks that more than 64k files can be added to an archive, # and that the resulting archive can be read properly by ZipFile - zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=True) zipf.debug = 100 numfiles = (1 << 16) * 3//2 for i in range(numfiles): @@ -105,14 +105,47 @@ for i in range(numfiles): content = zipf2.read("foo%08d" % i).decode('ascii') self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + + def testMoreThan64kFilesAppend(self): + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf.debug = 100 + numfiles = (1 << 16) - 1 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) zipf.close() + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = (1 << 16) * 3//2 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, mode="r") + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): support.unlink(TESTFN) support.unlink(TESTFN2) -def test_main(): - run_unittest(TestsWithSourceFile, OtherTests) - if __name__ == "__main__": - test_main() + unittest.main() diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -50,7 +50,7 @@ ZIP64_LIMIT = (1 << 31) - 1 -ZIP_FILECOUNT_LIMIT = 1 << 16 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 ZIP_MAX_COMMENT = (1 << 16) - 1 # constants for Zip file compression methods @@ -1304,13 +1304,17 @@ raise RuntimeError( "Attempt to write ZIP archive that was already closed") _check_compression(zinfo.compress_type) - if zinfo.file_size > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile("Filesize would require ZIP64 extensions") - if zinfo.header_offset > ZIP64_LIMIT: - if not self._allowZip64: - raise LargeZipFile( - "Zipfile size would require ZIP64 extensions") + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise 
LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") def write(self, filename, arcname=None, compress_type=None): """Put the bytes from filename into the archive under the name @@ -1464,10 +1468,8 @@ try: if self.mode in ("w", "a") and self._didModify: # write ending records - count = 0 pos1 = self.fp.tell() for zinfo in self.filelist: # write central directory - count = count + 1 dt = zinfo.date_time dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) @@ -1531,13 +1533,21 @@ pos2 = self.fp.tell() # Write end-of-zip-archive record - centDirCount = count + centDirCount = len(self.filelist) centDirSize = pos2 - pos1 centDirOffset = pos1 - if (centDirCount >= ZIP_FILECOUNT_LIMIT or - centDirOffset > ZIP64_LIMIT or - centDirSize > ZIP64_LIMIT): + requires_zip64 = None + if centDirCount > ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") zip64endrec = struct.pack( structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #21866: ZipFile.close() no longer writes ZIP64 central directory + records if allowZip64 is false. + - Issue #22278: Fix urljoin problem with relative urls, a regression observed after changes to issue22118 were submitted. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:30:20 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:30:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogRml4ZWQgdGVzdF9s?= =?utf-8?q?arge=5Ffile=5Fexception=2E_Ported_tests_for_large_count_of_file?= =?utf-8?q?s?= Message-ID: <20140923193013.50564.98705@mail.hg.python.org> https://hg.python.org/cpython/rev/c7fd7bf039a8 changeset: 92540:c7fd7bf039a8 branch: 3.4 parent: 92537:8f25d118ce38 user: Serhiy Storchaka date: Tue Sep 23 22:27:34 2014 +0300 summary: Fixed test_large_file_exception. Ported tests for large count of files to AbstractTestZip64InSmallFiles. 
files: Lib/test/test_zipfile.py | 63 +++++++++++++++++++++++++++- 1 files changed, 62 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -462,7 +462,9 @@ def setUp(self): self._limit = zipfile.ZIP64_LIMIT - zipfile.ZIP64_LIMIT = 5 + self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT + zipfile.ZIP64_LIMIT = 1000 + zipfile.ZIP_FILECOUNT_LIMIT = 9 # Make a source file with some lines with open(TESTFN, "wb") as fp: @@ -529,8 +531,67 @@ for f in get_files(self): self.zip_test(f, self.compression) + def test_too_many_files(self): + # This test checks that more than 64k files can be added to an archive, + # and that the resulting archive can be read properly by ZipFile + zipf = zipfile.ZipFile(TESTFN, "w", self.compression, + allowZip64=True) + zipf.debug = 100 + numfiles = 15 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression) + self.assertEqual(len(zipf2.namelist()), numfiles) + for i in range(numfiles): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + + def test_too_many_files_append(self): + zipf = zipfile.ZipFile(TESTFN, "w", self.compression, + allowZip64=False) + zipf.debug = 100 + numfiles = 9 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, "a", self.compression, + allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, "a", self.compression, + allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = 15 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression) + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): zipfile.ZIP64_LIMIT = self._limit + zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit unlink(TESTFN) unlink(TESTFN2) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:30:20 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:30:20 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fixed_test=5Flarge=5Ffile=5Fexception=2E_Ported_tests_fo?= =?utf-8?q?r_large_count_of_files?= Message-ID: <20140923193013.44098.31910@mail.hg.python.org> https://hg.python.org/cpython/rev/a57351993623 changeset: 92541:a57351993623 parent: 92538:d361d2176121 parent: 92540:c7fd7bf039a8 user: Serhiy Storchaka date: Tue Sep 23 22:28:03 2014 +0300 summary: Fixed test_large_file_exception. Ported tests for large count of files to AbstractTestZip64InSmallFiles. 
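The ported tests above rely on temporarily lowering the module-level limits in setUp() and restoring them in tearDown(), so ZIP64 code paths can be exercised with tiny archives without affecting other tests. The same pattern in isolation, as a sketch with an illustrative test method:

    import unittest
    import zipfile

    class TinyLimitTest(unittest.TestCase):
        def setUp(self):
            # save the real limits, then shrink them for the duration of a test
            self._limit = zipfile.ZIP64_LIMIT
            self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT
            zipfile.ZIP64_LIMIT = 1000
            zipfile.ZIP_FILECOUNT_LIMIT = 9

        def tearDown(self):
            # always restore, even if the test body failed
            zipfile.ZIP64_LIMIT = self._limit
            zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit

        def test_limits_are_lowered(self):
            self.assertEqual(zipfile.ZIP_FILECOUNT_LIMIT, 9)

    if __name__ == '__main__':
        unittest.main()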
files: Lib/test/test_zipfile.py | 63 +++++++++++++++++++++++++++- 1 files changed, 62 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -462,7 +462,9 @@ def setUp(self): self._limit = zipfile.ZIP64_LIMIT - zipfile.ZIP64_LIMIT = 5 + self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT + zipfile.ZIP64_LIMIT = 1000 + zipfile.ZIP_FILECOUNT_LIMIT = 9 # Make a source file with some lines with open(TESTFN, "wb") as fp: @@ -529,8 +531,67 @@ for f in get_files(self): self.zip_test(f, self.compression) + def test_too_many_files(self): + # This test checks that more than 64k files can be added to an archive, + # and that the resulting archive can be read properly by ZipFile + zipf = zipfile.ZipFile(TESTFN, "w", self.compression, + allowZip64=True) + zipf.debug = 100 + numfiles = 15 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression) + self.assertEqual(len(zipf2.namelist()), numfiles) + for i in range(numfiles): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + + def test_too_many_files_append(self): + zipf = zipfile.ZipFile(TESTFN, "w", self.compression, + allowZip64=False) + zipf.debug = 100 + numfiles = 9 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, "a", self.compression, + allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, "a", self.compression, + allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = 15 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, "r", self.compression) + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + content = zipf2.read("foo%08d" % i).decode('ascii') + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): zipfile.ZIP64_LIMIT = self._limit + zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit unlink(TESTFN) unlink(TESTFN2) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:30:20 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:30:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogRml4ZWQgdGVzdF9s?= =?utf-8?q?arge=5Ffile=5Fexception=2E_Ported_tests_for_large_count_of_file?= =?utf-8?q?s?= Message-ID: <20140923193012.112574.99523@mail.hg.python.org> https://hg.python.org/cpython/rev/1288ecf378e5 changeset: 92539:1288ecf378e5 branch: 2.7 parent: 92536:8a010ca89094 user: Serhiy Storchaka date: Tue Sep 23 22:26:45 2014 +0300 summary: Fixed test_large_file_exception. Ported tests for large count of files to TestZip64InSmallFiles. 
files: Lib/test/test_zipfile.py | 59 +++++++++++++++++++++++++++- 1 files changed, 58 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -605,7 +605,9 @@ def setUp(self): self._limit = zipfile.ZIP64_LIMIT - zipfile.ZIP64_LIMIT = 5 + self._filecount_limit = zipfile.ZIP_FILECOUNT_LIMIT + zipfile.ZIP64_LIMIT = 1000 + zipfile.ZIP_FILECOUNT_LIMIT = 9 line_gen = ("Test of zipfile line %d." % i for i in range(0, FIXEDTEST_SIZE)) @@ -709,8 +711,63 @@ with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp: self.assertEqual(zipfp.namelist(), ["absolute"]) + def test_too_many_files(self): + # This test checks that more than 64k files can be added to an archive, + # and that the resulting archive can be read properly by ZipFile + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=True) + zipf.debug = 100 + numfiles = 15 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, mode="r") + self.assertEqual(len(zipf2.namelist()), numfiles) + for i in range(numfiles): + content = zipf2.read("foo%08d" % i) + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + + def test_too_many_files_append(self): + zipf = zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) + zipf.debug = 100 + numfiles = 9 + for i in range(numfiles): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=False) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + with self.assertRaises(zipfile.LargeZipFile): + zipf.writestr("foo%08d" % numfiles, b'') + self.assertEqual(len(zipf.namelist()), numfiles) + zipf.close() + + zipf = zipfile.ZipFile(TESTFN, mode="a", allowZip64=True) + zipf.debug = 100 + self.assertEqual(len(zipf.namelist()), numfiles) + numfiles2 = 15 + for i in range(numfiles, numfiles2): + zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57)) + self.assertEqual(len(zipf.namelist()), numfiles2) + zipf.close() + + zipf2 = zipfile.ZipFile(TESTFN, mode="r") + self.assertEqual(len(zipf2.namelist()), numfiles2) + for i in range(numfiles2): + content = zipf2.read("foo%08d" % i) + self.assertEqual(content, "%d" % (i**3 % 57)) + zipf2.close() + def tearDown(self): zipfile.ZIP64_LIMIT = self._limit + zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit unlink(TESTFN) unlink(TESTFN2) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:45:16 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:45:16 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIwOTEy?= =?utf-8?q?=3A_Now_directories_added_to_ZIP_file_have_correct_Unix_and_MS-?= =?utf-8?q?DOS?= Message-ID: <20140923194514.112011.66770@mail.hg.python.org> https://hg.python.org/cpython/rev/b06e25a357de changeset: 92543:b06e25a357de branch: 3.4 parent: 92540:c7fd7bf039a8 user: Serhiy Storchaka date: Tue Sep 23 22:40:23 2014 +0300 summary: Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. 
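With the change summarized above, directory entries written to an archive carry both the Unix permission bits (high 16 bits of external_attr) and the MS-DOS directory flag (0x10). A hedged sketch of how that can be observed once the patch is applied; the directory and archive names are arbitrary:

    import os
    import zipfile

    os.makedirs('demo_dir/sub', exist_ok=True)           # arbitrary scratch directory
    with zipfile.ZipFile('dirs.zip', 'w') as zf:
        zf.write('demo_dir/sub')                         # stored as 'demo_dir/sub/'
        zf.writestr('explicit/', b'')                    # trailing slash: default drwxrwxr-x

    with zipfile.ZipFile('dirs.zip') as zf:
        for info in zf.infolist():
            print(info.filename,
                  oct(info.external_attr >> 16),         # Unix mode bits
                  bool(info.external_attr & 0x10))       # MS-DOS directory flag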
files: Lib/test/test_zipfile.py | 45 +++++++++++++++++++++++++-- Lib/zipfile.py | 7 +++- Misc/NEWS | 3 + 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -1696,11 +1696,48 @@ os.mkdir(os.path.join(TESTFN2, "a")) self.test_extract_dir() - def test_store_dir(self): + def test_write_dir(self): + dirpath = os.path.join(TESTFN2, "x") + os.mkdir(dirpath) + mode = os.stat(dirpath).st_mode & 0xFFFF + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(dirpath) + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zipf.write(dirpath, "y") + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "y"))) + self.assertEqual(len(os.listdir(target)), 2) + + def test_writestr_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) - zipf = zipfile.ZipFile(TESTFN, "w") - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.writestr("x/", b'') + zinfo = zipf.filelist[0] + self.assertEqual(zinfo.filename, "x/") + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("x/")) + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "x"))) + self.assertEqual(os.listdir(target), ["x"]) def tearDown(self): rmtree(TESTFN2) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -1356,6 +1356,7 @@ zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.CRC = 0 + zinfo.external_attr |= 0x10 # MS-DOS directory flag self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) @@ -1416,7 +1417,11 @@ zinfo = ZipInfo(filename=zinfo_or_arcname, date_time=time.localtime(time.time())[:6]) zinfo.compress_type = self.compression - zinfo.external_attr = 0o600 << 16 + if zinfo.filename[-1] == '/': + zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.external_attr = 0o600 << 16 # ?rw------- else: zinfo = zinfo_or_arcname diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,9 @@ Library ------- +- Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS + directory attributes. + - Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:45:16 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:45:16 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIwOTEy?= =?utf-8?q?=3A_Now_directories_added_to_ZIP_file_have_correct_Unix_and_MS-?= =?utf-8?q?DOS?= Message-ID: <20140923194514.111962.63386@mail.hg.python.org> https://hg.python.org/cpython/rev/c6b884483cd6 changeset: 92542:c6b884483cd6 branch: 2.7 parent: 92539:1288ecf378e5 user: Serhiy Storchaka date: Tue Sep 23 22:39:59 2014 +0300 summary: Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. files: Lib/test/test_zipfile.py | 43 ++++++++++++++++++++++++++- Lib/zipfile.py | 7 +++- Misc/NEWS | 3 + 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -1472,11 +1472,48 @@ os.mkdir(os.path.join(TESTFN2, "a")) self.test_extract_dir() - def test_store_dir(self): + def test_write_dir(self): + dirpath = os.path.join(TESTFN2, "x") + os.mkdir(dirpath) + mode = os.stat(dirpath).st_mode & 0xFFFF + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(dirpath) + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zipf.write(dirpath, "y") + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "y"))) + self.assertEqual(len(os.listdir(target)), 2) + + def test_writestr_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) with zipfile.ZipFile(TESTFN, "w") as zipf: - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + zipf.writestr("x/", b'') + zinfo = zipf.filelist[0] + self.assertEqual(zinfo.filename, "x/") + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("x/")) + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "x"))) + self.assertEqual(os.listdir(target), ["x"]) def tearDown(self): rmtree(TESTFN2) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -1150,6 +1150,7 @@ zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.CRC = 0 + zinfo.external_attr |= 0x10 # MS-DOS directory flag self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) @@ -1211,7 +1212,11 @@ date_time=time.localtime(time.time())[:6]) zinfo.compress_type = self.compression - zinfo.external_attr = 0600 << 16 + if zinfo.filename[-1] == '/': + zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.external_attr = 0o600 << 16 # 
?rw------- else: zinfo = zinfo_or_arcname diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS + directory attributes. + - Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:45:19 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 19:45:19 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2320912=3A_Now_directories_added_to_ZIP_file_have?= =?utf-8?q?_correct_Unix_and_MS-DOS?= Message-ID: <20140923194515.44078.51487@mail.hg.python.org> https://hg.python.org/cpython/rev/051105a95461 changeset: 92544:051105a95461 parent: 92541:a57351993623 parent: 92543:b06e25a357de user: Serhiy Storchaka date: Tue Sep 23 22:42:02 2014 +0300 summary: Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. files: Lib/test/test_zipfile.py | 45 +++++++++++++++++++++++++-- Lib/zipfile.py | 7 +++- Misc/NEWS | 3 + 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py --- a/Lib/test/test_zipfile.py +++ b/Lib/test/test_zipfile.py @@ -1696,11 +1696,48 @@ os.mkdir(os.path.join(TESTFN2, "a")) self.test_extract_dir() - def test_store_dir(self): + def test_write_dir(self): + dirpath = os.path.join(TESTFN2, "x") + os.mkdir(dirpath) + mode = os.stat(dirpath).st_mode & 0xFFFF + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.write(dirpath) + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zipf.write(dirpath, "y") + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("/x/")) + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + zinfo = zipf.filelist[1] + self.assertTrue(zinfo.filename, "y/") + self.assertEqual(zinfo.external_attr, (mode << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "y"))) + self.assertEqual(len(os.listdir(target)), 2) + + def test_writestr_dir(self): os.mkdir(os.path.join(TESTFN2, "x")) - zipf = zipfile.ZipFile(TESTFN, "w") - zipf.write(os.path.join(TESTFN2, "x"), "x") - self.assertTrue(zipf.filelist[0].filename.endswith("x/")) + with zipfile.ZipFile(TESTFN, "w") as zipf: + zipf.writestr("x/", b'') + zinfo = zipf.filelist[0] + self.assertEqual(zinfo.filename, "x/") + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + with zipfile.ZipFile(TESTFN, "r") as zipf: + zinfo = zipf.filelist[0] + self.assertTrue(zinfo.filename.endswith("x/")) + self.assertEqual(zinfo.external_attr, (0o40775 << 16) | 0x10) + target = os.path.join(TESTFN2, "target") + os.mkdir(target) + zipf.extractall(target) + self.assertTrue(os.path.isdir(os.path.join(target, "x"))) + self.assertEqual(os.listdir(target), ["x"]) def tearDown(self): rmtree(TESTFN2) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -1356,6 +1356,7 @@ zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.CRC = 0 + zinfo.external_attr |= 0x10 # MS-DOS directory flag 
self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) @@ -1416,7 +1417,11 @@ zinfo = ZipInfo(filename=zinfo_or_arcname, date_time=time.localtime(time.time())[:6]) zinfo.compress_type = self.compression - zinfo.external_attr = 0o600 << 16 + if zinfo.filename[-1] == '/': + zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.external_attr = 0o600 << 16 # ?rw------- else: zinfo = zinfo_or_arcname diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS + directory attributes. + - Issue #21866: ZipFile.close() no longer writes ZIP64 central directory records if allowZip64 is false. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:54:48 2014 From: python-checkins at python.org (ned.deily) Date: Tue, 23 Sep 2014 19:54:48 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322471=3A_Avoid_Python_Launcher=2Eapp_install_pr?= =?utf-8?q?oblems_by_removing?= Message-ID: <20140923195447.44082.22472@mail.hg.python.org> https://hg.python.org/cpython/rev/9c9980c3c38c changeset: 92547:9c9980c3c38c parent: 92544:051105a95461 parent: 92546:ff2cb4dc36e7 user: Ned Deily date: Tue Sep 23 12:54:18 2014 -0700 summary: Issue #22471: Avoid Python Launcher.app install problems by removing vestigial Makefile step. files: Mac/PythonLauncher/Makefile.in | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in --- a/Mac/PythonLauncher/Makefile.in +++ b/Mac/PythonLauncher/Makefile.in @@ -50,7 +50,6 @@ cp $(srcdir)/../Icons/PythonCompiled.icns "Python Launcher.app/Contents/Resources" cp $(srcdir)/factorySettings.plist "Python Launcher.app/Contents/Resources" cp -R $(srcdir)/English.lproj "Python Launcher.app/Contents/Resources" - find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r FileSettings.o: $(srcdir)/FileSettings.m $(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:54:48 2014 From: python-checkins at python.org (ned.deily) Date: Tue, 23 Sep 2014 19:54:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDcx?= =?utf-8?q?=3A_Avoid_Python_Launcher=2Eapp_install_problems_by_removing?= Message-ID: <20140923195447.44104.70981@mail.hg.python.org> https://hg.python.org/cpython/rev/ff2cb4dc36e7 changeset: 92546:ff2cb4dc36e7 branch: 3.4 parent: 92543:b06e25a357de user: Ned Deily date: Tue Sep 23 12:48:54 2014 -0700 summary: Issue #22471: Avoid Python Launcher.app install problems by removing vestigial Makefile step. 
files: Mac/PythonLauncher/Makefile.in | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in --- a/Mac/PythonLauncher/Makefile.in +++ b/Mac/PythonLauncher/Makefile.in @@ -53,8 +53,6 @@ --resource=$(srcdir)/factorySettings.plist \ --plist Info.plist \ build - find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r - FileSettings.o: $(srcdir)/FileSettings.m $(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 21:54:48 2014 From: python-checkins at python.org (ned.deily) Date: Tue, 23 Sep 2014 19:54:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDcx?= =?utf-8?q?=3A_Avoid_Python_Launcher=2Eapp_install_problems_by_removing?= Message-ID: <20140923195446.50588.65401@mail.hg.python.org> https://hg.python.org/cpython/rev/5444c2e22ff8 changeset: 92545:5444c2e22ff8 branch: 2.7 parent: 92542:c6b884483cd6 user: Ned Deily date: Tue Sep 23 12:48:16 2014 -0700 summary: Issue #22471: Avoid Python Launcher.app install problems by removing vestigial Makefile step. files: Mac/PythonLauncher/Makefile.in | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in --- a/Mac/PythonLauncher/Makefile.in +++ b/Mac/PythonLauncher/Makefile.in @@ -54,8 +54,6 @@ --resource=$(srcdir)/factorySettings.plist \ --plist Info.plist \ build - find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r - FileSettings.o: $(srcdir)/FileSettings.m $(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:25:57 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:25:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fixed_bytes_li?= =?utf-8?q?terals_in_tests=2E?= Message-ID: <20140923202556.44084.69765@mail.hg.python.org> https://hg.python.org/cpython/rev/381d6362c7bc changeset: 92548:381d6362c7bc branch: 3.4 parent: 92543:b06e25a357de user: Serhiy Storchaka date: Tue Sep 23 23:04:21 2014 +0300 summary: Fixed bytes literals in tests. files: Lib/test/test_re.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -766,8 +766,8 @@ self.assertTrue(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertTrue(re.match(br"[\u]", b'u')) self.assertTrue(re.match(br"[\U]", b'U')) - self.assertRaises(re.error, re.match, br"[\911]", "") - self.assertRaises(re.error, re.match, br"[\x1z]", "") + self.assertRaises(re.error, re.match, br"[\911]", b"") + self.assertRaises(re.error, re.match, br"[\x1z]", b"") def test_bug_113254(self): self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:25:57 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:25:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Fixed_bytes_literals_in_tests=2E?= Message-ID: <20140923202556.112610.56339@mail.hg.python.org> https://hg.python.org/cpython/rev/c9bda6fd3fb5 changeset: 92549:c9bda6fd3fb5 parent: 92544:051105a95461 parent: 92548:381d6362c7bc user: Serhiy Storchaka date: Tue Sep 23 23:04:54 2014 +0300 summary: Fixed bytes literals in tests. 
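The literal fix above makes the subject type match the pattern type: in Python 3 the re module never mixes str and bytes, so a bytes pattern has to be exercised against bytes. A quick illustration, independent of the invalid-escape checks in those tests:

    import re

    print(re.match(br'a', b'a'))          # bytes pattern, bytes subject: fine
    try:
        re.match(br'a', 'a')              # bytes pattern against str
    except TypeError as exc:
        print('rejected:', exc)           # cannot use a bytes pattern on a string-like object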
files: Lib/test/test_re.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -766,8 +766,8 @@ self.assertTrue(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertTrue(re.match(br"[\u]", b'u')) self.assertTrue(re.match(br"[\U]", b'U')) - self.assertRaises(re.error, re.match, br"[\911]", "") - self.assertRaises(re.error, re.match, br"[\x1z]", "") + self.assertRaises(re.error, re.match, br"[\911]", b"") + self.assertRaises(re.error, re.match, br"[\x1z]", b"") def test_bug_113254(self): self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:26:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:26:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <20140923202558.112111.67092@mail.hg.python.org> https://hg.python.org/cpython/rev/837353153f80 changeset: 92553:837353153f80 parent: 92552:01efc4157f63 parent: 92551:bce1594023f9 user: Serhiy Storchaka date: Tue Sep 23 23:24:19 2014 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:26:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:26:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge_heads?= Message-ID: <20140923202557.112410.77653@mail.hg.python.org> https://hg.python.org/cpython/rev/bce1594023f9 changeset: 92551:bce1594023f9 branch: 3.4 parent: 92548:381d6362c7bc parent: 92546:ff2cb4dc36e7 user: Serhiy Storchaka date: Tue Sep 23 23:23:41 2014 +0300 summary: Merge heads files: Mac/PythonLauncher/Makefile.in | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-) diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in --- a/Mac/PythonLauncher/Makefile.in +++ b/Mac/PythonLauncher/Makefile.in @@ -53,8 +53,6 @@ --resource=$(srcdir)/factorySettings.plist \ --plist Info.plist \ build - find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r - FileSettings.o: $(srcdir)/FileSettings.m $(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:26:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:26:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322362=3A_Forbidde?= =?utf-8?q?n_ambiguous_octal_escapes_out_of_range_0-0o377_in?= Message-ID: <20140923202557.50584.52402@mail.hg.python.org> https://hg.python.org/cpython/rev/3b32f495fb38 changeset: 92550:3b32f495fb38 user: Serhiy Storchaka date: Tue Sep 23 23:22:41 2014 +0300 summary: Issue #22362: Forbidden ambiguous octal escapes out of range 0-0o377 in regular expressions. 
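With the change summarized above, octal escapes that do not fit in a byte (values above 0o377) are rejected with re.error instead of being silently truncated to their low eight bits, while escapes in the 0-0o377 range keep working. A short sketch against a patched interpreter:

    import re

    print(re.match(r'\101', 'A').group())   # 0o101 == 65 == 'A', still accepted
    try:
        re.compile(r'\567')                 # 0o567 > 0o377: previously truncated to 0o167
    except re.error as exc:
        print('rejected:', exc)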
files: Lib/sre_parse.py | 20 ++++++++++++++++---- Lib/test/test_re.py | 12 +++++++----- Misc/NEWS | 3 +++ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -295,7 +295,11 @@ elif c in OCTDIGITS: # octal escape (up to three digits) escape += source.getwhile(2, OCTDIGITS) - return LITERAL, int(escape[1:], 8) & 0xff + c = int(escape[1:], 8) + if c > 0o377: + raise error('octal escape value %r outside of ' + 'range 0-0o377' % escape) + return LITERAL, c elif c in DIGITS: raise ValueError if len(escape) == 2: @@ -337,7 +341,7 @@ elif c == "0": # octal escape escape += source.getwhile(2, OCTDIGITS) - return LITERAL, int(escape[1:], 8) & 0xff + return LITERAL, int(escape[1:], 8) elif c in DIGITS: # octal escape *or* decimal group reference (sigh) if source.next in DIGITS: @@ -346,7 +350,11 @@ source.next in OCTDIGITS): # got three octal digits; this is an octal escape escape = escape + source.get() - return LITERAL, int(escape[1:], 8) & 0xff + c = int(escape[1:], 8) + if c > 0o377: + raise error('octal escape value %r outside of ' + 'range 0-0o377' % escape) + return LITERAL, c # not an octal escape, so this is a group reference group = int(escape[1:]) if group < state.groups: @@ -837,7 +845,11 @@ s.next in OCTDIGITS): this += sget() isoctal = True - lappend(chr(int(this[1:], 8) & 0xff)) + c = int(this[1:], 8) + if c > 0o377: + raise error('octal escape value %r outside of ' + 'range 0-0o377' % this) + lappend(chr(c)) if not isoctal: addgroup(int(this[1:])) else: diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -154,8 +154,8 @@ self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a') - self.assertEqual(re.sub('x', r'\400', 'x'), '\0') - self.assertEqual(re.sub('x', r'\777', 'x'), '\377') + self.assertRaises(re.error, re.sub, 'x', r'\400', 'x') + self.assertRaises(re.error, re.sub, 'x', r'\777', 'x') self.assertRaises(re.error, re.sub, 'x', r'\1', 'x') self.assertRaises(re.error, re.sub, 'x', r'\8', 'x') @@ -700,7 +700,7 @@ self.assertTrue(re.match(r"\08", "\0008")) self.assertTrue(re.match(r"\01", "\001")) self.assertTrue(re.match(r"\018", "\0018")) - self.assertTrue(re.match(r"\567", chr(0o167))) + self.assertRaises(re.error, re.match, r"\567", "") self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") @@ -728,12 +728,13 @@ self.assertTrue(re.match(r"[\U%08x]" % i, chr(i))) self.assertTrue(re.match(r"[\U%08x0]" % i, chr(i)+"0")) self.assertTrue(re.match(r"[\U%08xz]" % i, chr(i)+"z")) - self.assertTrue(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) + self.assertRaises(re.error, re.match, r"[\567]", "") self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") self.assertRaises(re.error, re.match, r"[\U0001234z]", "") self.assertRaises(re.error, re.match, r"[\U00110000]", "") + self.assertTrue(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: @@ -749,7 +750,7 @@ self.assertTrue(re.match(br"\08", b"\0008")) self.assertTrue(re.match(br"\01", b"\001")) self.assertTrue(re.match(br"\018", b"\0018")) - self.assertTrue(re.match(br"\567", bytes([0o167]))) + self.assertRaises(re.error, re.match, br"\567", b"") 
self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") @@ -766,6 +767,7 @@ self.assertTrue(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertTrue(re.match(br"[\u]", b'u')) self.assertTrue(re.match(br"[\U]", b'U')) + self.assertRaises(re.error, re.match, br"[\567]", b"") self.assertRaises(re.error, re.match, br"[\911]", b"") self.assertRaises(re.error, re.match, br"[\x1z]", b"") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22362: Forbidden ambiguous octal escapes out of range 0-0o377 in + regular expressions. + - Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 23 22:26:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 23 Sep 2014 20:26:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?q?=29=3A_Merge_heads?= Message-ID: <20140923202557.44078.82341@mail.hg.python.org> https://hg.python.org/cpython/rev/01efc4157f63 changeset: 92552:01efc4157f63 parent: 92550:3b32f495fb38 parent: 92547:9c9980c3c38c user: Serhiy Storchaka date: Tue Sep 23 23:23:55 2014 +0300 summary: Merge heads files: Mac/PythonLauncher/Makefile.in | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Mac/PythonLauncher/Makefile.in b/Mac/PythonLauncher/Makefile.in --- a/Mac/PythonLauncher/Makefile.in +++ b/Mac/PythonLauncher/Makefile.in @@ -50,7 +50,6 @@ cp $(srcdir)/../Icons/PythonCompiled.icns "Python Launcher.app/Contents/Resources" cp $(srcdir)/factorySettings.plist "Python Launcher.app/Contents/Resources" cp -R $(srcdir)/English.lproj "Python Launcher.app/Contents/Resources" - find "Python Launcher.app" -name '.svn' -print0 | xargs -0 rm -r FileSettings.o: $(srcdir)/FileSettings.m $(CC) $(CFLAGS) -o $@ -c $(srcdir)/FileSettings.m -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 02:13:17 2014 From: python-checkins at python.org (nick.coghlan) Date: Wed, 24 Sep 2014 00:13:17 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_470=3A_fix_typo_spotted_b?= =?utf-8?q?y_Jan_Pokorn=C3=BD?= Message-ID: <20140924001242.69986.43613@mail.hg.python.org> https://hg.python.org/peps/rev/3fe45416e5e5 changeset: 5560:3fe45416e5e5 user: Nick Coghlan date: Wed Sep 24 10:12:29 2014 +1000 summary: PEP 470: fix typo spotted by Jan Pokorn? files: pep-0470.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0470.txt b/pep-0470.txt --- a/pep-0470.txt +++ b/pep-0470.txt @@ -239,7 +239,7 @@ else. Upon acceptance of this PEP and the addition of the ``pypi-only`` mode, all new -projects will by defaulted to the PyPI only mode and they will be locked to +projects will be defaulted to the PyPI only mode and they will be locked to this mode and unable to change this particular setting. 
``pypi-only`` projects will still be able to register external index URLs as described above - the "pypi-only" refers only to the download links that are published directly on -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Wed Sep 24 08:40:44 2014 From: python-checkins at python.org (georg.brandl) Date: Wed, 24 Sep 2014 06:40:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_=2322464=3A_Speed_up_commo?= =?utf-8?q?n_Fraction_operations_by_special-casing_several?= Message-ID: <20140924064042.99105.6444@mail.hg.python.org> https://hg.python.org/cpython/rev/646bc7d3544b changeset: 92554:646bc7d3544b user: Georg Brandl date: Wed Sep 24 08:37:55 2014 +0200 summary: #22464: Speed up common Fraction operations by special-casing several operations for int-type arguments: constructor and equality test. Also avoid redundant property lookups in addition and subtraction. files: Lib/fractions.py | 24 +++++++++++++++++------- 1 files changed, 17 insertions(+), 7 deletions(-) diff --git a/Lib/fractions.py b/Lib/fractions.py --- a/Lib/fractions.py +++ b/Lib/fractions.py @@ -104,7 +104,12 @@ self = super(Fraction, cls).__new__(cls) if denominator is None: - if isinstance(numerator, numbers.Rational): + if type(numerator) is int: + self._numerator = numerator + self._denominator = 1 + return self + + elif isinstance(numerator, numbers.Rational): self._numerator = numerator.numerator self._denominator = numerator.denominator return self @@ -153,6 +158,9 @@ raise TypeError("argument should be a string " "or a Rational instance") + elif type(numerator) is int is type(denominator): + pass # *very* normal case + elif (isinstance(numerator, numbers.Rational) and isinstance(denominator, numbers.Rational)): numerator, denominator = ( @@ -399,17 +407,17 @@ def _add(a, b): """a + b""" - return Fraction(a.numerator * b.denominator + - b.numerator * a.denominator, - a.denominator * b.denominator) + da, db = a.denominator, b.denominator + return Fraction(a.numerator * db + b.numerator * da, + da * db) __add__, __radd__ = _operator_fallbacks(_add, operator.add) def _sub(a, b): """a - b""" - return Fraction(a.numerator * b.denominator - - b.numerator * a.denominator, - a.denominator * b.denominator) + da, db = a.denominator, b.denominator + return Fraction(a.numerator * db - b.numerator * da, + da * db) __sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub) @@ -561,6 +569,8 @@ def __eq__(a, b): """a == b""" + if type(b) is int: + return a._numerator == b and a._denominator == 1 if isinstance(b, numbers.Rational): return (a._numerator == b.numerator and a._denominator == b.denominator) -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Wed Sep 24 08:59:57 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 24 Sep 2014 08:59:57 +0200 Subject: [Python-checkins] Daily reference leaks (837353153f80): sum=5 Message-ID: results for 837353153f80 on branch "default" -------------------------------------------- test_collections leaked [0, -2, 0] references, sum=-2 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 2, 0] references, sum=2 test_site leaked [0, 2, 0] memory blocks, sum=2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogKJ9JZ8', '-x'] From python-checkins at python.org Wed Sep 24 09:08:41 2014 From: python-checkins at python.org (georg.brandl) Date: Wed, 24 Sep 2014 07:08:41 +0000 Subject: [Python-checkins] 
=?utf-8?q?cpython=3A_Update_importlib=2Eh_froze?= =?utf-8?q?n_bytecode_=28changed_due_to_commit_c0ca9d32aed4=29=2E?= Message-ID: <20140924070841.99115.38354@mail.hg.python.org> https://hg.python.org/cpython/rev/064f6baeb6bd changeset: 92555:064f6baeb6bd user: Georg Brandl date: Wed Sep 24 09:08:12 2014 +0200 summary: Update importlib.h frozen bytecode (changed due to commit c0ca9d32aed4). files: Python/importlib.h | 7629 +++++++++++++++---------------- 1 files changed, 3804 insertions(+), 3825 deletions(-) diff --git a/Python/importlib.h b/Python/importlib.h --- a/Python/importlib.h +++ b/Python/importlib.h [stripped] -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 11:44:09 2014 From: python-checkins at python.org (berker.peksag) Date: Wed, 24 Sep 2014 09:44:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIxODYw?= =?utf-8?q?=3A_Correct_docstrings_of_FileIO=2Eseek=28=29_and_FileIO=2Etrun?= =?utf-8?b?Y2F0ZSgpIG1ldGhvZHMu?= Message-ID: <20140924094404.42147.51858@mail.hg.python.org> https://hg.python.org/cpython/rev/2058d94f32dd changeset: 92556:2058d94f32dd branch: 3.4 parent: 92551:bce1594023f9 user: Berker Peksag date: Wed Sep 24 12:43:29 2014 +0300 summary: Issue #21860: Correct docstrings of FileIO.seek() and FileIO.truncate() methods. Patch by Terry Chia. files: Misc/ACKS | 1 + Modules/_io/fileio.c | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -237,6 +237,7 @@ Jerry Chen Michael Chermside Ingrid Cheung +Terry Chia Albert Chin-A-Young Adal Chiriliuc Matt Chisholm diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -1124,7 +1124,8 @@ "This is needed for lower-level file interfaces, such the fcntl module."); PyDoc_STRVAR(seek_doc, -"seek(offset: int[, whence: int]) -> None. Move to new file position.\n" +"seek(offset: int[, whence: int]) -> int. Move to new file position and\n" +"return the file position.\n" "\n" "Argument offset is a byte count. Optional argument whence defaults to\n" "0 (offset from start of file, offset should be >= 0); other values are 1\n" @@ -1136,9 +1137,10 @@ #ifdef HAVE_FTRUNCATE PyDoc_STRVAR(truncate_doc, -"truncate([size: int]) -> None. Truncate the file to at most size bytes.\n" +"truncate([size: int]) -> int. Truncate the file to at most size bytes\n" +"and return the truncated size.\n" "\n" -"Size defaults to the current file position, as returned by tell()." +"Size defaults to the current file position, as returned by tell().\n" "The current file position is changed to the value of size."); #endif -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 11:44:09 2014 From: python-checkins at python.org (berker.peksag) Date: Wed, 24 Sep 2014 09:44:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2321860=3A_Correct_docstrings_of_FileIO=2Eseek=28?= =?utf-8?q?=29_and_FileIO=2Etruncate=28=29_methods=2E?= Message-ID: <20140924094404.81460.94389@mail.hg.python.org> https://hg.python.org/cpython/rev/de645efe6a9b changeset: 92557:de645efe6a9b parent: 92555:064f6baeb6bd parent: 92556:2058d94f32dd user: Berker Peksag date: Wed Sep 24 12:44:06 2014 +0300 summary: Issue #21860: Correct docstrings of FileIO.seek() and FileIO.truncate() methods. Patch by Terry Chia. 
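As a quick illustration of what the corrected docstrings describe: seek() and truncate() on a FileIO object both return ints rather than None. A minimal sketch; the temporary file is only scaffolding and not part of the patch:

    import os, tempfile

    fd, path = tempfile.mkstemp()
    os.close(fd)
    f = open(path, 'wb', buffering=0)   # unbuffered binary mode yields an io.FileIO
    f.write(b'hello')
    print(f.seek(0, os.SEEK_END))       # 5 -- the new file position, not None
    print(f.truncate(3))                # 3 -- the truncated size, not None
    f.close()
    os.remove(path)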
files: Misc/ACKS | 1 + Modules/_io/fileio.c | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -239,6 +239,7 @@ Jerry Chen Michael Chermside Ingrid Cheung +Terry Chia Albert Chin-A-Young Adal Chiriliuc Matt Chisholm diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -1127,7 +1127,8 @@ "This is needed for lower-level file interfaces, such the fcntl module."); PyDoc_STRVAR(seek_doc, -"seek(offset: int[, whence: int]) -> None. Move to new file position.\n" +"seek(offset: int[, whence: int]) -> int. Move to new file position and\n" +"return the file position.\n" "\n" "Argument offset is a byte count. Optional argument whence defaults to\n" "0 (offset from start of file, offset should be >= 0); other values are 1\n" @@ -1139,9 +1140,10 @@ #ifdef HAVE_FTRUNCATE PyDoc_STRVAR(truncate_doc, -"truncate([size: int]) -> None. Truncate the file to at most size bytes.\n" +"truncate([size: int]) -> int. Truncate the file to at most size bytes\n" +"and return the truncated size.\n" "\n" -"Size defaults to the current file position, as returned by tell()." +"Size defaults to the current file position, as returned by tell().\n" "The current file position is changed to the value of size."); #endif -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 11:54:25 2014 From: python-checkins at python.org (berker.peksag) Date: Wed, 24 Sep 2014 09:54:25 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIxODYw?= =?utf-8?q?=3A_Correct_docstrings_of_FileIO=2Eseek=28=29_and_FileIO=2Etrun?= =?utf-8?b?Y2F0ZSgpIG1ldGhvZHMu?= Message-ID: <20140924095421.42145.36027@mail.hg.python.org> https://hg.python.org/cpython/rev/be2746c565c2 changeset: 92558:be2746c565c2 branch: 2.7 parent: 92545:5444c2e22ff8 user: Berker Peksag date: Wed Sep 24 12:54:25 2014 +0300 summary: Issue #21860: Correct docstrings of FileIO.seek() and FileIO.truncate() methods. Patch by Terry Chia. files: Misc/ACKS | 1 + Modules/_io/fileio.c | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -233,6 +233,7 @@ Jerry Chen Michael Chermside Ingrid Cheung +Terry Chia Albert Chin-A-Young Adal Chiriliuc Matt Chisholm diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -980,7 +980,8 @@ "This is needed for lower-level file interfaces, such the fcntl module."); PyDoc_STRVAR(seek_doc, -"seek(offset: int[, whence: int]) -> None. Move to new file position.\n" +"seek(offset: int[, whence: int]) -> int. Move to new file position\n" +"and return the file position.\n" "\n" "Argument offset is a byte count. Optional argument whence defaults to\n" "0 (offset from start of file, offset should be >= 0); other values are 1\n" @@ -992,9 +993,10 @@ #ifdef HAVE_FTRUNCATE PyDoc_STRVAR(truncate_doc, -"truncate([size: int]) -> None. Truncate the file to at most size bytes.\n" +"truncate([size: int]) -> int. Truncate the file to at most size bytes and\n" +"return the truncated size.\n" "\n" -"Size defaults to the current file position, as returned by tell()." 
+"Size defaults to the current file position, as returned by tell().\n" "The current file position is changed to the value of size."); #endif -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 12:31:13 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 24 Sep 2014 10:31:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322427=3A_TemporaryDirectory_no_longer_attempts_?= =?utf-8?q?to_clean_up_twice_when?= Message-ID: <20140924103059.82181.22661@mail.hg.python.org> https://hg.python.org/cpython/rev/e9d4288c32de changeset: 92560:e9d4288c32de parent: 92557:de645efe6a9b parent: 92559:7ea2153eae87 user: Serhiy Storchaka date: Wed Sep 24 13:29:27 2014 +0300 summary: Issue #22427: TemporaryDirectory no longer attempts to clean up twice when used in the with statement in generator. files: Lib/tempfile.py | 15 +++------------ Lib/test/test_tempfile.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/Lib/tempfile.py b/Lib/tempfile.py --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -689,11 +689,6 @@ in it are removed. """ - # Handle mkdtemp raising an exception - name = None - _finalizer = None - _closed = False - def __init__(self, suffix="", prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._finalizer = _weakref.finalize( @@ -701,10 +696,9 @@ warn_message="Implicitly cleaning up {!r}".format(self)) @classmethod - def _cleanup(cls, name, warn_message=None): + def _cleanup(cls, name, warn_message): _shutil.rmtree(name) - if warn_message is not None: - _warnings.warn(warn_message, ResourceWarning) + _warnings.warn(warn_message, ResourceWarning) def __repr__(self): @@ -717,8 +711,5 @@ self.cleanup() def cleanup(self): - if self._finalizer is not None: - self._finalizer.detach() - if self.name is not None and not self._closed: + if self._finalizer.detach(): _shutil.rmtree(self.name) - self._closed = True diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py --- a/Lib/test/test_tempfile.py +++ b/Lib/test/test_tempfile.py @@ -1211,6 +1211,30 @@ self.assertNotIn("Exception ", err) self.assertIn("ResourceWarning: Implicitly cleaning up", err) + def test_exit_on_shutdown(self): + # Issue #22427 + with self.do_create() as dir: + code = """if True: + import sys + import tempfile + import warnings + + def generator(): + with tempfile.TemporaryDirectory(dir={dir!r}) as tmp: + yield tmp + g = generator() + sys.stdout.buffer.write(next(g).encode()) + + warnings.filterwarnings("always", category=ResourceWarning) + """.format(dir=dir) + rc, out, err = script_helper.assert_python_ok("-c", code) + tmp_name = out.decode().strip() + self.assertFalse(os.path.exists(tmp_name), + "TemporaryDirectory %s exists after cleanup" % tmp_name) + err = err.decode('utf-8', 'backslashreplace') + self.assertNotIn("Exception ", err) + self.assertIn("ResourceWarning: Implicitly cleaning up", err) + def test_warnings_on_cleanup(self): # ResourceWarning will be triggered by __del__ with self.do_create() as dir: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22427: TemporaryDirectory no longer attempts to clean up twice when + used in the with statement in generator. + - Issue #22362: Forbidden ambiguous octal escapes out of range 0-0o377 in regular expressions. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 12:31:13 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 24 Sep 2014 10:31:13 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDI3?= =?utf-8?q?=3A_TemporaryDirectory_no_longer_attempts_to_clean_up_twice_whe?= =?utf-8?q?n?= Message-ID: <20140924103059.99097.36521@mail.hg.python.org> https://hg.python.org/cpython/rev/7ea2153eae87 changeset: 92559:7ea2153eae87 branch: 3.4 parent: 92556:2058d94f32dd user: Serhiy Storchaka date: Wed Sep 24 13:26:25 2014 +0300 summary: Issue #22427: TemporaryDirectory no longer attempts to clean up twice when used in the with statement in generator. files: Lib/tempfile.py | 15 +++------------ Lib/test/test_tempfile.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/Lib/tempfile.py b/Lib/tempfile.py --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -663,11 +663,6 @@ in it are removed. """ - # Handle mkdtemp raising an exception - name = None - _finalizer = None - _closed = False - def __init__(self, suffix="", prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._finalizer = _weakref.finalize( @@ -675,10 +670,9 @@ warn_message="Implicitly cleaning up {!r}".format(self)) @classmethod - def _cleanup(cls, name, warn_message=None): + def _cleanup(cls, name, warn_message): _shutil.rmtree(name) - if warn_message is not None: - _warnings.warn(warn_message, ResourceWarning) + _warnings.warn(warn_message, ResourceWarning) def __repr__(self): @@ -691,8 +685,5 @@ self.cleanup() def cleanup(self): - if self._finalizer is not None: - self._finalizer.detach() - if self.name is not None and not self._closed: + if self._finalizer.detach(): _shutil.rmtree(self.name) - self._closed = True diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py --- a/Lib/test/test_tempfile.py +++ b/Lib/test/test_tempfile.py @@ -1211,6 +1211,30 @@ self.assertNotIn("Exception ", err) self.assertIn("ResourceWarning: Implicitly cleaning up", err) + def test_exit_on_shutdown(self): + # Issue #22427 + with self.do_create() as dir: + code = """if True: + import sys + import tempfile + import warnings + + def generator(): + with tempfile.TemporaryDirectory(dir={dir!r}) as tmp: + yield tmp + g = generator() + sys.stdout.buffer.write(next(g).encode()) + + warnings.filterwarnings("always", category=ResourceWarning) + """.format(dir=dir) + rc, out, err = script_helper.assert_python_ok("-c", code) + tmp_name = out.decode().strip() + self.assertFalse(os.path.exists(tmp_name), + "TemporaryDirectory %s exists after cleanup" % tmp_name) + err = err.decode('utf-8', 'backslashreplace') + self.assertNotIn("Exception ", err) + self.assertIn("ResourceWarning: Implicitly cleaning up", err) + def test_warnings_on_cleanup(self): # ResourceWarning will be triggered by __del__ with self.do_create() as dir: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -13,6 +13,9 @@ Library ------- +- Issue #22427: TemporaryDirectory no longer attempts to clean up twice when + used in the with statement in generator. + - Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. 
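The rewritten cleanup() above leans on weakref.finalize.detach() being a one-shot operation: it is truthy the first time and None afterwards, so a second cleanup attempt is skipped. A standalone sketch; the Box class is purely illustrative:

    import weakref

    class Box:
        pass

    box = Box()
    fin = weakref.finalize(box, print, 'cleaning up')
    print(bool(fin.detach()))   # True  -- returns (obj, func, args, kwargs) and disarms it
    print(fin.detach())         # None  -- already dead, so nothing is cleaned up twice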
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 14:03:57 2014 From: python-checkins at python.org (berker.peksag) Date: Wed, 24 Sep 2014 12:03:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2316056=3A_Rename_test_method_in_test=5Fstatistic?= =?utf-8?q?s_to_avoid_conflict=2E?= Message-ID: <20140924120354.82131.94140@mail.hg.python.org> https://hg.python.org/cpython/rev/c49d7f4d1c04 changeset: 92562:c49d7f4d1c04 parent: 92560:e9d4288c32de parent: 92561:6d44906344f4 user: Berker Peksag date: Wed Sep 24 15:03:57 2014 +0300 summary: Issue #16056: Rename test method in test_statistics to avoid conflict. files: Lib/test/test_statistics.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -991,14 +991,14 @@ result = statistics._sum([1, 2, inf, 3, -inf, 4]) self.assertTrue(math.isnan(result)) - def test_decimal_mismatched_infs_to_nan(self): + def test_decimal_extendedcontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign returns NAN. inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): self.assertTrue(math.isnan(statistics._sum(data))) - def test_decimal_mismatched_infs_to_nan(self): + def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 14:03:57 2014 From: python-checkins at python.org (berker.peksag) Date: Wed, 24 Sep 2014 12:03:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE2MDU2?= =?utf-8?q?=3A_Rename_test_method_in_test=5Fstatistics_to_avoid_conflict?= =?utf-8?q?=2E?= Message-ID: <20140924120354.99097.8356@mail.hg.python.org> https://hg.python.org/cpython/rev/6d44906344f4 changeset: 92561:6d44906344f4 branch: 3.4 parent: 92559:7ea2153eae87 user: Berker Peksag date: Wed Sep 24 15:03:25 2014 +0300 summary: Issue #16056: Rename test method in test_statistics to avoid conflict. files: Lib/test/test_statistics.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -991,14 +991,14 @@ result = statistics._sum([1, 2, inf, 3, -inf, 4]) self.assertTrue(math.isnan(result)) - def test_decimal_mismatched_infs_to_nan(self): + def test_decimal_extendedcontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign returns NAN. inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): self.assertTrue(math.isnan(statistics._sum(data))) - def test_decimal_mismatched_infs_to_nan(self): + def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. 
inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 17:03:39 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 24 Sep 2014 15:03:39 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_478=3A_replace_=223=2E4?= =?utf-8?b?IiB3aXRoICIzLjUi?= Message-ID: <20140924150323.82181.1318@mail.hg.python.org> https://hg.python.org/peps/rev/3fd8a5d108a0 changeset: 5561:3fd8a5d108a0 user: Victor Stinner date: Wed Sep 24 17:02:24 2014 +0200 summary: PEP 478: replace "3.4" with "3.5" files: pep-0478.txt | 26 +++++++++++++------------- 1 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pep-0478.txt b/pep-0478.txt --- a/pep-0478.txt +++ b/pep-0478.txt @@ -25,7 +25,7 @@ Release Manager and Crew ======================== -- 3.4 Release Manager: Larry Hastings +- 3.5 Release Manager: Larry Hastings - Windows installers: Martin v. L?wis - Mac installers: Ned Deily / Ronald Oussoren - Documentation: Georg Brandl @@ -36,23 +36,23 @@ The releases: -- 3.4.0 alpha 1: February 1, 2015 -- 3.4.0 alpha 2: March 8, 2015 -- 3.4.0 alpha 3: March 28, 2015 -- 3.4.0 alpha 4: April 19, 2015 -- 3.4.0 beta 1: May 24, 2015 -- 3.4.0 beta 2: July 5, 2015 -- 3.4.0 beta 3: July 26, 2015 -- 3.4.0 candidate 1: August 9, 2015 -- 3.4.0 candidate 2: August 23, 2015 -- 3.4.0 candidate 3: September 6, 2015 -- 3.4.0 final: September 13, 2015 +- 3.5.0 alpha 1: February 1, 2015 +- 3.5.0 alpha 2: March 8, 2015 +- 3.5.0 alpha 3: March 28, 2015 +- 3.5.0 alpha 4: April 19, 2015 +- 3.5.0 beta 1: May 24, 2015 +- 3.5.0 beta 2: July 5, 2015 +- 3.5.0 beta 3: July 26, 2015 +- 3.5.0 candidate 1: August 9, 2015 +- 3.5.0 candidate 2: August 23, 2015 +- 3.5.0 candidate 3: September 6, 2015 +- 3.5.0 final: September 13, 2015 (Beta 1 is also "feature freeze"--no new features beyond this point.) -Features for 3.4 +Features for 3.5 ================ Proposed changes for 3.5: -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Wed Sep 24 17:10:26 2014 From: python-checkins at python.org (r.david.murray) Date: Wed, 24 Sep 2014 15:10:26 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Improve_Englis?= =?utf-8?q?h_phrasing_in_asyncio_task_docs=2E?= Message-ID: <20140924151001.42131.3416@mail.hg.python.org> https://hg.python.org/cpython/rev/33fee8798ee2 changeset: 92563:33fee8798ee2 branch: 3.4 parent: 92561:6d44906344f4 user: R David Murray date: Wed Sep 24 11:09:09 2014 -0400 summary: Improve English phrasing in asyncio task docs. files: Doc/library/asyncio-task.rst | 18 +++++++++--------- 1 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -319,18 +319,18 @@ Schedule the execution of a :ref:`coroutine `: wrap it in a future. A task is a subclass of :class:`Future`. - A task is responsible to execute a coroutine object in an event loop. If + A task is responsible for executing a coroutine object in an event loop. If the wrapped coroutine yields from a future, the task suspends the execution of the wrapped coroutine and waits for the completition of the future. When the future is done, the execution of the wrapped coroutine restarts with the result or the exception of the future. Event loops use cooperative scheduling: an event loop only runs one task at - the same time. Other tasks may run in parallel if other event loops are + a time. 
Other tasks may run in parallel if other event loops are running in different threads. While a task waits for the completion of a future, the event loop executes a new task. - The cancellation of a task is different than cancelling a future. Calling + The cancellation of a task is different from the cancelation of a future. Calling :meth:`cancel` will throw a :exc:`~concurrent.futures.CancelledError` to the wrapped coroutine. :meth:`~Future.cancelled` only returns ``True`` if the wrapped coroutine did not catch the @@ -341,7 +341,7 @@ ` did not complete. It is probably a bug and a warning is logged: see :ref:`Pending task destroyed `. - Don't create directly :class:`Task` instances: use the :func:`async` + Don't directly create :class:`Task` instances: use the :func:`async` function or the :meth:`BaseEventLoop.create_task` method. .. classmethod:: all_tasks(loop=None) @@ -360,17 +360,17 @@ .. method:: cancel() - Request this task to cancel itself. + Request that this task cancel itself. This arranges for a :exc:`~concurrent.futures.CancelledError` to be thrown into the wrapped coroutine on the next cycle through the event loop. The coroutine then has a chance to clean up or even deny the request using try/except/finally. - Contrary to :meth:`Future.cancel`, this does not guarantee that the task + Unlike :meth:`Future.cancel`, this does not guarantee that the task will be cancelled: the exception might be caught and acted upon, delaying - cancellation of the task or preventing it completely. The task may also - return a value or raise a different exception. + cancellation of the task or preventing cancellation completely. The task + may also return a value or raise a different exception. Immediately after this method is called, :meth:`~Future.cancelled` will not return ``True`` (unless the task was already cancelled). A task will @@ -405,7 +405,7 @@ This produces output similar to that of the traceback module, for the frames retrieved by get_stack(). The limit argument is passed to get_stack(). The file argument is an I/O stream to which the output - goes; by default it goes to sys.stderr. + is written; by default output is written to sys.stderr. Example: Parallel execution of tasks -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 17:10:26 2014 From: python-checkins at python.org (r.david.murray) Date: Wed, 24 Sep 2014 15:10:26 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_Improve_English_phrasing_in_asyncio_task_docs?= =?utf-8?q?=2E?= Message-ID: <20140924151001.81932.38231@mail.hg.python.org> https://hg.python.org/cpython/rev/63fcc08ee8cc changeset: 92564:63fcc08ee8cc parent: 92562:c49d7f4d1c04 parent: 92563:33fee8798ee2 user: R David Murray date: Wed Sep 24 11:09:42 2014 -0400 summary: Merge: Improve English phrasing in asyncio task docs. files: Doc/library/asyncio-task.rst | 18 +++++++++--------- 1 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -319,18 +319,18 @@ Schedule the execution of a :ref:`coroutine `: wrap it in a future. A task is a subclass of :class:`Future`. - A task is responsible to execute a coroutine object in an event loop. If + A task is responsible for executing a coroutine object in an event loop. 
If the wrapped coroutine yields from a future, the task suspends the execution of the wrapped coroutine and waits for the completition of the future. When the future is done, the execution of the wrapped coroutine restarts with the result or the exception of the future. Event loops use cooperative scheduling: an event loop only runs one task at - the same time. Other tasks may run in parallel if other event loops are + a time. Other tasks may run in parallel if other event loops are running in different threads. While a task waits for the completion of a future, the event loop executes a new task. - The cancellation of a task is different than cancelling a future. Calling + The cancellation of a task is different from the cancelation of a future. Calling :meth:`cancel` will throw a :exc:`~concurrent.futures.CancelledError` to the wrapped coroutine. :meth:`~Future.cancelled` only returns ``True`` if the wrapped coroutine did not catch the @@ -341,7 +341,7 @@ ` did not complete. It is probably a bug and a warning is logged: see :ref:`Pending task destroyed `. - Don't create directly :class:`Task` instances: use the :func:`async` + Don't directly create :class:`Task` instances: use the :func:`async` function or the :meth:`BaseEventLoop.create_task` method. .. classmethod:: all_tasks(loop=None) @@ -360,17 +360,17 @@ .. method:: cancel() - Request this task to cancel itself. + Request that this task cancel itself. This arranges for a :exc:`~concurrent.futures.CancelledError` to be thrown into the wrapped coroutine on the next cycle through the event loop. The coroutine then has a chance to clean up or even deny the request using try/except/finally. - Contrary to :meth:`Future.cancel`, this does not guarantee that the task + Unlike :meth:`Future.cancel`, this does not guarantee that the task will be cancelled: the exception might be caught and acted upon, delaying - cancellation of the task or preventing it completely. The task may also - return a value or raise a different exception. + cancellation of the task or preventing cancellation completely. The task + may also return a value or raise a different exception. Immediately after this method is called, :meth:`~Future.cancelled` will not return ``True`` (unless the task was already cancelled). A task will @@ -405,7 +405,7 @@ This produces output similar to that of the traceback module, for the frames retrieved by get_stack(). The limit argument is passed to get_stack(). The file argument is an I/O stream to which the output - goes; by default it goes to sys.stderr. + is written; by default output is written to sys.stderr. Example: Parallel execution of tasks -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 19:15:15 2014 From: python-checkins at python.org (r.david.murray) Date: Wed, 24 Sep 2014 17:15:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_Apply_asyncio_Task_English_fixes_to_docstrings_?= =?utf-8?q?as_well=2E?= Message-ID: <20140924171511.99087.24963@mail.hg.python.org> https://hg.python.org/cpython/rev/b18288f24501 changeset: 92566:b18288f24501 parent: 92564:63fcc08ee8cc parent: 92565:132e2fe31d9f user: R David Murray date: Wed Sep 24 13:14:07 2014 -0400 summary: Merge: Apply asyncio Task English fixes to docstrings as well. 
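To make the reworded cancellation caveat concrete: cancel() is only a request, and a coroutine that catches CancelledError can finish normally. A minimal sketch in the asyncio style of this era; the coroutine name is illustrative:

    import asyncio

    @asyncio.coroutine
    def stubborn():
        try:
            yield from asyncio.sleep(10)
        except asyncio.CancelledError:
            return 'kept running'          # catching the error prevents cancellation

    loop = asyncio.get_event_loop()
    task = loop.create_task(stubborn())
    loop.call_later(0.1, task.cancel)
    print(loop.run_until_complete(task))   # 'kept running'
    print(task.cancelled())                # False -- the request was denied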
files: Lib/asyncio/tasks.py | 19 ++++++++++--------- 1 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -77,9 +77,9 @@ # status is still pending self._log_destroy_pending = True - # On Python 3.3 or older, objects with a destructor part of a reference - # cycle are never destroyed. It's not more the case on Python 3.4 thanks to - # the PEP 442. + # On Python 3.3 or older, objects with a destructor that are part of a + # reference cycle are never destroyed. That's not the case any more on + # Python 3.4 thanks to the PEP 442. if _PY34: def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: @@ -155,7 +155,8 @@ This produces output similar to that of the traceback module, for the frames retrieved by get_stack(). The limit argument is passed to get_stack(). The file argument is an I/O stream - to which the output goes; by default it goes to sys.stderr. + to which the output is written; by default output is written + to sys.stderr. """ extracted_list = [] checked = set() @@ -184,18 +185,18 @@ print(line, file=file, end='') def cancel(self): - """Request this task to cancel itself. + """Request that this task cancel itself. This arranges for a CancelledError to be thrown into the wrapped coroutine on the next cycle through the event loop. The coroutine then has a chance to clean up or even deny the request using try/except/finally. - Contrary to Future.cancel(), this does not guarantee that the + Unlike Future.cancel, this does not guarantee that the task will be cancelled: the exception might be caught and - acted upon, delaying cancellation of the task or preventing it - completely. The task may also return a value or raise a - different exception. + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. Immediately after this method is called, Task.cancelled() will not return True (unless the task was already cancelled). A -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 19:15:15 2014 From: python-checkins at python.org (r.david.murray) Date: Wed, 24 Sep 2014 17:15:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Apply_asyncio_?= =?utf-8?q?Task_English_fixes_to_docstrings_as_well=2E?= Message-ID: <20140924171511.81762.84166@mail.hg.python.org> https://hg.python.org/cpython/rev/132e2fe31d9f changeset: 92565:132e2fe31d9f branch: 3.4 parent: 92563:33fee8798ee2 user: R David Murray date: Wed Sep 24 13:13:45 2014 -0400 summary: Apply asyncio Task English fixes to docstrings as well. Also fixed the phrasing in a comment. files: Lib/asyncio/tasks.py | 19 ++++++++++--------- 1 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -77,9 +77,9 @@ # status is still pending self._log_destroy_pending = True - # On Python 3.3 or older, objects with a destructor part of a reference - # cycle are never destroyed. It's not more the case on Python 3.4 thanks to - # the PEP 442. + # On Python 3.3 or older, objects with a destructor that are part of a + # reference cycle are never destroyed. That's not the case any more on + # Python 3.4 thanks to the PEP 442. 
if _PY34: def __del__(self): if self._state == futures._PENDING and self._log_destroy_pending: @@ -155,7 +155,8 @@ This produces output similar to that of the traceback module, for the frames retrieved by get_stack(). The limit argument is passed to get_stack(). The file argument is an I/O stream - to which the output goes; by default it goes to sys.stderr. + to which the output is written; by default output is written + to sys.stderr. """ extracted_list = [] checked = set() @@ -184,18 +185,18 @@ print(line, file=file, end='') def cancel(self): - """Request this task to cancel itself. + """Request that this task cancel itself. This arranges for a CancelledError to be thrown into the wrapped coroutine on the next cycle through the event loop. The coroutine then has a chance to clean up or even deny the request using try/except/finally. - Contrary to Future.cancel(), this does not guarantee that the + Unlike Future.cancel, this does not guarantee that the task will be cancelled: the exception might be caught and - acted upon, delaying cancellation of the task or preventing it - completely. The task may also return a value or raise a - different exception. + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. Immediately after this method is called, Task.cancelled() will not return True (unless the task was already cancelled). A -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Sep 24 21:30:29 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 24 Sep 2014 19:30:29 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_Add_myself_to_some_=22exp?= =?utf-8?b?ZXJ0cyIu?= Message-ID: <20140924193026.84025.32118@mail.hg.python.org> https://hg.python.org/devguide/rev/8e4580b74c3f changeset: 714:8e4580b74c3f user: Serhiy Storchaka date: Wed Sep 24 22:29:47 2014 +0300 summary: Add myself to some "experts". 
files: experts.rst | 9 +++++---- 1 files changed, 5 insertions(+), 4 deletions(-) diff --git a/experts.rst b/experts.rst --- a/experts.rst +++ b/experts.rst @@ -59,7 +59,7 @@ asyncio gvanrossum, haypo, pitrou, yselivanov, giampaolo.rodola asyncore josiahcarlson, giampaolo.rodola*, stutzbach atexit -audioop +audioop serhiy.storchaka base64 bdb binascii @@ -161,6 +161,7 @@ operator optparse aronacher os loewis +os.path serhiy.storchaka ossaudiodev parser benjamin.peterson pathlib pitrou* @@ -185,7 +186,7 @@ queue rhettinger quopri random rhettinger, mark.dickinson -re effbot (inactive), pitrou, ezio.melotti +re effbot (inactive), pitrou, ezio.melotti, serhiy.storchaka readline reprlib resource @@ -229,7 +230,7 @@ threading pitrou time belopolsky timeit georg.brandl -tkinter gpolo +tkinter gpolo, serhiy.storchaka token georg.brandl tokenize meador.inge trace belopolsky @@ -263,7 +264,7 @@ xml.sax.saxutils xml.sax.xmlreader xmlrpc loewis -zipfile alanmcintyre +zipfile alanmcintyre, serhiy.storchaka zipimport zlib nadeem.vawda ==================== ============================================= -- Repository URL: https://hg.python.org/devguide From python-checkins at python.org Thu Sep 25 00:40:41 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 24 Sep 2014 22:40:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_test=5Ffaulthandler=3A_fix_typo?= Message-ID: <20140924224038.123485.21391@mail.hg.python.org> https://hg.python.org/cpython/rev/2ba974365964 changeset: 92568:2ba974365964 parent: 92566:b18288f24501 parent: 92567:820dd1bbdbac user: Victor Stinner date: Thu Sep 25 00:39:17 2014 +0200 summary: (Merge 3.4) test_faulthandler: fix typo files: Lib/test/test_faulthandler.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py --- a/Lib/test/test_faulthandler.py +++ b/Lib/test/test_faulthandler.py @@ -230,7 +230,7 @@ """ not_expected = 'Fatal Python error' stderr, exitcode = self.get_output(code) - stder = '\n'.join(stderr) + stderr = '\n'.join(stderr) self.assertTrue(not_expected not in stderr, "%r is present in %r" % (not_expected, stderr)) self.assertNotEqual(exitcode, 0) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 00:40:41 2014 From: python-checkins at python.org (victor.stinner) Date: Wed, 24 Sep 2014 22:40:41 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogdGVzdF9mYXVsdGhh?= =?utf-8?q?ndler=3A_fix_typo?= Message-ID: <20140924224037.91836.96345@mail.hg.python.org> https://hg.python.org/cpython/rev/820dd1bbdbac changeset: 92567:820dd1bbdbac branch: 3.4 parent: 92565:132e2fe31d9f user: Victor Stinner date: Thu Sep 25 00:38:48 2014 +0200 summary: test_faulthandler: fix typo files: Lib/test/test_faulthandler.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py --- a/Lib/test/test_faulthandler.py +++ b/Lib/test/test_faulthandler.py @@ -220,7 +220,7 @@ """ not_expected = 'Fatal Python error' stderr, exitcode = self.get_output(code) - stder = '\n'.join(stderr) + stderr = '\n'.join(stderr) self.assertTrue(not_expected not in stderr, "%r is present in %r" % (not_expected, stderr)) self.assertNotEqual(exitcode, 0) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 02:23:13 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 25 Sep 
2014 00:23:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI0ODQp?= Message-ID: <20140925002313.91810.49119@mail.hg.python.org> https://hg.python.org/cpython/rev/7d6297450943 changeset: 92570:7d6297450943 parent: 92568:2ba974365964 parent: 92569:8ce21ffc6df5 user: Benjamin Peterson date: Wed Sep 24 20:22:49 2014 -0400 summary: merge 3.4 (#22484) files: Doc/Makefile | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -165,9 +165,10 @@ autobuild-html: make html SPHINXOPTS='-A daily=1 -A versionswitcher=1' -# for stable releases: only build if not in pre-release stage (alpha, beta, rc) +# for stable releases: only build if not in pre-release stage (alpha, beta) +# release candidate downloads are okay, since the stable tree can be in that stage autobuild-stable: - @case $(DISTVERSION) in *[abc]*) \ + @case $(DISTVERSION) in *[ab]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ exit 1;; \ esac -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 02:23:13 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 25 Sep 2014 00:23:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_allow_archives?= =?utf-8?q?_for_rc_releases_to_be_built_=28closes_=2322484=29?= Message-ID: <20140925002312.91832.49366@mail.hg.python.org> https://hg.python.org/cpython/rev/8ce21ffc6df5 changeset: 92569:8ce21ffc6df5 branch: 3.4 parent: 92567:820dd1bbdbac user: Benjamin Peterson date: Wed Sep 24 20:22:24 2014 -0400 summary: allow archives for rc releases to be built (closes #22484) files: Doc/Makefile | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -165,9 +165,10 @@ autobuild-html: make html SPHINXOPTS='-A daily=1 -A versionswitcher=1' -# for stable releases: only build if not in pre-release stage (alpha, beta, rc) +# for stable releases: only build if not in pre-release stage (alpha, beta) +# release candidate downloads are okay, since the stable tree can be in that stage autobuild-stable: - @case $(DISTVERSION) in *[abc]*) \ + @case $(DISTVERSION) in *[ab]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ exit 1;; \ esac -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 02:23:14 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 25 Sep 2014 00:23:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_allow_archives?= =?utf-8?q?_for_rc_releases_to_be_built_=28closes_=2322484=29?= Message-ID: <20140925002313.91814.63662@mail.hg.python.org> https://hg.python.org/cpython/rev/22a46f05ce23 changeset: 92571:22a46f05ce23 branch: 2.7 parent: 92558:be2746c565c2 user: Benjamin Peterson date: Wed Sep 24 20:22:24 2014 -0400 summary: allow archives for rc releases to be built (closes #22484) files: Doc/Makefile | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -165,9 +165,10 @@ autobuild-html: make html SPHINXOPTS='-A daily=1 -A versionswitcher=1' -# for stable releases: only build if not in pre-release stage (alpha, beta, rc) +# for stable releases: only build if not in pre-release stage (alpha, beta) +# release candidate downloads are okay, since the stable tree can be in that stage autobuild-stable: - 
@case $(DISTVERSION) in *[abc]*) \ + @case $(DISTVERSION) in *[ab]*) \ echo "Not building; $(DISTVERSION) is not a release version."; \ exit 1;; \ esac -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 05:22:37 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 03:22:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogYXN5bmNpby51bml4?= =?utf-8?q?=5Fevents=3A_Move_import_statement_to_match_tulip_code?= Message-ID: <20140925032232.123967.85827@mail.hg.python.org> https://hg.python.org/cpython/rev/7b962b896f10 changeset: 92572:7b962b896f10 branch: 3.4 parent: 92569:8ce21ffc6df5 user: Yury Selivanov date: Wed Sep 24 23:21:39 2014 -0400 summary: asyncio.unix_events: Move import statement to match tulip code files: Lib/asyncio/unix_events.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py --- a/Lib/asyncio/unix_events.py +++ b/Lib/asyncio/unix_events.py @@ -1,7 +1,6 @@ """Selector event loop for Unix with signal handling.""" import errno -import fcntl import os import signal import socket @@ -263,6 +262,8 @@ def _set_nonblocking(fd): os.set_blocking(fd, False) else: + import fcntl + def _set_nonblocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) flags = flags | os.O_NONBLOCK -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 05:22:37 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 03:22:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_asyncio=2Eunix=5Fevents=3A?= =?utf-8?q?_Move_import_statement_to_match_code_in_tulip?= Message-ID: <20140925032232.123641.72834@mail.hg.python.org> https://hg.python.org/cpython/rev/c2a485169f48 changeset: 92573:c2a485169f48 parent: 92570:7d6297450943 user: Yury Selivanov date: Wed Sep 24 23:22:26 2014 -0400 summary: asyncio.unix_events: Move import statement to match code in tulip files: Lib/asyncio/unix_events.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py --- a/Lib/asyncio/unix_events.py +++ b/Lib/asyncio/unix_events.py @@ -262,6 +262,8 @@ def _set_nonblocking(fd): os.set_blocking(fd, False) else: + import fcntl + def _set_nonblocking(fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) flags = flags | os.O_NONBLOCK -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 05:30:18 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 03:30:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_asyncio=2Etest=5Fevents=3A?= =?utf-8?q?_Partially_reverting_f7643c893587_to_stay_in_sync_with?= Message-ID: <20140925033012.91810.69519@mail.hg.python.org> https://hg.python.org/cpython/rev/5f001ad90373 changeset: 92574:5f001ad90373 user: Yury Selivanov date: Wed Sep 24 23:30:03 2014 -0400 summary: asyncio.test_events: Partially reverting f7643c893587 to stay in sync with tulip codebase files: Lib/test/test_asyncio/test_events.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -448,7 +448,7 @@ listener = socket.socket() listener.setblocking(False) listener.bind(('127.0.0.1', 0)) - listener.listen() + listener.listen(1) client = socket.socket() client.connect(listener.getsockname()) -- Repository URL: 
https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 05:42:01 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 03:42:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_asyncio=3A_Reverting_69d47?= =?utf-8?q?4dab479_as_issue_=2321645_is_now_closed_and_debug_is_no?= Message-ID: <20140925034154.123685.28704@mail.hg.python.org> https://hg.python.org/cpython/rev/fe456770b454 changeset: 92575:fe456770b454 user: Yury Selivanov date: Wed Sep 24 23:41:28 2014 -0400 summary: asyncio: Reverting 69d474dab479 as issue #21645 is now closed and debug is no longer needed files: Lib/test/test_asyncio/test_streams.py | 6 ------ 1 files changed, 0 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py --- a/Lib/test/test_asyncio/test_streams.py +++ b/Lib/test/test_asyncio/test_streams.py @@ -596,12 +596,6 @@ code = """\ import os, sys -try: - import faulthandler -except ImportError: - pass -else: - faulthandler.dump_traceback_later(60, exit=True) fd = int(sys.argv[1]) os.write(fd, b'data') os.close(fd) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 06:12:38 2014 From: python-checkins at python.org (larry.hastings) Date: Thu, 25 Sep 2014 04:12:38 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Updates_for_3=2E5_release_sch?= =?utf-8?q?edule_PEP--different_WE=2C_more_PEPs=2E?= Message-ID: <20140925041232.84029.37192@mail.hg.python.org> https://hg.python.org/peps/rev/bc09c431a2c7 changeset: 5562:bc09c431a2c7 user: Larry Hastings date: Wed Sep 24 21:13:07 2014 -0700 summary: Updates for 3.5 release schedule PEP--different WE, more PEPs. files: pep-0478.txt | 12 +++++++++++- 1 files changed, 11 insertions(+), 1 deletions(-) diff --git a/pep-0478.txt b/pep-0478.txt --- a/pep-0478.txt +++ b/pep-0478.txt @@ -26,7 +26,7 @@ ======================== - 3.5 Release Manager: Larry Hastings -- Windows installers: Martin v. 
L?wis +- Windows installers: Steve Dower - Mac installers: Ned Deily / Ronald Oussoren - Documentation: Georg Brandl @@ -55,13 +55,23 @@ Features for 3.5 ================ +Implemented / Final PEPs: + +* PEP 461, a new matrix multiplication operator +* PEP 465, %-formatting for binary strings +* PEP 471, os.scandir() + Proposed changes for 3.5: * PEP 431, improved support for time zone databases +* PEP 432, simplifying Python's startup sequence * PEP 441, improved Python zip application support * PEP 447, support for __locallookup__ metaclass method * PEP 448, additional unpacking generalizations * PEP 455, key transforming dictionary +* PEP 457, a build tool generating boilerplate for extension modules +* PEP 475, retrying system calls that fail with EINTR + Copyright -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Thu Sep 25 07:08:00 2014 From: python-checkins at python.org (benjamin.peterson) Date: Thu, 25 Sep 2014 05:08:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140925050757.91832.85726@mail.hg.python.org> https://hg.python.org/cpython/rev/a6fa8538c68b changeset: 92576:a6fa8538c68b parent: 92575:fe456770b454 parent: 92572:7b962b896f10 user: Benjamin Peterson date: Thu Sep 25 01:07:54 2014 -0400 summary: merge 3.4 files: -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Thu Sep 25 09:30:28 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 25 Sep 2014 09:30:28 +0200 Subject: [Python-checkins] Daily reference leaks (7d6297450943): sum=1 Message-ID: results for 7d6297450943 on branch "default" -------------------------------------------- test_collections leaked [-2, 0, 0] references, sum=-2 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [0, 2, -2] references, sum=0 test_site leaked [0, 2, -2] memory blocks, sum=0 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog2i8FOW', '-x'] From python-checkins at python.org Thu Sep 25 11:48:36 2014 From: python-checkins at python.org (nick.coghlan) Date: Thu, 25 Sep 2014 09:48:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2318093=3A_fix_test?= =?utf-8?q?=5Fcapi_test_skip_due_to_=5Ftestembed_move?= Message-ID: <20140925094833.123534.15697@mail.hg.python.org> https://hg.python.org/cpython/rev/c87e00a6258d changeset: 92577:c87e00a6258d user: Nick Coghlan date: Thu Sep 25 19:48:15 2014 +1000 summary: Issue #18093: fix test_capi test skip due to _testembed move files: Lib/test/test_capi.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py --- a/Lib/test/test_capi.py +++ b/Lib/test/test_capi.py @@ -285,7 +285,7 @@ exename += ext exepath = os.path.dirname(sys.executable) else: - exepath = os.path.join(basepath, "Modules") + exepath = os.path.join(basepath, "Programs") self.test_exe = exe = os.path.join(exepath, exename) if not os.path.exists(exe): self.skipTest("%r doesn't exist" % exe) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 18:09:27 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 16:09:27 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_asyncio=3A_Improve_cancele?= =?utf-8?q?d_timer_handles_cleanup=2E_Closes_issue_=2322448=2E?= Message-ID: <20140925160922.123342.84721@mail.hg.python.org> https://hg.python.org/cpython/rev/a6aaacb2b807 
changeset: 92579:a6aaacb2b807 parent: 92577:c87e00a6258d user: Yury Selivanov date: Thu Sep 25 12:09:09 2014 -0400 summary: asyncio: Improve canceled timer handles cleanup. Closes issue #22448. Patch by Joshua Moore-Oliva. files: Lib/asyncio/base_events.py | 44 ++++- Lib/asyncio/events.py | 29 ++- Lib/test/test_asyncio/test_base_events.py | 84 ++++++++++- Lib/test/test_asyncio/test_events.py | 14 +- Misc/NEWS | 3 + 5 files changed, 148 insertions(+), 26 deletions(-) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -40,6 +40,13 @@ # Argument for default thread pool executor creation. _MAX_WORKERS = 5 +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 def _format_handle(handle): cb = handle._callback @@ -145,6 +152,7 @@ class BaseEventLoop(events.AbstractEventLoop): def __init__(self): + self._timer_cancelled_count = 0 self._closed = False self._ready = collections.deque() self._scheduled = [] @@ -349,6 +357,7 @@ if timer._source_traceback: del timer._source_traceback[-1] heapq.heappush(self._scheduled, timer) + timer._scheduled = True return timer def call_soon(self, callback, *args): @@ -964,16 +973,19 @@ assert isinstance(handle, events.Handle), 'A Handle is required here' if handle._cancelled: return - if isinstance(handle, events.TimerHandle): - heapq.heappush(self._scheduled, handle) - else: - self._ready.append(handle) + assert not isinstance(handle, events.TimerHandle) + self._ready.append(handle) def _add_callback_signalsafe(self, handle): """Like _add_callback() but called from a signal handler.""" self._add_callback(handle) self._write_to_self() + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + def _run_once(self): """Run one full iteration of the event loop. @@ -981,9 +993,26 @@ schedules the resulting callbacks, and finally schedules 'call_later' callbacks. """ - # Remove delayed calls that were cancelled from head of queue. - while self._scheduled and self._scheduled[0]._cancelled: - heapq.heappop(self._scheduled) + + # Remove delayed calls that were cancelled if their number is too high + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + self._timer_cancelled_count / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + + self._scheduled = [x for x in self._scheduled if not x._cancelled] + self._timer_cancelled_count = 0 + + heapq.heapify(self._scheduled) + else: + # Remove delayed calls that were cancelled from head of queue. + while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False timeout = None if self._ready: @@ -1024,6 +1053,7 @@ if handle._when >= end_time: break handle = heapq.heappop(self._scheduled) + handle._scheduled = False self._ready.append(handle) # This is the only place where callbacks are actually *called*. 
diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -105,14 +105,15 @@ return '<%s>' % ' '.join(info) def cancel(self): - self._cancelled = True - if self._loop.get_debug(): - # Keep a representation in debug mode to keep callback and - # parameters. For example, to log the warning "Executing took 2.5 second" - self._repr = repr(self) - self._callback = None - self._args = None + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None def _run(self): try: @@ -134,7 +135,7 @@ class TimerHandle(Handle): """Object returned by timed callback registration methods.""" - __slots__ = ['_when'] + __slots__ = ['_scheduled', '_when'] def __init__(self, when, callback, args, loop): assert when is not None @@ -142,6 +143,7 @@ if self._source_traceback: del self._source_traceback[-1] self._when = when + self._scheduled = False def _repr_info(self): info = super()._repr_info() @@ -180,6 +182,11 @@ equal = self.__eq__(other) return NotImplemented if equal is NotImplemented else not equal + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super().cancel() + class AbstractServer: """Abstract server returned by create_server().""" @@ -238,6 +245,10 @@ # Methods scheduling callbacks. All these return Handles. + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + def call_soon(self, callback, *args): return self.call_later(0, callback, *args) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -2,6 +2,7 @@ import errno import logging +import math import socket import sys import time @@ -73,13 +74,6 @@ self.assertFalse(self.loop._scheduled) self.assertIn(h, self.loop._ready) - def test__add_callback_timer(self): - h = asyncio.TimerHandle(time.monotonic()+10, lambda: False, (), - self.loop) - - self.loop._add_callback(h) - self.assertIn(h, self.loop._scheduled) - def test__add_callback_cancelled_handle(self): h = asyncio.Handle(lambda: False, (), self.loop) h.cancel() @@ -283,6 +277,82 @@ self.assertTrue(processed) self.assertEqual([handle], list(self.loop._ready)) + def test__run_once_cancelled_event_cleanup(self): + self.loop._process_events = mock.Mock() + + self.assertTrue( + 0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0) + + def cb(): + pass + + # Set up one "blocking" event that will not be cancelled to + # ensure later cancelled events do not make it to the head + # of the queue and get cleaned. 
+ not_cancelled_count = 1 + self.loop.call_later(3000, cb) + + # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES) + # cancelled handles, ensure they aren't removed + + cancelled_count = 2 + for x in range(2): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Add some cancelled events that will be at head and removed + cancelled_count += 2 + for x in range(2): + h = self.loop.call_later(100, cb) + h.cancel() + + # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low + self.assertLessEqual(cancelled_count + not_cancelled_count, + base_events._MIN_SCHEDULED_TIMER_HANDLES) + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.loop._run_once() + + cancelled_count -= 2 + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION + # so that deletion of cancelled events will occur on next _run_once + add_cancel_count = int(math.ceil( + base_events._MIN_SCHEDULED_TIMER_HANDLES * + base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1 + + add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES - + add_cancel_count, 0) + + # Add some events that will not be cancelled + not_cancelled_count += add_not_cancel_count + for x in range(add_not_cancel_count): + self.loop.call_later(3600, cb) + + # Add enough cancelled events + cancelled_count += add_cancel_count + for x in range(add_cancel_count): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Ensure all handles are still scheduled + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + self.loop._run_once() + + # Ensure cancelled events were removed + self.assertEqual(len(self.loop._scheduled), not_cancelled_count) + + # Ensure only uncancelled events remain scheduled + self.assertTrue(all([not x._cancelled for x in self.loop._scheduled])) + def test_run_until_complete_type_error(self): self.assertRaises(TypeError, self.loop.run_until_complete, 'blah') diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -1890,9 +1890,17 @@ # cancelled handle h.cancel() - self.assertEqual(repr(h), - '' - % (filename, lineno, create_filename, create_lineno)) + self.assertEqual( + repr(h), + '' + % (filename, lineno, create_filename, create_lineno)) + + # double cancellation won't overwrite _repr + h.cancel() + self.assertEqual( + repr(h), + '' + % (filename, lineno, create_filename, create_lineno)) def test_handle_source_traceback(self): loop = asyncio.get_event_loop_policy().new_event_loop() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -137,6 +137,9 @@ Library ------- +- Issue #22448: Improve canceled timer handles cleanup to prevent + unbound memory usage. Patch by Joshua Moore-Oliva. + - Issue #22427: TemporaryDirectory no longer attempts to clean up twice when used in the with statement in generator. 
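For illustration, a minimal sketch of the behaviour this patch adds: once more than 100 timers are scheduled and more than half of them have been cancelled, the next loop iteration purges the cancelled TimerHandles instead of keeping them until their deadlines. The handle count below is arbitrary, and the private _scheduled list is inspected only to observe the effect.

    import asyncio

    loop = asyncio.new_event_loop()
    # Schedule many far-future callbacks, then cancel them all.
    handles = [loop.call_later(3600, lambda: None) for _ in range(1000)]
    for h in handles:
        h.cancel()
    # Run one iteration: _run_once() sees that the cancelled fraction
    # exceeds _MIN_CANCELLED_TIMER_HANDLES_FRACTION and rebuilds the heap
    # without the cancelled handles rather than waiting an hour for them.
    loop.call_soon(loop.stop)
    loop.run_forever()
    print(len(loop._scheduled))   # 0 with this change applied, not 1000
    loop.close()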
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 18:09:27 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 16:09:27 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogYXN5bmNpbzogSW1w?= =?utf-8?q?rove_canceled_timer_handles_cleanup=2E_Closes_issue_=2322448=2E?= Message-ID: <20140925160922.123342.55807@mail.hg.python.org> https://hg.python.org/cpython/rev/2a868c9f8f15 changeset: 92578:2a868c9f8f15 branch: 3.4 parent: 92572:7b962b896f10 user: Yury Selivanov date: Thu Sep 25 12:07:56 2014 -0400 summary: asyncio: Improve canceled timer handles cleanup. Closes issue #22448. Patch by Joshua Moore-Oliva. files: Lib/asyncio/base_events.py | 44 ++++- Lib/asyncio/events.py | 29 ++- Lib/test/test_asyncio/test_base_events.py | 84 ++++++++++- Lib/test/test_asyncio/test_events.py | 14 +- Misc/NEWS | 14 + 5 files changed, 159 insertions(+), 26 deletions(-) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -40,6 +40,13 @@ # Argument for default thread pool executor creation. _MAX_WORKERS = 5 +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 def _format_handle(handle): cb = handle._callback @@ -145,6 +152,7 @@ class BaseEventLoop(events.AbstractEventLoop): def __init__(self): + self._timer_cancelled_count = 0 self._closed = False self._ready = collections.deque() self._scheduled = [] @@ -349,6 +357,7 @@ if timer._source_traceback: del timer._source_traceback[-1] heapq.heappush(self._scheduled, timer) + timer._scheduled = True return timer def call_soon(self, callback, *args): @@ -964,16 +973,19 @@ assert isinstance(handle, events.Handle), 'A Handle is required here' if handle._cancelled: return - if isinstance(handle, events.TimerHandle): - heapq.heappush(self._scheduled, handle) - else: - self._ready.append(handle) + assert not isinstance(handle, events.TimerHandle) + self._ready.append(handle) def _add_callback_signalsafe(self, handle): """Like _add_callback() but called from a signal handler.""" self._add_callback(handle) self._write_to_self() + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + def _run_once(self): """Run one full iteration of the event loop. @@ -981,9 +993,26 @@ schedules the resulting callbacks, and finally schedules 'call_later' callbacks. """ - # Remove delayed calls that were cancelled from head of queue. - while self._scheduled and self._scheduled[0]._cancelled: - heapq.heappop(self._scheduled) + + # Remove delayed calls that were cancelled if their number is too high + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + self._timer_cancelled_count / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + + self._scheduled = [x for x in self._scheduled if not x._cancelled] + self._timer_cancelled_count = 0 + + heapq.heapify(self._scheduled) + else: + # Remove delayed calls that were cancelled from head of queue. 
+ while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False timeout = None if self._ready: @@ -1024,6 +1053,7 @@ if handle._when >= end_time: break handle = heapq.heappop(self._scheduled) + handle._scheduled = False self._ready.append(handle) # This is the only place where callbacks are actually *called*. diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -105,14 +105,15 @@ return '<%s>' % ' '.join(info) def cancel(self): - self._cancelled = True - if self._loop.get_debug(): - # Keep a representation in debug mode to keep callback and - # parameters. For example, to log the warning "Executing took 2.5 second" - self._repr = repr(self) - self._callback = None - self._args = None + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None def _run(self): try: @@ -134,7 +135,7 @@ class TimerHandle(Handle): """Object returned by timed callback registration methods.""" - __slots__ = ['_when'] + __slots__ = ['_scheduled', '_when'] def __init__(self, when, callback, args, loop): assert when is not None @@ -142,6 +143,7 @@ if self._source_traceback: del self._source_traceback[-1] self._when = when + self._scheduled = False def _repr_info(self): info = super()._repr_info() @@ -180,6 +182,11 @@ equal = self.__eq__(other) return NotImplemented if equal is NotImplemented else not equal + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super().cancel() + class AbstractServer: """Abstract server returned by create_server().""" @@ -238,6 +245,10 @@ # Methods scheduling callbacks. All these return Handles. + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + def call_soon(self, callback, *args): return self.call_later(0, callback, *args) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -2,6 +2,7 @@ import errno import logging +import math import socket import sys import time @@ -73,13 +74,6 @@ self.assertFalse(self.loop._scheduled) self.assertIn(h, self.loop._ready) - def test__add_callback_timer(self): - h = asyncio.TimerHandle(time.monotonic()+10, lambda: False, (), - self.loop) - - self.loop._add_callback(h) - self.assertIn(h, self.loop._scheduled) - def test__add_callback_cancelled_handle(self): h = asyncio.Handle(lambda: False, (), self.loop) h.cancel() @@ -283,6 +277,82 @@ self.assertTrue(processed) self.assertEqual([handle], list(self.loop._ready)) + def test__run_once_cancelled_event_cleanup(self): + self.loop._process_events = mock.Mock() + + self.assertTrue( + 0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0) + + def cb(): + pass + + # Set up one "blocking" event that will not be cancelled to + # ensure later cancelled events do not make it to the head + # of the queue and get cleaned. 
+ not_cancelled_count = 1 + self.loop.call_later(3000, cb) + + # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES) + # cancelled handles, ensure they aren't removed + + cancelled_count = 2 + for x in range(2): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Add some cancelled events that will be at head and removed + cancelled_count += 2 + for x in range(2): + h = self.loop.call_later(100, cb) + h.cancel() + + # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low + self.assertLessEqual(cancelled_count + not_cancelled_count, + base_events._MIN_SCHEDULED_TIMER_HANDLES) + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.loop._run_once() + + cancelled_count -= 2 + + self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) + + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION + # so that deletion of cancelled events will occur on next _run_once + add_cancel_count = int(math.ceil( + base_events._MIN_SCHEDULED_TIMER_HANDLES * + base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1 + + add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES - + add_cancel_count, 0) + + # Add some events that will not be cancelled + not_cancelled_count += add_not_cancel_count + for x in range(add_not_cancel_count): + self.loop.call_later(3600, cb) + + # Add enough cancelled events + cancelled_count += add_cancel_count + for x in range(add_cancel_count): + h = self.loop.call_later(3600, cb) + h.cancel() + + # Ensure all handles are still scheduled + self.assertEqual(len(self.loop._scheduled), + cancelled_count + not_cancelled_count) + + self.loop._run_once() + + # Ensure cancelled events were removed + self.assertEqual(len(self.loop._scheduled), not_cancelled_count) + + # Ensure only uncancelled events remain scheduled + self.assertTrue(all([not x._cancelled for x in self.loop._scheduled])) + def test_run_until_complete_type_error(self): self.assertRaises(TypeError, self.loop.run_until_complete, 'blah') diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -1890,9 +1890,17 @@ # cancelled handle h.cancel() - self.assertEqual(repr(h), - '' - % (filename, lineno, create_filename, create_lineno)) + self.assertEqual( + repr(h), + '' + % (filename, lineno, create_filename, create_lineno)) + + # double cancellation won't overwrite _repr + h.cancel() + self.assertEqual( + repr(h), + '' + % (filename, lineno, create_filename, create_lineno)) def test_handle_source_traceback(self): loop = asyncio.get_event_loop_policy().new_event_loop() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,6 +2,20 @@ Python News +++++++++++ + +What's New in Python 3.4.3? +=========================== + +Core and Builtins +----------------- + +Library +------- + +- Issue #22448: Improve canceled timer handles cleanup to prevent + unbound memory usage. Patch by Joshua Moore-Oliva. + + What's New in Python 3.4.2? 
=========================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Sep 25 18:15:12 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 16:15:12 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140925161351.123394.80224@mail.hg.python.org> https://hg.python.org/cpython/rev/bfdb995e8d7d changeset: 92580:bfdb995e8d7d parent: 92579:a6aaacb2b807 parent: 92578:2a868c9f8f15 user: Yury Selivanov date: Thu Sep 25 12:13:41 2014 -0400 summary: merge 3.4 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 01:12:47 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 23:12:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_asyncio=2Etest=5Ftasks=3A_?= =?utf-8?q?Fix_test=5Fenv=5Fvar=5Fdebug_to_use_correct_asyncio_module?= Message-ID: <20140925231244.82215.70131@mail.hg.python.org> https://hg.python.org/cpython/rev/5226f89437ed changeset: 92582:5226f89437ed parent: 92580:bfdb995e8d7d user: Yury Selivanov date: Thu Sep 25 19:12:37 2014 -0400 summary: asyncio.test_tasks: Fix test_env_var_debug to use correct asyncio module files: Lib/test/test_asyncio/test_tasks.py | 15 +++++++++++---- 1 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -1,5 +1,6 @@ """Tests for tasks.py.""" +import os import re import sys import types @@ -1768,25 +1769,31 @@ self.assertEqual(fut.result(), [3, 1, exc, exc2]) def test_env_var_debug(self): + aio_path = os.path.dirname(os.path.dirname(asyncio.__file__)) + code = '\n'.join(( 'import asyncio.coroutines', 'print(asyncio.coroutines._DEBUG)')) # Test with -E to not fail if the unit test was run with # PYTHONASYNCIODEBUG set to a non-empty string - sts, stdout, stderr = assert_python_ok('-E', '-c', code) + sts, stdout, stderr = assert_python_ok('-E', '-c', code, + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, - PYTHONASYNCIODEBUG='') + PYTHONASYNCIODEBUG='', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, - PYTHONASYNCIODEBUG='1') + PYTHONASYNCIODEBUG='1', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'True') sts, stdout, stderr = assert_python_ok('-E', '-c', code, - PYTHONASYNCIODEBUG='1') + PYTHONASYNCIODEBUG='1', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 01:12:47 2014 From: python-checkins at python.org (yury.selivanov) Date: Thu, 25 Sep 2014 23:12:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogYXN5bmNpby50ZXN0?= =?utf-8?q?=5Ftasks=3A_Fix_test=5Fenv=5Fvar=5Fdebug_to_use_correct_asyncio?= =?utf-8?q?_module?= Message-ID: <20140925231244.82106.29879@mail.hg.python.org> https://hg.python.org/cpython/rev/e6eaf52325c5 changeset: 92581:e6eaf52325c5 branch: 3.4 parent: 92578:2a868c9f8f15 user: Yury Selivanov date: Thu Sep 25 19:12:10 2014 -0400 summary: asyncio.test_tasks: Fix test_env_var_debug to use correct asyncio module files: Lib/test/test_asyncio/test_tasks.py | 15 +++++++++++---- 1 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_tasks.py 
b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -1,5 +1,6 @@ """Tests for tasks.py.""" +import os import re import sys import types @@ -1768,25 +1769,31 @@ self.assertEqual(fut.result(), [3, 1, exc, exc2]) def test_env_var_debug(self): + aio_path = os.path.dirname(os.path.dirname(asyncio.__file__)) + code = '\n'.join(( 'import asyncio.coroutines', 'print(asyncio.coroutines._DEBUG)')) # Test with -E to not fail if the unit test was run with # PYTHONASYNCIODEBUG set to a non-empty string - sts, stdout, stderr = assert_python_ok('-E', '-c', code) + sts, stdout, stderr = assert_python_ok('-E', '-c', code, + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, - PYTHONASYNCIODEBUG='') + PYTHONASYNCIODEBUG='', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, - PYTHONASYNCIODEBUG='1') + PYTHONASYNCIODEBUG='1', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'True') sts, stdout, stderr = assert_python_ok('-E', '-c', code, - PYTHONASYNCIODEBUG='1') + PYTHONASYNCIODEBUG='1', + PYTHONPATH=aio_path) self.assertEqual(stdout.rstrip(), b'False') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 08:05:41 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 26 Sep 2014 06:05:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <20140926060538.53366.39267@mail.hg.python.org> https://hg.python.org/cpython/rev/fd0c02c3df31 changeset: 92583:fd0c02c3df31 parent: 92582:5226f89437ed parent: 92581:e6eaf52325c5 user: Serhiy Storchaka date: Fri Sep 26 09:04:19 2014 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Fri Sep 26 09:27:51 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 26 Sep 2014 09:27:51 +0200 Subject: [Python-checkins] Daily reference leaks (5226f89437ed): sum=9 Message-ID: results for 5226f89437ed on branch "default" -------------------------------------------- test_collections leaked [2, 0, 0] references, sum=2 test_functools leaked [0, 0, 3] memory blocks, sum=3 test_site leaked [2, -2, 2] references, sum=2 test_site leaked [2, -2, 2] memory blocks, sum=2 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogCm1c3n', '-x'] From python-checkins at python.org Fri Sep 26 14:35:09 2014 From: python-checkins at python.org (berker.peksag) Date: Fri, 26 Sep 2014 12:35:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2317462=3A_Add_a_paragraph_about_advantages_of_ar?= =?utf-8?q?gparse_over_optparse=2E?= Message-ID: <20140926123451.81870.53535@mail.hg.python.org> https://hg.python.org/cpython/rev/45e1c0029aff changeset: 92585:45e1c0029aff parent: 92583:fd0c02c3df31 parent: 92584:84313c61e60d user: Berker Peksag date: Fri Sep 26 15:35:02 2014 +0300 summary: Issue #17462: Add a paragraph about advantages of argparse over optparse. Patch by Anastasia Filatova. 
files: Doc/library/argparse.rst | 10 ++++++++++ Misc/ACKS | 1 + 2 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1949,6 +1949,16 @@ :mod:`optparse` had either been copy-pasted over or monkey-patched, it no longer seemed practical to try to maintain the backwards compatibility. +The :mod:`argparse` module improves on the standard library :mod:`optparse` +module in a number of ways including: + +* Handling positional arguments. +* Supporting sub-commands. +* Allowing alternative option prefixes like ``+`` and ``/``. +* Handling zero-or-more and one-or-more style arguments. +* Producing more informative usage messages. +* Providing a much simpler interface for custom ``type`` and ``action``. + A partial upgrade path from :mod:`optparse` to :mod:`argparse`: * Replace all :meth:`optparse.OptionParser.add_option` calls with diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -413,6 +413,7 @@ John Feuerstein Carl Feynman Vincent Fiack +Anastasia Filatova Tomer Filiba Jeffrey Finkelstein Russell Finn -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 14:35:09 2014 From: python-checkins at python.org (berker.peksag) Date: Fri, 26 Sep 2014 12:35:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE3NDYy?= =?utf-8?q?=3A_Add_a_paragraph_about_advantages_of_argparse_over_optparse?= =?utf-8?q?=2E?= Message-ID: <20140926123451.47403.25968@mail.hg.python.org> https://hg.python.org/cpython/rev/84313c61e60d changeset: 92584:84313c61e60d branch: 3.4 parent: 92581:e6eaf52325c5 user: Berker Peksag date: Fri Sep 26 15:34:26 2014 +0300 summary: Issue #17462: Add a paragraph about advantages of argparse over optparse. Patch by Anastasia Filatova. files: Doc/library/argparse.rst | 10 ++++++++++ Misc/ACKS | 1 + 2 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1949,6 +1949,16 @@ :mod:`optparse` had either been copy-pasted over or monkey-patched, it no longer seemed practical to try to maintain the backwards compatibility. +The :mod:`argparse` module improves on the standard library :mod:`optparse` +module in a number of ways including: + +* Handling positional arguments. +* Supporting sub-commands. +* Allowing alternative option prefixes like ``+`` and ``/``. +* Handling zero-or-more and one-or-more style arguments. +* Producing more informative usage messages. +* Providing a much simpler interface for custom ``type`` and ``action``. 
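As a small illustration of the positional-argument, sub-command and one-or-more features listed above (the program and command names here are invented for the example):

    import argparse

    parser = argparse.ArgumentParser(prog='tool')
    subparsers = parser.add_subparsers(dest='command')

    # One sub-command taking a required positional plus a one-or-more
    # argument -- two of the styles optparse does not handle directly.
    copy = subparsers.add_parser('copy')
    copy.add_argument('dest')
    copy.add_argument('sources', nargs='+')

    args = parser.parse_args(['copy', '/tmp/out', 'a.txt', 'b.txt'])
    print(args.command, args.dest, args.sources)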
+ A partial upgrade path from :mod:`optparse` to :mod:`argparse`: * Replace all :meth:`optparse.OptionParser.add_option` calls with diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -411,6 +411,7 @@ John Feuerstein Carl Feynman Vincent Fiack +Anastasia Filatova Tomer Filiba Jeffrey Finkelstein Russell Finn -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 14:39:08 2014 From: python-checkins at python.org (berker.peksag) Date: Fri, 26 Sep 2014 12:39:08 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE3NDYy?= =?utf-8?q?=3A_Add_a_paragraph_about_advantages_of_argparse_over_optparse?= =?utf-8?q?=2E?= Message-ID: <20140926123855.53382.39635@mail.hg.python.org> https://hg.python.org/cpython/rev/4eb847e7ddde changeset: 92586:4eb847e7ddde branch: 2.7 parent: 92571:22a46f05ce23 user: Berker Peksag date: Fri Sep 26 15:39:05 2014 +0300 summary: Issue #17462: Add a paragraph about advantages of argparse over optparse. Patch by Anastasia Filatova. files: Doc/library/argparse.rst | 10 ++++++++++ Misc/ACKS | 1 + 2 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1913,6 +1913,16 @@ :mod:`optparse` had either been copy-pasted over or monkey-patched, it no longer seemed practical to try to maintain the backwards compatibility. +The :mod:`argparse` module improves on the standard library :mod:`optparse` +module in a number of ways including: + +* Handling positional arguments. +* Supporting sub-commands. +* Allowing alternative option prefixes like ``+`` and ``/``. +* Handling zero-or-more and one-or-more style arguments. +* Producing more informative usage messages. +* Providing a much simpler interface for custom ``type`` and ``action``. + A partial upgrade path from :mod:`optparse` to :mod:`argparse`: * Replace all :meth:`optparse.OptionParser.add_option` calls with diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -406,6 +406,7 @@ John Feuerstein Carl Feynman Vincent Fiack +Anastasia Filatova Tomer Filiba Jeffrey Finkelstein Russell Finn -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 18:34:54 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 16:34:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_os=3A_Include_posix_functi?= =?utf-8?b?b25zIGluIG9zLl9fYWxsX18uIENsb3NlcyBpc3N1ZSAjMTg1NTQu?= Message-ID: <20140926163311.47393.64866@mail.hg.python.org> https://hg.python.org/cpython/rev/7230978647a8 changeset: 92587:7230978647a8 parent: 92585:45e1c0029aff user: Yury Selivanov date: Fri Sep 26 12:33:06 2014 -0400 summary: os: Include posix functions in os.__all__. Closes issue #18554. Patch by Ronald Oussoren. 
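A quick way to see the effect of the os.__all__ change above; 'open' comes from the posix module and 'walk' is defined in os.py itself, mirroring the names checked in the new test:

    import os
    # With the change applied, posix-provided names are listed in
    # os.__all__ alongside the pure-Python helpers, so "from os import *"
    # exposes them as well.
    print('open' in os.__all__, 'walk' in os.__all__)   # True True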
files: Lib/os.py | 4 ++++ Lib/test/test_os.py | 8 ++++++++ Misc/NEWS | 2 ++ 3 files changed, 14 insertions(+), 0 deletions(-) diff --git a/Lib/os.py b/Lib/os.py --- a/Lib/os.py +++ b/Lib/os.py @@ -61,6 +61,10 @@ except ImportError: pass + import posix + __all__.extend(_get_exports_list(posix)) + del posix + elif 'nt' in _names: name = 'nt' linesep = '\r\n' diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -2616,6 +2616,13 @@ self.assertEqual(os.get_blocking(fd), True) + +class ExportsTests(unittest.TestCase): + def test_os_all(self): + self.assertIn('open', os.__all__) + self.assertIn('walk', os.__all__) + + @support.reap_threads def test_main(): support.run_unittest( @@ -2652,6 +2659,7 @@ FDInheritanceTests, Win32JunctionTests, BlockingTests, + ExportsTests, ) if __name__ == "__main__": diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #18554: os.__all__ includes posix functions. + - Issue #21391: Use os.path.abspath in the shutil module. - Issue #11471: avoid generating a JUMP_FORWARD instruction at the end of -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:08:49 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 21:08:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogdGVtcGZpbGU6IEZp?= =?utf-8?q?x_docstring=2E_Issue_=2321397=2C_patch_by_R=2E_David_Murray=2E?= Message-ID: <20140926210848.82007.97831@mail.hg.python.org> https://hg.python.org/cpython/rev/adac8ba7b1b1 changeset: 92588:adac8ba7b1b1 branch: 2.7 parent: 92586:4eb847e7ddde user: Yury Selivanov date: Fri Sep 26 17:07:39 2014 -0400 summary: tempfile: Fix docstring. Issue #21397, patch by R. David Murray. files: Lib/tempfile.py | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Lib/tempfile.py b/Lib/tempfile.py --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -1,10 +1,10 @@ """Temporary files. This module provides generic, low- and high-level interfaces for -creating temporary files and directories. The interfaces listed -as "safe" just below can be used without fear of race conditions. -Those listed as "unsafe" cannot, and are provided for backward -compatibility only. +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. This module also provides some data items to the user: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:08:54 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 21:08:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_tempfile=3A_Fix_docstring?= =?utf-8?q?=2E_Issue_=2321397=2C_patch_by_R=2E_David_Murray=2E?= Message-ID: <20140926210849.82215.31894@mail.hg.python.org> https://hg.python.org/cpython/rev/db17f57c32af changeset: 92590:db17f57c32af parent: 92587:7230978647a8 user: Yury Selivanov date: Fri Sep 26 17:08:21 2014 -0400 summary: tempfile: Fix docstring. Issue #21397, patch by R. David Murray. files: Lib/tempfile.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/tempfile.py b/Lib/tempfile.py --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -1,10 +1,10 @@ """Temporary files. 
This module provides generic, low- and high-level interfaces for -creating temporary files and directories. The interfaces listed -as "safe" just below can be used without fear of race conditions. -Those listed as "unsafe" cannot, and are provided for backward -compatibility only. +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. This module also provides some data items to the user: @@ -544,7 +544,7 @@ else: # Setting newline="\n" avoids newline translation; # this is important because otherwise on Windows we'd - # hget double newline translation upon rollover(). + # get double newline translation upon rollover(). self._file = _io.StringIO(newline="\n") self._max_size = max_size self._rolled = False -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:08:55 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 21:08:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogdGVtcGZpbGU6IEZp?= =?utf-8?q?x_docstring=2E_Issue_=2321397=2C_patch_by_R=2E_David_Murray=2E?= Message-ID: <20140926210849.53366.88015@mail.hg.python.org> https://hg.python.org/cpython/rev/500d3d6f22ff changeset: 92589:500d3d6f22ff branch: 3.4 parent: 92584:84313c61e60d user: Yury Selivanov date: Fri Sep 26 17:08:02 2014 -0400 summary: tempfile: Fix docstring. Issue #21397, patch by R. David Murray. files: Lib/tempfile.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/tempfile.py b/Lib/tempfile.py --- a/Lib/tempfile.py +++ b/Lib/tempfile.py @@ -1,10 +1,10 @@ """Temporary files. This module provides generic, low- and high-level interfaces for -creating temporary files and directories. The interfaces listed -as "safe" just below can be used without fear of race conditions. -Those listed as "unsafe" cannot, and are provided for backward -compatibility only. +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. This module also provides some data items to the user: @@ -518,7 +518,7 @@ else: # Setting newline="\n" avoids newline translation; # this is important because otherwise on Windows we'd - # hget double newline translation upon rollover(). + # get double newline translation upon rollover(). 
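In practical terms the race-free interfaces create and open the file in a single step; a brief sketch (the prefix is arbitrary):

    import os
    import tempfile

    # mkstemp() atomically creates and opens the file, so no other process
    # can claim the name first; mktemp() only returns a name and is kept
    # for backward compatibility, as the revised docstring says.
    fd, path = tempfile.mkstemp(prefix='example-')
    try:
        os.write(fd, b'data')
    finally:
        os.close(fd)
        os.remove(path)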
self._file = _io.StringIO(newline="\n") self._max_size = max_size self._rolled = False -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:12:18 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 21:12:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_null_merge?= Message-ID: <20140926211206.47401.84007@mail.hg.python.org> https://hg.python.org/cpython/rev/7dc24b97dc6a changeset: 92591:7dc24b97dc6a parent: 92590:db17f57c32af parent: 92589:500d3d6f22ff user: Yury Selivanov date: Fri Sep 26 17:11:59 2014 -0400 summary: null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:32:14 2014 From: python-checkins at python.org (antoine.pitrou) Date: Fri, 26 Sep 2014 21:32:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=235309=3A_distutils?= =?utf-8?q?=27_build_and_build=5Fext_commands_now_accept_a_=60=60-j=60=60?= Message-ID: <20140926213205.82362.21543@mail.hg.python.org> https://hg.python.org/cpython/rev/bbe57429eba0 changeset: 92592:bbe57429eba0 user: Antoine Pitrou date: Fri Sep 26 23:31:59 2014 +0200 summary: Issue #5309: distutils' build and build_ext commands now accept a ``-j`` option to enable parallel building of extension modules. files: Lib/distutils/command/build.py | 9 ++ Lib/distutils/command/build_ext.py | 57 +++++++++++++- Lib/distutils/tests/test_build_ext.py | 56 +++++++++----- Misc/NEWS | 3 + Modules/Setup.dist | 1 + setup.py | 19 +++++ 6 files changed, 117 insertions(+), 28 deletions(-) diff --git a/Lib/distutils/command/build.py b/Lib/distutils/command/build.py --- a/Lib/distutils/command/build.py +++ b/Lib/distutils/command/build.py @@ -36,6 +36,8 @@ "(default: %s)" % get_platform()), ('compiler=', 'c', "specify the compiler type"), + ('parallel=', 'j', + "number of parallel build jobs"), ('debug', 'g', "compile extensions and libraries with debugging information"), ('force', 'f', @@ -65,6 +67,7 @@ self.debug = None self.force = 0 self.executable = None + self.parallel = None def finalize_options(self): if self.plat_name is None: @@ -116,6 +119,12 @@ if self.executable is None: self.executable = os.path.normpath(sys.executable) + if isinstance(self.parallel, str): + try: + self.parallel = int(self.parallel) + except ValueError: + raise DistutilsOptionError("parallel should be an integer") + def run(self): # Run all relevant sub-commands. 
This will be some subset of: # - build_py - pure Python modules diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py --- a/Lib/distutils/command/build_ext.py +++ b/Lib/distutils/command/build_ext.py @@ -4,7 +4,10 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re +import contextlib +import os +import re +import sys from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version @@ -85,6 +88,8 @@ "forcibly build everything (ignore file timestamps)"), ('compiler=', 'c', "specify the compiler type"), + ('parallel=', 'j', + "number of parallel build jobs"), ('swig-cpp', None, "make SWIG create C++ files (default is C)"), ('swig-opts=', None, @@ -124,6 +129,7 @@ self.swig_cpp = None self.swig_opts = None self.user = None + self.parallel = None def finalize_options(self): from distutils import sysconfig @@ -134,6 +140,7 @@ ('compiler', 'compiler'), ('debug', 'debug'), ('force', 'force'), + ('parallel', 'parallel'), ('plat_name', 'plat_name'), ) @@ -274,6 +281,12 @@ self.library_dirs.append(user_lib) self.rpath.append(user_lib) + if isinstance(self.parallel, str): + try: + self.parallel = int(self.parallel) + except ValueError: + raise DistutilsOptionError("parallel should be an integer") + def run(self): from distutils.ccompiler import new_compiler @@ -442,15 +455,45 @@ def build_extensions(self): # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) + if self.parallel: + self._build_extensions_parallel() + else: + self._build_extensions_serial() + def _build_extensions_parallel(self): + workers = self.parallel + if self.parallel is True: + workers = os.cpu_count() # may return None + try: + from concurrent.futures import ThreadPoolExecutor + except ImportError: + workers = None + + if workers is None: + self._build_extensions_serial() + return + + with ThreadPoolExecutor(max_workers=workers) as executor: + futures = [executor.submit(self.build_extension, ext) + for ext in self.extensions] + for ext, fut in zip(self.extensions, futures): + with self._filter_build_errors(ext): + fut.result() + + def _build_extensions_serial(self): for ext in self.extensions: - try: + with self._filter_build_errors(ext): self.build_extension(ext) - except (CCompilerError, DistutilsError, CompileError) as e: - if not ext.optional: - raise - self.warn('building extension "%s" failed: %s' % - (ext.name, e)) + + @contextlib.contextmanager + def _filter_build_errors(self, ext): + try: + yield + except (CCompilerError, DistutilsError, CompileError) as e: + if not ext.optional: + raise + self.warn('building extension "%s" failed: %s' % + (ext.name, e)) def build_extension(self, ext): sources = ext.sources diff --git a/Lib/distutils/tests/test_build_ext.py b/Lib/distutils/tests/test_build_ext.py --- a/Lib/distutils/tests/test_build_ext.py +++ b/Lib/distutils/tests/test_build_ext.py @@ -37,6 +37,9 @@ from distutils.command import build_ext build_ext.USER_BASE = site.USER_BASE + def build_ext(self, *args, **kwargs): + return build_ext(*args, **kwargs) + def test_build_ext(self): global ALREADY_TESTED copy_xxmodule_c(self.tmp_dir) @@ -44,7 +47,7 @@ xx_ext = Extension('xx', [xx_c]) dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]}) dist.package_dir = self.tmp_dir - cmd = build_ext(dist) + cmd = self.build_ext(dist) fixup_build_ext(cmd) cmd.build_lib = self.tmp_dir cmd.build_temp = self.tmp_dir @@ -91,7 +94,7 @@ def 
test_solaris_enable_shared(self): dist = Distribution({'name': 'xx'}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) old = sys.platform sys.platform = 'sunos' # fooling finalize_options @@ -113,7 +116,7 @@ def test_user_site(self): import site dist = Distribution({'name': 'xx'}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) # making sure the user option is there options = [name for name, short, lable in @@ -144,14 +147,14 @@ # with the optional argument. modules = [Extension('foo', ['xxx'], optional=False)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.ensure_finalized() self.assertRaises((UnknownFileError, CompileError), cmd.run) # should raise an error modules = [Extension('foo', ['xxx'], optional=True)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.ensure_finalized() cmd.run() # should pass @@ -160,7 +163,7 @@ # etc.) are in the include search path. modules = [Extension('foo', ['xxx'], optional=False)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.finalize_options() from distutils import sysconfig @@ -172,14 +175,14 @@ # make sure cmd.libraries is turned into a list # if it's a string - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.libraries = 'my_lib, other_lib lastlib' cmd.finalize_options() self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib']) # make sure cmd.library_dirs is turned into a list # if it's a string - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep cmd.finalize_options() self.assertIn('my_lib_dir', cmd.library_dirs) @@ -187,7 +190,7 @@ # make sure rpath is turned into a list # if it's a string - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.rpath = 'one%stwo' % os.pathsep cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) @@ -196,32 +199,32 @@ # make sure define is turned into 2-tuples # strings if they are ','-separated strings - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.define = 'one,two' cmd.finalize_options() self.assertEqual(cmd.define, [('one', '1'), ('two', '1')]) # make sure undef is turned into a list of # strings if they are ','-separated strings - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.undef = 'one,two' cmd.finalize_options() self.assertEqual(cmd.undef, ['one', 'two']) # make sure swig_opts is turned into a list - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.swig_opts = None cmd.finalize_options() self.assertEqual(cmd.swig_opts, []) - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.swig_opts = '1 2' cmd.finalize_options() self.assertEqual(cmd.swig_opts, ['1', '2']) def test_check_extensions_list(self): dist = Distribution() - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.finalize_options() #'extensions' option must be a list of Extension instances @@ -270,7 +273,7 @@ def test_get_source_files(self): modules = [Extension('foo', ['xxx'], optional=False)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.ensure_finalized() self.assertEqual(cmd.get_source_files(), ['xxx']) @@ -279,7 +282,7 @@ # should not be overriden by a compiler instance # when the command is run dist = Distribution() - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.compiler = 'unix' cmd.ensure_finalized() cmd.run() @@ -292,7 +295,7 @@ 
ext = Extension('foo', [c_file], optional=False) dist = Distribution({'name': 'xx', 'ext_modules': [ext]}) - cmd = build_ext(dist) + cmd = self.build_ext(dist) fixup_build_ext(cmd) cmd.ensure_finalized() self.assertEqual(len(cmd.get_outputs()), 1) @@ -355,7 +358,7 @@ #etree_ext = Extension('lxml.etree', [etree_c]) #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]}) dist = Distribution() - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.inplace = 1 cmd.distribution.package_dir = {'': 'src'} cmd.distribution.packages = ['lxml', 'lxml.html'] @@ -462,7 +465,7 @@ 'ext_modules': [deptarget_ext] }) dist.package_dir = self.tmp_dir - cmd = build_ext(dist) + cmd = self.build_ext(dist) cmd.build_lib = self.tmp_dir cmd.build_temp = self.tmp_dir @@ -481,8 +484,19 @@ self.fail("Wrong deployment target during compilation") +class ParallelBuildExtTestCase(BuildExtTestCase): + + def build_ext(self, *args, **kwargs): + build_ext = super().build_ext(*args, **kwargs) + build_ext.parallel = True + return build_ext + + def test_suite(): - return unittest.makeSuite(BuildExtTestCase) + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(BuildExtTestCase)) + suite.addTest(unittest.makeSuite(ParallelBuildExtTestCase)) + return suite if __name__ == '__main__': - support.run_unittest(test_suite()) + support.run_unittest(__name__) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -139,6 +139,9 @@ Library ------- +- Issue #5309: distutils' build and build_ext commands now accept a ``-j`` + option to enable parallel building of extension modules. + - Issue #22448: Improve canceled timer handles cleanup to prevent unbound memory usage. Patch by Joshua Moore-Oliva. diff --git a/Modules/Setup.dist b/Modules/Setup.dist --- a/Modules/Setup.dist +++ b/Modules/Setup.dist @@ -118,6 +118,7 @@ itertools itertoolsmodule.c # Functions creating iterators for efficient looping atexit atexitmodule.c # Register functions to be run at interpreter-shutdown _stat _stat.c # stat.h interface +time timemodule.c # time module # access to ISO C locale support _locale _localemodule.c # -lintl diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -25,6 +25,11 @@ py_cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST') sysconfig.get_config_vars()['CFLAGS'] = cflags + ' ' + py_cflags_nodist +class Dummy: + """Hack for parallel build""" + ProcessPoolExecutor = None +sys.modules['concurrent.futures.process'] = Dummy + def get_platform(): # cross build if "_PYTHON_HOST_PLATFORM" in os.environ: @@ -174,6 +179,8 @@ build_ext.__init__(self, dist) self.failed = [] self.failed_on_import = [] + if '-j' in os.environ.get('MAKEFLAGS', ''): + self.parallel = True def build_extensions(self): @@ -253,6 +260,9 @@ build_ext.build_extensions(self) + for ext in self.extensions: + self.check_extension_import(ext) + longest = max([len(e.name) for e in self.extensions]) if self.failed or self.failed_on_import: all_failed = self.failed + self.failed_on_import @@ -305,6 +315,15 @@ (ext.name, sys.exc_info()[1])) self.failed.append(ext.name) return + + def check_extension_import(self, ext): + # Don't try to import an extension that has failed to compile + if ext.name in self.failed: + self.announce( + 'WARNING: skipping import check for failed build "%s"' % + ext.name, level=1) + return + # Workaround for Mac OS X: The Carbon-based modules cannot be # reliably imported into a command-line Python if 'Carbon' in ext.extra_link_args: -- Repository URL: https://hg.python.org/cpython From python-checkins 
at python.org Fri Sep 26 23:35:14 2014 From: python-checkins at python.org (yury.selivanov) Date: Fri, 26 Sep 2014 21:35:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_inspect=3A_Fix_getsource?= =?utf-8?q?=28=29_to_support_decorated_functions=2E?= Message-ID: <20140926213513.82055.60067@mail.hg.python.org> https://hg.python.org/cpython/rev/ad9cc6124a19 changeset: 92593:ad9cc6124a19 user: Yury Selivanov date: Fri Sep 26 17:34:54 2014 -0400 summary: inspect: Fix getsource() to support decorated functions. Issue #1764286. Patch by Claudiu Popa. files: Lib/inspect.py | 1 + Lib/test/inspect_fodder2.py | 13 +++++++++++++ Lib/test/test_inspect.py | 3 +++ Misc/NEWS | 3 +++ 4 files changed, 20 insertions(+), 0 deletions(-) diff --git a/Lib/inspect.py b/Lib/inspect.py --- a/Lib/inspect.py +++ b/Lib/inspect.py @@ -817,6 +817,7 @@ corresponding to the object and the line number indicates where in the original source file the first line of code was found. An OSError is raised if the source code cannot be retrieved.""" + object = unwrap(object) lines, lnum = findsource(object) if ismodule(object): return lines, 0 diff --git a/Lib/test/inspect_fodder2.py b/Lib/test/inspect_fodder2.py --- a/Lib/test/inspect_fodder2.py +++ b/Lib/test/inspect_fodder2.py @@ -109,3 +109,16 @@ #line 109 def keyword_only_arg(*, arg): pass + +from functools import wraps + +def decorator(func): + @wraps(func) + def fake(): + return 42 + return fake + +#line 121 + at decorator +def real(): + return 20 diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py --- a/Lib/test/test_inspect.py +++ b/Lib/test/test_inspect.py @@ -377,6 +377,9 @@ def test_replacing_decorator(self): self.assertSourceEqual(mod2.gone, 9, 10) + def test_getsource_unwrap(self): + self.assertSourceEqual(mod2.real, 122, 124) + class TestOneliners(GetSourceBase): fodderModule = mod2 def test_oneline_lambda(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #1764286: Fix inspect.getsource() to support decorated functions. + Patch by Claudiu Popa. + - Issue #18554: os.__all__ includes posix functions. - Issue #21391: Use os.path.abspath in the shutil module. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Sep 26 23:57:22 2014 From: python-checkins at python.org (berker.peksag) Date: Fri, 26 Sep 2014 21:57:22 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2316324=3A_=5Fchars?= =?utf-8?q?et_parameter_of_MIMEText_now_also_accepts?= Message-ID: <20140926215718.53368.78320@mail.hg.python.org> https://hg.python.org/cpython/rev/d43d4d4ebf2c changeset: 92594:d43d4d4ebf2c user: Berker Peksag date: Sat Sep 27 00:57:29 2014 +0300 summary: Issue #16324: _charset parameter of MIMEText now also accepts email.charset.Charset instances. Initial patch by Claude Paroz. files: Doc/library/email.mime.rst | 6 +++++- Lib/email/mime/text.py | 3 +++ Lib/test/test_email/test_email.py | 4 ++++ Misc/ACKS | 1 + Misc/NEWS | 3 +++ 5 files changed, 16 insertions(+), 1 deletions(-) diff --git a/Doc/library/email.mime.rst b/Doc/library/email.mime.rst --- a/Doc/library/email.mime.rst +++ b/Doc/library/email.mime.rst @@ -195,7 +195,8 @@ set of the text and is passed as an argument to the :class:`~email.mime.nonmultipart.MIMENonMultipart` constructor; it defaults to ``us-ascii`` if the string contains only ``ascii`` codepoints, and - ``utf-8`` otherwise. + ``utf-8`` otherwise. 
The *_charset* parameter accepts either a string or a + :class:`~email.charset.Charset` instance. Unless the *_charset* argument is explicitly set to ``None``, the MIMEText object created will have both a :mailheader:`Content-Type` header @@ -206,3 +207,6 @@ ``Content-Transfer-Encoding`` header, after which a ``set_payload`` call will automatically encode the new payload (and add a new :mailheader:`Content-Transfer-Encoding` header). + + .. versionchanged:: 3.5 + *_charset* also accepts :class:`~email.charset.Charset` instances. diff --git a/Lib/email/mime/text.py b/Lib/email/mime/text.py --- a/Lib/email/mime/text.py +++ b/Lib/email/mime/text.py @@ -6,6 +6,7 @@ __all__ = ['MIMEText'] +from email.charset import Charset from email.mime.nonmultipart import MIMENonMultipart @@ -34,6 +35,8 @@ _charset = 'us-ascii' except UnicodeEncodeError: _charset = 'utf-8' + if isinstance(_charset, Charset): + _charset = str(_charset) MIMENonMultipart.__init__(self, 'text', _subtype, **{'charset': _charset}) diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py --- a/Lib/test/test_email/test_email.py +++ b/Lib/test/test_email/test_email.py @@ -1636,6 +1636,10 @@ msg = MIMEText('hello there', _charset='us-ascii') eq(msg.get_charset().input_charset, 'us-ascii') eq(msg['content-type'], 'text/plain; charset="us-ascii"') + # Also accept a Charset instance + msg = MIMEText('hello there', _charset=Charset('utf-8')) + eq(msg.get_charset().input_charset, 'utf-8') + eq(msg['content-type'], 'text/plain; charset="utf-8"') def test_7bit_input(self): eq = self.assertEqual diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1024,6 +1024,7 @@ Alexandre Parenteau Dan Parisien William Park +Claude Paroz Heikki Partanen Harri Pasanen Ga?l Pasgrimaud diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #16324: _charset parameter of MIMEText now also accepts + email.charset.Charset instances. Initial patch by Claude Paroz. + - Issue #1764286: Fix inspect.getsource() to support decorated functions. Patch by Claudiu Popa. -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sat Sep 27 08:55:38 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 27 Sep 2014 08:55:38 +0200 Subject: [Python-checkins] Daily reference leaks (d43d4d4ebf2c): sum=3 Message-ID: results for d43d4d4ebf2c on branch "default" -------------------------------------------- test_collections leaked [2, -2, 0] references, sum=0 test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogyBjJuz', '-x'] From python-checkins at python.org Sat Sep 27 17:58:39 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 27 Sep 2014 15:58:39 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzk4NTA6?= =?utf-8?q?_Fixed_macpath=2Ejoin=28=29_for_empty_first_component=2E__Patch?= =?utf-8?q?_by?= Message-ID: <20140927155836.53364.25029@mail.hg.python.org> https://hg.python.org/cpython/rev/2ae2ca9d2b66 changeset: 92595:2ae2ca9d2b66 branch: 2.7 parent: 92588:adac8ba7b1b1 user: Serhiy Storchaka date: Sat Sep 27 18:53:01 2014 +0300 summary: Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. 
files: Lib/macpath.py | 2 +- Lib/test/test_macpath.py | 20 ++++++++++++++++++++ Misc/NEWS | 3 +++ 3 files changed, 24 insertions(+), 1 deletions(-) diff --git a/Lib/macpath.py b/Lib/macpath.py --- a/Lib/macpath.py +++ b/Lib/macpath.py @@ -42,7 +42,7 @@ def join(s, *p): path = s for t in p: - if (not s) or isabs(t): + if (not path) or isabs(t): path = t continue if t[:1] == ':': diff --git a/Lib/test/test_macpath.py b/Lib/test/test_macpath.py --- a/Lib/test/test_macpath.py +++ b/Lib/test/test_macpath.py @@ -29,6 +29,26 @@ self.assertEqual(split(":conky:mountpoint:"), (':conky:mountpoint', '')) + def test_join(self): + join = macpath.join + self.assertEqual(join('a', 'b'), ':a:b') + self.assertEqual(join(':a', 'b'), ':a:b') + self.assertEqual(join(':a:', 'b'), ':a:b') + self.assertEqual(join(':a::', 'b'), ':a::b') + self.assertEqual(join(':a', '::b'), ':a::b') + self.assertEqual(join('a', ':'), ':a:') + self.assertEqual(join('a:', ':'), 'a:') + self.assertEqual(join('a', ''), ':a:') + self.assertEqual(join('a:', ''), 'a:') + self.assertEqual(join('', ''), '') + self.assertEqual(join('', 'a:b'), 'a:b') + self.assertEqual(join('', 'a', 'b'), ':a:b') + self.assertEqual(join('a:b', 'c'), 'a:b:c') + self.assertEqual(join('a:b', ':c'), 'a:b:c') + self.assertEqual(join('a', ':b', ':c'), ':a:b:c') + self.assertEqual(join('a', 'b:'), 'b:') + self.assertEqual(join('a:', 'b:'), 'b:') + def test_splitext(self): splitext = macpath.splitext self.assertEqual(splitext(":foo.ext"), (':foo', '.ext')) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #9850: Fixed macpath.join() for empty first component. Patch by + Oleg Oshmyan. + - Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS directory attributes. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 17:58:39 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 27 Sep 2014 15:58:39 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzk4NTA6?= =?utf-8?q?_Fixed_macpath=2Ejoin=28=29_for_empty_first_component=2E__Patch?= =?utf-8?q?_by?= Message-ID: <20140927155837.81870.4660@mail.hg.python.org> https://hg.python.org/cpython/rev/54987723de99 changeset: 92596:54987723de99 branch: 3.4 parent: 92589:500d3d6f22ff user: Serhiy Storchaka date: Sat Sep 27 18:53:23 2014 +0300 summary: Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. 
files: Lib/macpath.py | 2 +- Lib/test/test_macpath.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ 3 files changed, 28 insertions(+), 1 deletions(-) diff --git a/Lib/macpath.py b/Lib/macpath.py --- a/Lib/macpath.py +++ b/Lib/macpath.py @@ -53,7 +53,7 @@ colon = _get_colon(s) path = s for t in p: - if (not s) or isabs(t): + if (not path) or isabs(t): path = t continue if t[:1] == colon: diff --git a/Lib/test/test_macpath.py b/Lib/test/test_macpath.py --- a/Lib/test/test_macpath.py +++ b/Lib/test/test_macpath.py @@ -49,16 +49,40 @@ def test_join(self): join = macpath.join self.assertEqual(join('a', 'b'), ':a:b') + self.assertEqual(join(':a', 'b'), ':a:b') + self.assertEqual(join(':a:', 'b'), ':a:b') + self.assertEqual(join(':a::', 'b'), ':a::b') + self.assertEqual(join(':a', '::b'), ':a::b') + self.assertEqual(join('a', ':'), ':a:') + self.assertEqual(join('a:', ':'), 'a:') + self.assertEqual(join('a', ''), ':a:') + self.assertEqual(join('a:', ''), 'a:') + self.assertEqual(join('', ''), '') self.assertEqual(join('', 'a:b'), 'a:b') + self.assertEqual(join('', 'a', 'b'), ':a:b') self.assertEqual(join('a:b', 'c'), 'a:b:c') self.assertEqual(join('a:b', ':c'), 'a:b:c') self.assertEqual(join('a', ':b', ':c'), ':a:b:c') + self.assertEqual(join('a', 'b:'), 'b:') + self.assertEqual(join('a:', 'b:'), 'b:') self.assertEqual(join(b'a', b'b'), b':a:b') + self.assertEqual(join(b':a', b'b'), b':a:b') + self.assertEqual(join(b':a:', b'b'), b':a:b') + self.assertEqual(join(b':a::', b'b'), b':a::b') + self.assertEqual(join(b':a', b'::b'), b':a::b') + self.assertEqual(join(b'a', b':'), b':a:') + self.assertEqual(join(b'a:', b':'), b'a:') + self.assertEqual(join(b'a', b''), b':a:') + self.assertEqual(join(b'a:', b''), b'a:') + self.assertEqual(join(b'', b''), b'') self.assertEqual(join(b'', b'a:b'), b'a:b') + self.assertEqual(join(b'', b'a', b'b'), b':a:b') self.assertEqual(join(b'a:b', b'c'), b'a:b:c') self.assertEqual(join(b'a:b', b':c'), b'a:b:c') self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c') + self.assertEqual(join(b'a', b'b:'), b'b:') + self.assertEqual(join(b'a:', b'b:'), b'b:') def test_splitext(self): splitext = macpath.splitext diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -27,6 +27,9 @@ Library ------- +- Issue #9850: Fixed macpath.join() for empty first component. Patch by + Oleg Oshmyan. + - Issue #22427: TemporaryDirectory no longer attempts to clean up twice when used in the with statement in generator. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 17:58:39 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 27 Sep 2014 15:58:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=239850=3A_Fixed_macpath=2Ejoin=28=29_for_empty_fi?= =?utf-8?q?rst_component=2E__Patch_by?= Message-ID: <20140927155837.81817.39109@mail.hg.python.org> https://hg.python.org/cpython/rev/e29866cb6b98 changeset: 92597:e29866cb6b98 parent: 92594:d43d4d4ebf2c parent: 92596:54987723de99 user: Serhiy Storchaka date: Sat Sep 27 18:55:15 2014 +0300 summary: Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. 
files: Lib/macpath.py | 2 +- Lib/test/test_macpath.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ 3 files changed, 28 insertions(+), 1 deletions(-) diff --git a/Lib/macpath.py b/Lib/macpath.py --- a/Lib/macpath.py +++ b/Lib/macpath.py @@ -53,7 +53,7 @@ colon = _get_colon(s) path = s for t in p: - if (not s) or isabs(t): + if (not path) or isabs(t): path = t continue if t[:1] == colon: diff --git a/Lib/test/test_macpath.py b/Lib/test/test_macpath.py --- a/Lib/test/test_macpath.py +++ b/Lib/test/test_macpath.py @@ -49,16 +49,40 @@ def test_join(self): join = macpath.join self.assertEqual(join('a', 'b'), ':a:b') + self.assertEqual(join(':a', 'b'), ':a:b') + self.assertEqual(join(':a:', 'b'), ':a:b') + self.assertEqual(join(':a::', 'b'), ':a::b') + self.assertEqual(join(':a', '::b'), ':a::b') + self.assertEqual(join('a', ':'), ':a:') + self.assertEqual(join('a:', ':'), 'a:') + self.assertEqual(join('a', ''), ':a:') + self.assertEqual(join('a:', ''), 'a:') + self.assertEqual(join('', ''), '') self.assertEqual(join('', 'a:b'), 'a:b') + self.assertEqual(join('', 'a', 'b'), ':a:b') self.assertEqual(join('a:b', 'c'), 'a:b:c') self.assertEqual(join('a:b', ':c'), 'a:b:c') self.assertEqual(join('a', ':b', ':c'), ':a:b:c') + self.assertEqual(join('a', 'b:'), 'b:') + self.assertEqual(join('a:', 'b:'), 'b:') self.assertEqual(join(b'a', b'b'), b':a:b') + self.assertEqual(join(b':a', b'b'), b':a:b') + self.assertEqual(join(b':a:', b'b'), b':a:b') + self.assertEqual(join(b':a::', b'b'), b':a::b') + self.assertEqual(join(b':a', b'::b'), b':a::b') + self.assertEqual(join(b'a', b':'), b':a:') + self.assertEqual(join(b'a:', b':'), b'a:') + self.assertEqual(join(b'a', b''), b':a:') + self.assertEqual(join(b'a:', b''), b'a:') + self.assertEqual(join(b'', b''), b'') self.assertEqual(join(b'', b'a:b'), b'a:b') + self.assertEqual(join(b'', b'a', b'b'), b':a:b') self.assertEqual(join(b'a:b', b'c'), b'a:b:c') self.assertEqual(join(b'a:b', b':c'), b'a:b:c') self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c') + self.assertEqual(join(b'a', b'b:'), b'b:') + self.assertEqual(join(b'a:', b'b:'), b'b:') def test_splitext(self): splitext = macpath.splitext diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -145,6 +145,9 @@ Library ------- +- Issue #9850: Fixed macpath.join() for empty first component. Patch by + Oleg Oshmyan. + - Issue #5309: distutils' build and build_ext commands now accept a ``-j`` option to enable parallel building of extension modules. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:13:52 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 19:13:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2320974=3A_Update_version_table_in_email_intro?= =?utf-8?q?=2E?= Message-ID: <20140927190312.82311.50649@mail.hg.python.org> https://hg.python.org/cpython/rev/655b34cd8871 changeset: 92599:655b34cd8871 parent: 92597:e29866cb6b98 parent: 92598:2eea52c287b7 user: R David Murray date: Sat Sep 27 15:00:10 2014 -0400 summary: Merge: #20974: Update version table in email intro. 
files: Doc/library/email.rst | 10 +++++++--- 1 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Doc/library/email.rst b/Doc/library/email.rst --- a/Doc/library/email.rst +++ b/Doc/library/email.rst @@ -91,15 +91,19 @@ +---------------+------------------------------+-----------------------+ | :const:`2.5` | Python 2.2.2+ and Python 2.3 | Python 2.1 to 2.5 | +---------------+------------------------------+-----------------------+ -| :const:`3.0` | Python 2.4 | Python 2.3 to 2.5 | +| :const:`3.0` | Python 2.4 and Python 2.5 | Python 2.3 to 2.6 | +---------------+------------------------------+-----------------------+ -| :const:`4.0` | Python 2.5 | Python 2.3 to 2.5 | +| :const:`4.0` | Python 2.5 to Python 2.7 | Python 2.3 to 2.7 | +---------------+------------------------------+-----------------------+ | :const:`5.0` | Python 3.0 and Python 3.1 | Python 3.0 to 3.2 | +---------------+------------------------------+-----------------------+ -| :const:`5.1` | Python 3.2 | Python 3.0 to 3.2 | +| :const:`5.1` | Python 3.2 | Python 3.2 | +---------------+------------------------------+-----------------------+ +After Version 5.1 (Python 3.2), the email package no longer has a version that +is separate from the Python version. (See the :ref:`whatsnew-index` documents +for the respective Python versions for details on changes.) + Here are the major differences between :mod:`email` version 5.1 and version 5.0: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:13:52 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 19:13:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIwOTc0OiBVcGRh?= =?utf-8?q?te_version_table_in_email_intro=2E?= Message-ID: <20140927190312.82413.92739@mail.hg.python.org> https://hg.python.org/cpython/rev/2eea52c287b7 changeset: 92598:2eea52c287b7 branch: 3.4 parent: 92596:54987723de99 user: R David Murray date: Sat Sep 27 14:59:36 2014 -0400 summary: #20974: Update version table in email intro. Note that the email version string in 3.3 and 3.4 is inaccurate, since it still exists but wasn't updated while the code was. files: Doc/library/email.rst | 10 +++++++--- 1 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Doc/library/email.rst b/Doc/library/email.rst --- a/Doc/library/email.rst +++ b/Doc/library/email.rst @@ -91,15 +91,19 @@ +---------------+------------------------------+-----------------------+ | :const:`2.5` | Python 2.2.2+ and Python 2.3 | Python 2.1 to 2.5 | +---------------+------------------------------+-----------------------+ -| :const:`3.0` | Python 2.4 | Python 2.3 to 2.5 | +| :const:`3.0` | Python 2.4 and Python 2.5 | Python 2.3 to 2.6 | +---------------+------------------------------+-----------------------+ -| :const:`4.0` | Python 2.5 | Python 2.3 to 2.5 | +| :const:`4.0` | Python 2.5 to Python 2.7 | Python 2.3 to 2.7 | +---------------+------------------------------+-----------------------+ | :const:`5.0` | Python 3.0 and Python 3.1 | Python 3.0 to 3.2 | +---------------+------------------------------+-----------------------+ -| :const:`5.1` | Python 3.2 | Python 3.0 to 3.2 | +| :const:`5.1` | Python 3.2 | Python 3.2 | +---------------+------------------------------+-----------------------+ +After Version 5.1 (Python 3.2), the email package no longer has a version that +is separate from the Python version. (See the :ref:`whatsnew-index` documents +for the respective Python versions for details on changes.) 
+ Here are the major differences between :mod:`email` version 5.1 and version 5.0: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:38:43 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 19:38:43 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2318854=3A_make_it_explicit_that_is=5Fmultipar?= =?utf-8?q?t_does_not_mean_=27multipart/xxx=27=2E?= Message-ID: <20140927193841.47419.21494@mail.hg.python.org> https://hg.python.org/cpython/rev/9909de463dc9 changeset: 92601:9909de463dc9 parent: 92599:655b34cd8871 parent: 92600:b717128799b5 user: R David Murray date: Sat Sep 27 15:38:15 2014 -0400 summary: Merge: #18854: make it explicit that is_multipart does not mean 'multipart/xxx'. files: Doc/library/email.message.rst | 67 +++++++++++++++++----- 1 files changed, 52 insertions(+), 15 deletions(-) diff --git a/Doc/library/email.message.rst b/Doc/library/email.message.rst --- a/Doc/library/email.message.rst +++ b/Doc/library/email.message.rst @@ -131,7 +131,11 @@ Return ``True`` if the message's payload is a list of sub-\ :class:`Message` objects, otherwise return ``False``. When - :meth:`is_multipart` returns ``False``, the payload should be a string object. + :meth:`is_multipart` returns ``False``, the payload should be a string + object. (Note that :meth:`is_multipart` returning ``True`` does not + necessarily mean that "msg.get_content_maintype() == 'multipart'" will + return the ``True``. For example, ``is_multipart`` will return ``True`` + when the :class:`Message` is of type ``message/rfc822``.) .. method:: set_unixfrom(unixfrom) @@ -584,23 +588,56 @@ Here's an example that prints the MIME type of every part of a multipart message structure: - .. testsetup:: + .. testsetup:: - >>> from email import message_from_binary_file - >>> with open('Lib/test/test_email/data/msg_16.txt', 'rb') as f: - ... msg = message_from_binary_file(f) + >>> from email import message_from_binary_file + >>> with open('Lib/test/test_email/data/msg_16.txt', 'rb') as f: + ... msg = message_from_binary_file(f) + >>> from email.iterators import _structure - .. doctest:: + .. doctest:: - >>> for part in msg.walk(): - ... print(part.get_content_type()) - multipart/report - text/plain - message/delivery-status - text/plain - text/plain - message/rfc822 - text/plain + >>> for part in msg.walk(): + ... print(part.get_content_type()) + multipart/report + text/plain + message/delivery-status + text/plain + text/plain + message/rfc822 + text/plain + + ``walk`` iterates over the subparts of any part where + :meth:`is_multipart` returns ``True``, even though + ``msg.get_content_maintype() == 'multipart'`` may return ``False``. We + can see this in our example by making use of the ``_structure`` debug + helper function: + + .. doctest:: + + >>> for part in msg.walk(): + ... print(part.get_content_maintype() == 'multipart'), + ... part.is_multipart()) + True True + False False + False True + False False + False False + False True + False False + >>> _structure(msg) + multipart/report + text/plain + message/delivery-status + text/plain + text/plain + message/rfc822 + text/plain + + Here the ``message`` parts are not ``multiparts``, but they do contain + subparts. ``is_multipart()`` returns ``True`` and ``walk`` descends + into the subparts. + :class:`Message` objects can also optionally contain two instance attributes, which can be used when generating the plain text of a MIME message. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:38:43 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 19:38:43 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzE4ODU0OiBtYWtl?= =?utf-8?q?_it_explicit_that_is=5Fmultipart_does_not_mean_=27multipart/xxx?= =?utf-8?b?Jy4=?= Message-ID: <20140927193841.82513.68510@mail.hg.python.org> https://hg.python.org/cpython/rev/b717128799b5 changeset: 92600:b717128799b5 branch: 3.4 parent: 92598:2eea52c287b7 user: R David Murray date: Sat Sep 27 15:37:40 2014 -0400 summary: #18854: make it explicit that is_multipart does not mean 'multipart/xxx'. Original patch (and the idea of using _structure) by Abhilash Raj. files: Doc/library/email.message.rst | 67 +++++++++++++++++----- 1 files changed, 52 insertions(+), 15 deletions(-) diff --git a/Doc/library/email.message.rst b/Doc/library/email.message.rst --- a/Doc/library/email.message.rst +++ b/Doc/library/email.message.rst @@ -131,7 +131,11 @@ Return ``True`` if the message's payload is a list of sub-\ :class:`Message` objects, otherwise return ``False``. When - :meth:`is_multipart` returns ``False``, the payload should be a string object. + :meth:`is_multipart` returns ``False``, the payload should be a string + object. (Note that :meth:`is_multipart` returning ``True`` does not + necessarily mean that "msg.get_content_maintype() == 'multipart'" will + return the ``True``. For example, ``is_multipart`` will return ``True`` + when the :class:`Message` is of type ``message/rfc822``.) .. method:: set_unixfrom(unixfrom) @@ -584,23 +588,56 @@ Here's an example that prints the MIME type of every part of a multipart message structure: - .. testsetup:: + .. testsetup:: - >>> from email import message_from_binary_file - >>> with open('Lib/test/test_email/data/msg_16.txt', 'rb') as f: - ... msg = message_from_binary_file(f) + >>> from email import message_from_binary_file + >>> with open('Lib/test/test_email/data/msg_16.txt', 'rb') as f: + ... msg = message_from_binary_file(f) + >>> from email.iterators import _structure - .. doctest:: + .. doctest:: - >>> for part in msg.walk(): - ... print(part.get_content_type()) - multipart/report - text/plain - message/delivery-status - text/plain - text/plain - message/rfc822 - text/plain + >>> for part in msg.walk(): + ... print(part.get_content_type()) + multipart/report + text/plain + message/delivery-status + text/plain + text/plain + message/rfc822 + text/plain + + ``walk`` iterates over the subparts of any part where + :meth:`is_multipart` returns ``True``, even though + ``msg.get_content_maintype() == 'multipart'`` may return ``False``. We + can see this in our example by making use of the ``_structure`` debug + helper function: + + .. doctest:: + + >>> for part in msg.walk(): + ... print(part.get_content_maintype() == 'multipart'), + ... part.is_multipart()) + True True + False False + False True + False False + False False + False True + False False + >>> _structure(msg) + multipart/report + text/plain + message/delivery-status + text/plain + text/plain + message/rfc822 + text/plain + + Here the ``message`` parts are not ``multiparts``, but they do contain + subparts. ``is_multipart()`` returns ``True`` and ``walk`` descends + into the subparts. + :class:`Message` objects can also optionally contain two instance attributes, which can be used when generating the plain text of a MIME message. 
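As committed, the second doctest in this patch will not run: the print() call carries a stray comma and an unbalanced closing parenthesis. A self-contained sketch of the same point that does not depend on the msg_16.txt fixture (the message built here is purely illustrative):

    from email.mime.multipart import MIMEMultipart
    from email.mime.message import MIMEMessage
    from email.mime.text import MIMEText

    inner = MIMEText('delivery report body')
    outer = MIMEMultipart('report')
    outer.attach(MIMEText('summary'))
    outer.attach(MIMEMessage(inner))          # content type message/rfc822

    for part in outer.walk():
        print(part.get_content_type(),
              part.get_content_maintype() == 'multipart',
              part.is_multipart())

    # multipart/report  True  True
    # text/plain        False False
    # message/rfc822    False True   <- not 'multipart', yet walk() descends
    # text/plain        False False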
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:49:38 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:49:38 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE2NTM3?= =?utf-8?q?=3A_Check_whether_self=2Eextensions_is_empty_in_setup=2Epy=2E?= Message-ID: <20140927194937.82007.86617@mail.hg.python.org> https://hg.python.org/cpython/rev/a169ee4f254a changeset: 92602:a169ee4f254a branch: 3.4 parent: 92598:2eea52c287b7 user: Berker Peksag date: Sat Sep 27 22:37:15 2014 +0300 summary: Issue #16537: Check whether self.extensions is empty in setup.py. Patch by Jonathan Hosmer. files: Misc/ACKS | 1 + Misc/NEWS | 6 ++++++ setup.py | 4 +++- 3 files changed, 10 insertions(+), 1 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -588,6 +588,7 @@ Nadav Horesh Alon Horev Jan Hosang +Jonathan Hosmer Alan Hourihane Ken Howard Brad Howes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -15,6 +15,12 @@ - Issue #22448: Improve canceled timer handles cleanup to prevent unbound memory usage. Patch by Joshua Moore-Oliva. +Build +----- + +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + What's New in Python 3.4.2? =========================== diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -252,7 +252,9 @@ build_ext.build_extensions(self) - longest = max([len(e.name) for e in self.extensions]) + longest = 0 + if self.extensions: + longest = max([len(e.name) for e in self.extensions]) if self.failed: longest = max(longest, max([len(name) for name in self.failed])) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:49:38 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:49:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2316537=3A_Check_whether_self=2Eextensions_is_emp?= =?utf-8?q?ty_in_setup=2Epy=2E?= Message-ID: <20140927194937.47393.77807@mail.hg.python.org> https://hg.python.org/cpython/rev/491a4d3e2bdd changeset: 92603:491a4d3e2bdd parent: 92599:655b34cd8871 parent: 92602:a169ee4f254a user: Berker Peksag date: Sat Sep 27 22:39:38 2014 +0300 summary: Issue #16537: Check whether self.extensions is empty in setup.py. Patch by Jonathan Hosmer. files: Misc/ACKS | 1 + Misc/NEWS | 3 +++ setup.py | 4 +++- 3 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -592,6 +592,7 @@ Nadav Horesh Alon Horev Jan Hosang +Jonathan Hosmer Alan Hourihane Ken Howard Brad Howes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -977,6 +977,9 @@ Build ----- +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + - Issue #22359: Remove incorrect uses of recursive make. Patch by Jonas Wagner. 
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -263,7 +263,9 @@ for ext in self.extensions: self.check_extension_import(ext) - longest = max([len(e.name) for e in self.extensions]) + longest = 0 + if self.extensions: + longest = max([len(e.name) for e in self.extensions]) if self.failed or self.failed_on_import: all_failed = self.failed + self.failed_on_import longest = max(longest, max([len(name) for name in all_failed])) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:49:38 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:49:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogTWVyZ2Ugd2l0aCAzLjQu?= Message-ID: <20140927194938.82165.39377@mail.hg.python.org> https://hg.python.org/cpython/rev/8a56d87b81ef changeset: 92606:8a56d87b81ef parent: 92605:56742effd397 parent: 92604:95cc6220383c user: Berker Peksag date: Sat Sep 27 22:49:33 2014 +0300 summary: Merge with 3.4. files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:49:38 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:49:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?b?KTogTWVyZ2UgaGVhZHMu?= Message-ID: <20140927194938.53368.44044@mail.hg.python.org> https://hg.python.org/cpython/rev/56742effd397 changeset: 92605:56742effd397 parent: 92601:9909de463dc9 parent: 92603:491a4d3e2bdd user: Berker Peksag date: Sat Sep 27 22:48:58 2014 +0300 summary: Merge heads. files: Misc/ACKS | 1 + Misc/NEWS | 3 +++ setup.py | 4 +++- 3 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -592,6 +592,7 @@ Nadav Horesh Alon Horev Jan Hosang +Jonathan Hosmer Alan Hourihane Ken Howard Brad Howes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -977,6 +977,9 @@ Build ----- +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + - Issue #22359: Remove incorrect uses of recursive make. Patch by Jonas Wagner. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -263,7 +263,9 @@ for ext in self.extensions: self.check_extension_import(ext) - longest = max([len(e.name) for e in self.extensions]) + longest = 0 + if self.extensions: + longest = max([len(e.name) for e in self.extensions]) if self.failed or self.failed_on_import: all_failed = self.failed + self.failed_on_import longest = max(longest, max([len(name) for name in all_failed])) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:49:38 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:49:38 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge_heads=2E?= Message-ID: <20140927194938.47395.1264@mail.hg.python.org> https://hg.python.org/cpython/rev/95cc6220383c changeset: 92604:95cc6220383c branch: 3.4 parent: 92600:b717128799b5 parent: 92602:a169ee4f254a user: Berker Peksag date: Sat Sep 27 22:47:59 2014 +0300 summary: Merge heads. 
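The Issue #16537 change being merged here guards the build summary against a configuration with no extension modules at all, where max() over an empty sequence raises. A hedged sketch of the failure mode and the guard (the list contents are illustrative):

    extensions = []      # e.g. a build where every optional module is disabled

    # Before the fix:
    #     longest = max([len(e.name) for e in extensions])
    # raised "ValueError: max() arg is an empty sequence".

    # After the fix:
    longest = 0
    if extensions:
        longest = max([len(e.name) for e in extensions])
    print(longest)       # 0, so the summary report still prints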
files: Misc/ACKS | 1 + Misc/NEWS | 6 ++++++ setup.py | 4 +++- 3 files changed, 10 insertions(+), 1 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -588,6 +588,7 @@ Nadav Horesh Alon Horev Jan Hosang +Jonathan Hosmer Alan Hourihane Ken Howard Brad Howes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -15,6 +15,12 @@ - Issue #22448: Improve canceled timer handles cleanup to prevent unbound memory usage. Patch by Joshua Moore-Oliva. +Build +----- + +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + What's New in Python 3.4.2? =========================== diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -252,7 +252,9 @@ build_ext.build_extensions(self) - longest = max([len(e.name) for e in self.extensions]) + longest = 0 + if self.extensions: + longest = max([len(e.name) for e in self.extensions]) if self.failed: longest = max(longest, max([len(name) for name in self.failed])) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 21:54:58 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 19:54:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE2NTM3?= =?utf-8?q?=3A_Check_whether_self=2Eextensions_is_empty_in_setup=2Epy=2E?= Message-ID: <20140927195456.47397.21058@mail.hg.python.org> https://hg.python.org/cpython/rev/6946036f21ef changeset: 92607:6946036f21ef branch: 2.7 parent: 92595:2ae2ca9d2b66 user: Berker Peksag date: Sat Sep 27 22:55:10 2014 +0300 summary: Issue #16537: Check whether self.extensions is empty in setup.py. Patch by Jonathan Hosmer. files: Misc/ACKS | 1 + Misc/NEWS | 3 +++ setup.py | 4 +++- 3 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -582,6 +582,7 @@ Nadav Horesh Alon Horev Jan Hosang +Jonathan Hosmer Alan Hourihane Ken Howard Brad Howes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -205,6 +205,9 @@ Build ----- +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + - The documentation Makefile no longer automatically downloads Sphinx. Users are now required to have Sphinx already installed to build the documentation. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -250,7 +250,9 @@ build_ext.build_extensions(self) - longest = max([len(e.name) for e in self.extensions]) + longest = 0 + if self.extensions: + longest = max([len(e.name) for e in self.extensions]) if self.failed: longest = max(longest, max([len(name) for name in self.failed])) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 22:22:28 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 20:22:28 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322251=3A_Fix_ReST_markup_to_avoid_errors_buildi?= =?utf-8?q?ng_docs=2E?= Message-ID: <20140927202227.26723.92@mail.hg.python.org> https://hg.python.org/cpython/rev/ed1dbac90b92 changeset: 92609:ed1dbac90b92 parent: 92606:8a56d87b81ef parent: 92608:0ec56e677bc3 user: Berker Peksag date: Sat Sep 27 23:22:35 2014 +0300 summary: Issue #22251: Fix ReST markup to avoid errors building docs. 
files: Doc/extending/newtypes.rst | 2 ++ Doc/faq/library.rst | 4 +++- Doc/library/argparse.rst | 1 + Doc/library/asyncio-eventloop.rst | 4 +++- Doc/library/asyncio-protocol.rst | 2 ++ Doc/library/asyncio-task.rst | 1 + Doc/library/email.contentmanager.rst | 4 +++- Doc/library/logging.handlers.rst | 4 +++- Doc/library/multiprocessing.rst | 1 + Doc/library/os.rst | 1 + Doc/library/pathlib.rst | 3 +++ Doc/library/pickle.rst | 1 + Doc/library/ssl.rst | 3 +++ Doc/library/statistics.rst | 4 +++- Doc/library/stdtypes.rst | 1 + Doc/library/sys.rst | 1 + Doc/library/warnings.rst | 1 + Doc/using/cmdline.rst | 6 +++++- 18 files changed, 38 insertions(+), 6 deletions(-) diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -983,6 +983,7 @@ } .. note:: + There are limitations to what you can safely do in a deallocator function. First, if your type supports garbage collection (using :c:member:`~PyTypeObject.tp_traverse` and/or :c:member:`~PyTypeObject.tp_clear`), some of the object's members can have been @@ -997,6 +998,7 @@ :c:member:`~PyTypeObject.tp_finalize` type method. .. seealso:: + :pep:`442` explains the new finalization scheme. .. index:: diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -697,7 +697,9 @@ >>> urllib.parse.urlencode({'name': 'Guy Steele, Jr.'}) 'name=Guy+Steele%2C+Jr.' -.. seealso:: :ref:`urllib-howto` for extensive examples. +.. seealso:: + + :ref:`urllib-howto` for extensive examples. What module should I use to help with generating HTML? diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1894,6 +1894,7 @@ (Namespace(bar='BAR', foo=True), ['--badger', 'spam']) .. warning:: + :ref:`Prefix matching ` rules apply to :meth:`parse_known_args`. The parser may consume an option even if it's just a prefix of one of its known options, instead of leaving it in the remaining diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -197,7 +197,9 @@ the transport; if *ssl* is :const:`True`, a context with some unspecified default settings is used. - .. seealso:: :ref:`SSL/TLS security considerations ` + .. seealso:: + + :ref:`SSL/TLS security considerations ` * *server_hostname*, is only for use together with *ssl*, and sets or overrides the hostname that the target server's certificate diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -407,6 +407,7 @@ buffer size reaches the low-water mark. .. note:: + If the buffer size equals the high-water mark, :meth:`pause_writing` is not called -- it must go strictly over. Conversely, :meth:`resume_writing` is called when the buffer size is @@ -415,6 +416,7 @@ mark is zero. .. note:: + On BSD systems (OS X, FreeBSD, etc.) flow control is not supported for :class:`DatagramProtocol`, because send failures caused by writing too many packets cannot be detected easily. The socket diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -269,6 +269,7 @@ the future. .. note:: + The :meth:`~BaseEventLoop.run_until_complete` method uses internally the :meth:`~Future.add_done_callback` method to be notified when the future is done. 
diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -405,7 +405,9 @@ ``message/rfc822``, use ``8bit`` if *cte* is not specified. For all other values of *subtype*, use ``7bit``. - .. note:: A *cte* of ``binary`` does not actually work correctly yet. + .. note:: + + A *cte* of ``binary`` does not actually work correctly yet. The ``Message`` object as modified by ``set_content`` is correct, but :class:`~email.generator.BytesGenerator` does not serialize it correctly. diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -864,7 +864,9 @@ :meth:`mapLogRecord` method is used to convert the record to the dictionary to be sent. - .. note:: Since preparing a record for sending it to a Web server is not + .. note:: + + Since preparing a record for sending it to a Web server is not the same as a generic formatting operation, using :meth:`~logging.Handler.setFormatter` to specify a :class:`~logging.Formatter` for a :class:`HTTPHandler` has no effect. diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -851,6 +851,7 @@ :exc:`NotImplementedError`. .. seealso:: + :func:`os.cpu_count` .. function:: current_process() diff --git a/Doc/library/os.rst b/Doc/library/os.rst --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -57,6 +57,7 @@ ``'ce'``, ``'java'``. .. seealso:: + :attr:`sys.platform` has a finer granularity. :func:`os.uname` gives system-dependent version information. diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -32,15 +32,18 @@ useful since those simply don't have any OS-accessing operations. .. note:: + This module has been included in the standard library on a :term:`provisional basis `. Backwards incompatible changes (up to and including removal of the package) may occur if deemed necessary by the core developers. .. seealso:: + :pep:`428`: The pathlib module -- object-oriented filesystem paths. .. seealso:: + For low-level path manipulation on strings, you can also use the :mod:`os.path` module. diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -141,6 +141,7 @@ brought by protocol 4. .. note:: + Serialization is a more primitive notion than persistence; although :mod:`pickle` reads and writes file objects, it does not handle the issue of naming persistent objects, nor the (even more complicated) issue of concurrent diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -259,6 +259,7 @@ default CA certificates. .. note:: + The protocol, options, cipher and other settings may change to more restrictive values anytime without prior deprecation. The values represent a fair balance between compatibility and security. @@ -267,6 +268,7 @@ :class:`SSLContext` and apply the settings yourself. .. note:: + If you find that when certain older clients or servers attempt to connect with a :class:`SSLContext` created by this function that they get an error stating "Protocol or cipher suite mismatch", it may be that they @@ -963,6 +965,7 @@ interoperability. .. 
seealso:: + :func:`create_default_context` lets the :mod:`ssl` module choose security settings for a given purpose. diff --git a/Doc/library/statistics.rst b/Doc/library/statistics.rst --- a/Doc/library/statistics.rst +++ b/Doc/library/statistics.rst @@ -135,7 +135,9 @@ This is suited for when your data is discrete, and you don't mind that the median may not be an actual data point. - .. seealso:: :func:`median_low`, :func:`median_high`, :func:`median_grouped` + .. seealso:: + + :func:`median_low`, :func:`median_high`, :func:`median_grouped` .. function:: median_low(data) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -3872,6 +3872,7 @@ :ref:`documentation of view objects `. .. seealso:: + :class:`types.MappingProxyType` can be used to create a read-only view of a :class:`dict`. diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -798,6 +798,7 @@ .. seealso:: + Module :mod:`site` This describes how to use .pth files to extend :data:`sys.path`. diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst --- a/Doc/library/warnings.rst +++ b/Doc/library/warnings.rst @@ -41,6 +41,7 @@ custom implementations. .. seealso:: + :func:`logging.captureWarnings` allows you to handle all warnings with the standard logging infrastructure. diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -107,6 +107,7 @@ python -mtimeit -h # for details .. seealso:: + :func:`runpy.run_module` Equivalent functionality directly available to Python code @@ -158,7 +159,9 @@ .. versionchanged:: 3.4 Automatic enabling of tab-completion and history editing. -.. seealso:: :ref:`tut-invoking` +.. seealso:: + + :ref:`tut-invoking` Generic options @@ -362,6 +365,7 @@ thus equivalent to an omitted line number. .. seealso:: + :mod:`warnings` -- the warnings module :pep:`230` -- Warning framework -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 22:22:28 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 20:22:28 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMjUx?= =?utf-8?q?=3A_Fix_ReST_markup_to_avoid_errors_building_docs=2E?= Message-ID: <20140927202227.73871.20573@mail.hg.python.org> https://hg.python.org/cpython/rev/0ec56e677bc3 changeset: 92608:0ec56e677bc3 branch: 3.4 parent: 92604:95cc6220383c user: Berker Peksag date: Sat Sep 27 23:21:35 2014 +0300 summary: Issue #22251: Fix ReST markup to avoid errors building docs. files: Doc/extending/newtypes.rst | 2 ++ Doc/faq/library.rst | 4 +++- Doc/library/argparse.rst | 1 + Doc/library/asyncio-eventloop.rst | 4 +++- Doc/library/asyncio-protocol.rst | 2 ++ Doc/library/asyncio-task.rst | 1 + Doc/library/email.contentmanager.rst | 4 +++- Doc/library/logging.handlers.rst | 4 +++- Doc/library/multiprocessing.rst | 1 + Doc/library/os.rst | 1 + Doc/library/pathlib.rst | 3 +++ Doc/library/pickle.rst | 1 + Doc/library/ssl.rst | 3 +++ Doc/library/statistics.rst | 4 +++- Doc/library/stdtypes.rst | 1 + Doc/library/sys.rst | 1 + Doc/library/warnings.rst | 1 + Doc/using/cmdline.rst | 6 +++++- 18 files changed, 38 insertions(+), 6 deletions(-) diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -983,6 +983,7 @@ } .. note:: + There are limitations to what you can safely do in a deallocator function. 
First, if your type supports garbage collection (using :c:member:`~PyTypeObject.tp_traverse` and/or :c:member:`~PyTypeObject.tp_clear`), some of the object's members can have been @@ -997,6 +998,7 @@ :c:member:`~PyTypeObject.tp_finalize` type method. .. seealso:: + :pep:`442` explains the new finalization scheme. .. index:: diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -697,7 +697,9 @@ >>> urllib.parse.urlencode({'name': 'Guy Steele, Jr.'}) 'name=Guy+Steele%2C+Jr.' -.. seealso:: :ref:`urllib-howto` for extensive examples. +.. seealso:: + + :ref:`urllib-howto` for extensive examples. What module should I use to help with generating HTML? diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1894,6 +1894,7 @@ (Namespace(bar='BAR', foo=True), ['--badger', 'spam']) .. warning:: + :ref:`Prefix matching ` rules apply to :meth:`parse_known_args`. The parser may consume an option even if it's just a prefix of one of its known options, instead of leaving it in the remaining diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -197,7 +197,9 @@ the transport; if *ssl* is :const:`True`, a context with some unspecified default settings is used. - .. seealso:: :ref:`SSL/TLS security considerations ` + .. seealso:: + + :ref:`SSL/TLS security considerations ` * *server_hostname*, is only for use together with *ssl*, and sets or overrides the hostname that the target server's certificate diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -407,6 +407,7 @@ buffer size reaches the low-water mark. .. note:: + If the buffer size equals the high-water mark, :meth:`pause_writing` is not called -- it must go strictly over. Conversely, :meth:`resume_writing` is called when the buffer size is @@ -415,6 +416,7 @@ mark is zero. .. note:: + On BSD systems (OS X, FreeBSD, etc.) flow control is not supported for :class:`DatagramProtocol`, because send failures caused by writing too many packets cannot be detected easily. The socket diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -269,6 +269,7 @@ the future. .. note:: + The :meth:`~BaseEventLoop.run_until_complete` method uses internally the :meth:`~Future.add_done_callback` method to be notified when the future is done. diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -405,7 +405,9 @@ ``message/rfc822``, use ``8bit`` if *cte* is not specified. For all other values of *subtype*, use ``7bit``. - .. note:: A *cte* of ``binary`` does not actually work correctly yet. + .. note:: + + A *cte* of ``binary`` does not actually work correctly yet. The ``Message`` object as modified by ``set_content`` is correct, but :class:`~email.generator.BytesGenerator` does not serialize it correctly. diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -864,7 +864,9 @@ :meth:`mapLogRecord` method is used to convert the record to the dictionary to be sent. - .. 
note:: Since preparing a record for sending it to a Web server is not + .. note:: + + Since preparing a record for sending it to a Web server is not the same as a generic formatting operation, using :meth:`~logging.Handler.setFormatter` to specify a :class:`~logging.Formatter` for a :class:`HTTPHandler` has no effect. diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -851,6 +851,7 @@ :exc:`NotImplementedError`. .. seealso:: + :func:`os.cpu_count` .. function:: current_process() diff --git a/Doc/library/os.rst b/Doc/library/os.rst --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -57,6 +57,7 @@ ``'ce'``, ``'java'``. .. seealso:: + :attr:`sys.platform` has a finer granularity. :func:`os.uname` gives system-dependent version information. diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -32,15 +32,18 @@ useful since those simply don't have any OS-accessing operations. .. note:: + This module has been included in the standard library on a :term:`provisional basis `. Backwards incompatible changes (up to and including removal of the package) may occur if deemed necessary by the core developers. .. seealso:: + :pep:`428`: The pathlib module -- object-oriented filesystem paths. .. seealso:: + For low-level path manipulation on strings, you can also use the :mod:`os.path` module. diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -141,6 +141,7 @@ brought by protocol 4. .. note:: + Serialization is a more primitive notion than persistence; although :mod:`pickle` reads and writes file objects, it does not handle the issue of naming persistent objects, nor the (even more complicated) issue of concurrent diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -259,6 +259,7 @@ default CA certificates. .. note:: + The protocol, options, cipher and other settings may change to more restrictive values anytime without prior deprecation. The values represent a fair balance between compatibility and security. @@ -267,6 +268,7 @@ :class:`SSLContext` and apply the settings yourself. .. note:: + If you find that when certain older clients or servers attempt to connect with a :class:`SSLContext` created by this function that they get an error stating "Protocol or cipher suite mismatch", it may be that they @@ -932,6 +934,7 @@ interoperability. .. seealso:: + :func:`create_default_context` lets the :mod:`ssl` module choose security settings for a given purpose. diff --git a/Doc/library/statistics.rst b/Doc/library/statistics.rst --- a/Doc/library/statistics.rst +++ b/Doc/library/statistics.rst @@ -135,7 +135,9 @@ This is suited for when your data is discrete, and you don't mind that the median may not be an actual data point. - .. seealso:: :func:`median_low`, :func:`median_high`, :func:`median_grouped` + .. seealso:: + + :func:`median_low`, :func:`median_high`, :func:`median_grouped` .. function:: median_low(data) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -3872,6 +3872,7 @@ :ref:`documentation of view objects `. .. seealso:: + :class:`types.MappingProxyType` can be used to create a read-only view of a :class:`dict`. 
diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -798,6 +798,7 @@ .. seealso:: + Module :mod:`site` This describes how to use .pth files to extend :data:`sys.path`. diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst --- a/Doc/library/warnings.rst +++ b/Doc/library/warnings.rst @@ -41,6 +41,7 @@ custom implementations. .. seealso:: + :func:`logging.captureWarnings` allows you to handle all warnings with the standard logging infrastructure. diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -107,6 +107,7 @@ python -mtimeit -h # for details .. seealso:: + :func:`runpy.run_module` Equivalent functionality directly available to Python code @@ -158,7 +159,9 @@ .. versionchanged:: 3.4 Automatic enabling of tab-completion and history editing. -.. seealso:: :ref:`tut-invoking` +.. seealso:: + + :ref:`tut-invoking` Generic options @@ -362,6 +365,7 @@ thus equivalent to an omitted line number. .. seealso:: + :mod:`warnings` -- the warnings module :pep:`230` -- Warning framework -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 22:59:21 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 20:59:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogIzEwNTEwOiBtYWtl?= =?utf-8?q?_distuitls_upload/register_use_HTML_standards_compliant_CRLF=2E?= Message-ID: <20140927205919.38203.47289@mail.hg.python.org> https://hg.python.org/cpython/rev/9ad78b4b169c changeset: 92612:9ad78b4b169c branch: 2.7 parent: 92607:6946036f21ef user: R David Murray date: Sat Sep 27 16:59:04 2014 -0400 summary: #10510: make distuitls upload/register use HTML standards compliant CRLF. Patch by Ian Cordasco, approved by ?ric Araujo. files: Lib/distutils/command/upload.py | 9 ++++----- Lib/distutils/tests/test_upload.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py --- a/Lib/distutils/command/upload.py +++ b/Lib/distutils/command/upload.py @@ -136,8 +136,8 @@ # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = '\n--' + boundary - end_boundary = sep_boundary + '--' + sep_boundary = '\r\n--' + boundary + end_boundary = sep_boundary + '--\r\n' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same name @@ -151,14 +151,13 @@ fn = "" body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"'%key) + body.write('\r\nContent-Disposition: form-data; name="%s"' % key) body.write(fn) - body.write("\n\n") + body.write("\r\n\r\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) - body.write("\n") body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py --- a/Lib/distutils/tests/test_upload.py +++ b/Lib/distutils/tests/test_upload.py @@ -119,7 +119,7 @@ # what did we send ? 
self.assertIn('d?d?', self.last_open.req.data) headers = dict(self.last_open.req.headers) - self.assertEqual(headers['Content-length'], '2085') + self.assertEqual(headers['Content-length'], '2159') self.assertTrue(headers['Content-type'].startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') self.assertEqual(self.last_open.req.get_full_url(), diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -274,6 +274,7 @@ Jason R. Coombs Garrett Cooper Greg Copeland +Ian Cordasco Aldo Cortesi David Costanzo Scott Cotton diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #10510: distutils register and upload methods now use HTML standards + compliant CRLF line endings. + - Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 22:59:21 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 20:59:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzEwNTEwOiBtYWtl?= =?utf-8?q?_distuitls_upload/register_use_HTML_standards_compliant_CRLF=2E?= Message-ID: <20140927205918.26721.63139@mail.hg.python.org> https://hg.python.org/cpython/rev/5e3f8bd33cf2 changeset: 92610:5e3f8bd33cf2 branch: 3.4 parent: 92608:0ec56e677bc3 user: R David Murray date: Sat Sep 27 16:56:15 2014 -0400 summary: #10510: make distuitls upload/register use HTML standards compliant CRLF. Patch by Ian Cordasco, approved by ?ric Araujo. files: Lib/distutils/command/upload.py | 10 +++++----- Lib/distutils/tests/test_upload.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py --- a/Lib/distutils/command/upload.py +++ b/Lib/distutils/command/upload.py @@ -143,11 +143,11 @@ # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\n--' + boundary.encode('ascii') - end_boundary = sep_boundary + b'--' + sep_boundary = b'\r\n--' + boundary.encode('ascii') + end_boundary = sep_boundary + b'--\r\n' body = io.BytesIO() for key, value in data.items(): - title = '\nContent-Disposition: form-data; name="%s"' % key + title = '\r\nContent-Disposition: form-data; name="%s"' % key # handle multiple entries for the same name if type(value) != type([]): value = [value] @@ -159,12 +159,12 @@ value = str(value).encode('utf-8') body.write(sep_boundary) body.write(title.encode('utf-8')) - body.write(b"\n\n") + body.write(b"\r\n\r\n") body.write(value) if value and value[-1:] == b'\r': body.write(b'\n') # write an extra newline (lurve Macs) body.write(end_boundary) - body.write(b"\n") + body.write(b"\r\n") body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py --- a/Lib/distutils/tests/test_upload.py +++ b/Lib/distutils/tests/test_upload.py @@ -127,7 +127,7 @@ # what did we send ? 
headers = dict(self.last_open.req.headers) - self.assertEqual(headers['Content-length'], '2087') + self.assertEqual(headers['Content-length'], '2163') content_type = headers['Content-type'] self.assertTrue(content_type.startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -279,6 +279,7 @@ Garrett Cooper Greg Copeland Aldo Cortesi +Ian Cordasco David Costanzo Scott Cotton Greg Couch diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -33,6 +33,9 @@ Library ------- +- Issue #10510: distutils register and upload methods now use HTML standards + compliant CRLF line endings. + - Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 22:59:22 2014 From: python-checkins at python.org (r.david.murray) Date: Sat, 27 Sep 2014 20:59:22 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2310510=3A_make_distuitls_upload/register_use_?= =?utf-8?q?HTML_standards_compliant_CRLF=2E?= Message-ID: <20140927205918.73881.43633@mail.hg.python.org> https://hg.python.org/cpython/rev/ea665bae2ea0 changeset: 92611:ea665bae2ea0 parent: 92609:ed1dbac90b92 parent: 92610:5e3f8bd33cf2 user: R David Murray date: Sat Sep 27 16:57:51 2014 -0400 summary: Merge: #10510: make distuitls upload/register use HTML standards compliant CRLF. files: Lib/distutils/command/upload.py | 10 +++++----- Lib/distutils/tests/test_upload.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py --- a/Lib/distutils/command/upload.py +++ b/Lib/distutils/command/upload.py @@ -141,11 +141,11 @@ # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\n--' + boundary.encode('ascii') - end_boundary = sep_boundary + b'--' + sep_boundary = b'\r\n--' + boundary.encode('ascii') + end_boundary = sep_boundary + b'--\r\n' body = io.BytesIO() for key, value in data.items(): - title = '\nContent-Disposition: form-data; name="%s"' % key + title = '\r\nContent-Disposition: form-data; name="%s"' % key # handle multiple entries for the same name if not isinstance(value, list): value = [value] @@ -157,12 +157,12 @@ value = str(value).encode('utf-8') body.write(sep_boundary) body.write(title.encode('utf-8')) - body.write(b"\n\n") + body.write(b"\r\n\r\n") body.write(value) if value and value[-1:] == b'\r': body.write(b'\n') # write an extra newline (lurve Macs) body.write(end_boundary) - body.write(b"\n") + body.write(b"\r\n") body = body.getvalue() msg = "Submitting %s to %s" % (filename, self.repository) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py --- a/Lib/distutils/tests/test_upload.py +++ b/Lib/distutils/tests/test_upload.py @@ -127,7 +127,7 @@ # what did we send ? 
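    # The Content-length this test expects grows from 2087 to 2163 because the
    # boundary and header lines of the multipart/form-data body now end with
    # CRLF rather than a bare LF.  A minimal sketch of the framing used by the
    # patched upload command (the field names here are illustrative):
    import io

    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    sep_boundary = b'\r\n--' + boundary.encode('ascii')
    end_boundary = sep_boundary + b'--\r\n'

    body = io.BytesIO()
    for key, value in [('name', 'dist'), ('version', '1.0')]:
        body.write(sep_boundary)
        body.write(('\r\nContent-Disposition: form-data; name="%s"' % key)
                   .encode('utf-8'))
        body.write(b'\r\n\r\n')
        body.write(value.encode('utf-8'))
    body.write(end_boundary)
    payload = body.getvalue()    # framing a compliant server will accept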
headers = dict(self.last_open.req.headers) - self.assertEqual(headers['Content-length'], '2087') + self.assertEqual(headers['Content-length'], '2163') content_type = headers['Content-type'] self.assertTrue(content_type.startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -281,6 +281,7 @@ Garrett Cooper Greg Copeland Aldo Cortesi +Ian Cordasco David Costanzo Scott Cotton Greg Couch diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -145,6 +145,9 @@ Library ------- +- Issue #10510: distutils register and upload methods now use HTML standards + compliant CRLF line endings. + - Issue #9850: Fixed macpath.join() for empty first component. Patch by Oleg Oshmyan. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 23:01:43 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 21:01:43 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Revert_=2322251?= Message-ID: <20140927210142.26715.53948@mail.hg.python.org> https://hg.python.org/cpython/rev/78ae78f967f1 changeset: 92614:78ae78f967f1 parent: 92611:ea665bae2ea0 parent: 92613:0b84904c9471 user: Berker Peksag date: Sun Sep 28 00:01:55 2014 +0300 summary: Revert #22251 files: Doc/extending/newtypes.rst | 2 -- Doc/faq/library.rst | 4 +--- Doc/library/argparse.rst | 1 - Doc/library/asyncio-eventloop.rst | 4 +--- Doc/library/asyncio-protocol.rst | 2 -- Doc/library/asyncio-task.rst | 1 - Doc/library/email.contentmanager.rst | 4 +--- Doc/library/logging.handlers.rst | 4 +--- Doc/library/multiprocessing.rst | 1 - Doc/library/os.rst | 1 - Doc/library/pathlib.rst | 3 --- Doc/library/pickle.rst | 1 - Doc/library/ssl.rst | 3 --- Doc/library/statistics.rst | 4 +--- Doc/library/stdtypes.rst | 1 - Doc/library/sys.rst | 1 - Doc/library/warnings.rst | 1 - Doc/using/cmdline.rst | 6 +----- 18 files changed, 6 insertions(+), 38 deletions(-) diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -983,7 +983,6 @@ } .. note:: - There are limitations to what you can safely do in a deallocator function. First, if your type supports garbage collection (using :c:member:`~PyTypeObject.tp_traverse` and/or :c:member:`~PyTypeObject.tp_clear`), some of the object's members can have been @@ -998,7 +997,6 @@ :c:member:`~PyTypeObject.tp_finalize` type method. .. seealso:: - :pep:`442` explains the new finalization scheme. .. index:: diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -697,9 +697,7 @@ >>> urllib.parse.urlencode({'name': 'Guy Steele, Jr.'}) 'name=Guy+Steele%2C+Jr.' -.. seealso:: - - :ref:`urllib-howto` for extensive examples. +.. seealso:: :ref:`urllib-howto` for extensive examples. What module should I use to help with generating HTML? diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1894,7 +1894,6 @@ (Namespace(bar='BAR', foo=True), ['--badger', 'spam']) .. warning:: - :ref:`Prefix matching ` rules apply to :meth:`parse_known_args`. 
The parser may consume an option even if it's just a prefix of one of its known options, instead of leaving it in the remaining diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -197,9 +197,7 @@ the transport; if *ssl* is :const:`True`, a context with some unspecified default settings is used. - .. seealso:: - - :ref:`SSL/TLS security considerations ` + .. seealso:: :ref:`SSL/TLS security considerations ` * *server_hostname*, is only for use together with *ssl*, and sets or overrides the hostname that the target server's certificate diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -407,7 +407,6 @@ buffer size reaches the low-water mark. .. note:: - If the buffer size equals the high-water mark, :meth:`pause_writing` is not called -- it must go strictly over. Conversely, :meth:`resume_writing` is called when the buffer size is @@ -416,7 +415,6 @@ mark is zero. .. note:: - On BSD systems (OS X, FreeBSD, etc.) flow control is not supported for :class:`DatagramProtocol`, because send failures caused by writing too many packets cannot be detected easily. The socket diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -269,7 +269,6 @@ the future. .. note:: - The :meth:`~BaseEventLoop.run_until_complete` method uses internally the :meth:`~Future.add_done_callback` method to be notified when the future is done. diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -405,9 +405,7 @@ ``message/rfc822``, use ``8bit`` if *cte* is not specified. For all other values of *subtype*, use ``7bit``. - .. note:: - - A *cte* of ``binary`` does not actually work correctly yet. + .. note:: A *cte* of ``binary`` does not actually work correctly yet. The ``Message`` object as modified by ``set_content`` is correct, but :class:`~email.generator.BytesGenerator` does not serialize it correctly. diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -864,9 +864,7 @@ :meth:`mapLogRecord` method is used to convert the record to the dictionary to be sent. - .. note:: - - Since preparing a record for sending it to a Web server is not + .. note:: Since preparing a record for sending it to a Web server is not the same as a generic formatting operation, using :meth:`~logging.Handler.setFormatter` to specify a :class:`~logging.Formatter` for a :class:`HTTPHandler` has no effect. diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -851,7 +851,6 @@ :exc:`NotImplementedError`. .. seealso:: - :func:`os.cpu_count` .. function:: current_process() diff --git a/Doc/library/os.rst b/Doc/library/os.rst --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -57,7 +57,6 @@ ``'ce'``, ``'java'``. .. seealso:: - :attr:`sys.platform` has a finer granularity. :func:`os.uname` gives system-dependent version information. 
diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -32,18 +32,15 @@ useful since those simply don't have any OS-accessing operations. .. note:: - This module has been included in the standard library on a :term:`provisional basis `. Backwards incompatible changes (up to and including removal of the package) may occur if deemed necessary by the core developers. .. seealso:: - :pep:`428`: The pathlib module -- object-oriented filesystem paths. .. seealso:: - For low-level path manipulation on strings, you can also use the :mod:`os.path` module. diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -141,7 +141,6 @@ brought by protocol 4. .. note:: - Serialization is a more primitive notion than persistence; although :mod:`pickle` reads and writes file objects, it does not handle the issue of naming persistent objects, nor the (even more complicated) issue of concurrent diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -259,7 +259,6 @@ default CA certificates. .. note:: - The protocol, options, cipher and other settings may change to more restrictive values anytime without prior deprecation. The values represent a fair balance between compatibility and security. @@ -268,7 +267,6 @@ :class:`SSLContext` and apply the settings yourself. .. note:: - If you find that when certain older clients or servers attempt to connect with a :class:`SSLContext` created by this function that they get an error stating "Protocol or cipher suite mismatch", it may be that they @@ -965,7 +963,6 @@ interoperability. .. seealso:: - :func:`create_default_context` lets the :mod:`ssl` module choose security settings for a given purpose. diff --git a/Doc/library/statistics.rst b/Doc/library/statistics.rst --- a/Doc/library/statistics.rst +++ b/Doc/library/statistics.rst @@ -135,9 +135,7 @@ This is suited for when your data is discrete, and you don't mind that the median may not be an actual data point. - .. seealso:: - - :func:`median_low`, :func:`median_high`, :func:`median_grouped` + .. seealso:: :func:`median_low`, :func:`median_high`, :func:`median_grouped` .. function:: median_low(data) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -3872,7 +3872,6 @@ :ref:`documentation of view objects `. .. seealso:: - :class:`types.MappingProxyType` can be used to create a read-only view of a :class:`dict`. diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -798,7 +798,6 @@ .. seealso:: - Module :mod:`site` This describes how to use .pth files to extend :data:`sys.path`. diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst --- a/Doc/library/warnings.rst +++ b/Doc/library/warnings.rst @@ -41,7 +41,6 @@ custom implementations. .. seealso:: - :func:`logging.captureWarnings` allows you to handle all warnings with the standard logging infrastructure. diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -107,7 +107,6 @@ python -mtimeit -h # for details .. seealso:: - :func:`runpy.run_module` Equivalent functionality directly available to Python code @@ -159,9 +158,7 @@ .. versionchanged:: 3.4 Automatic enabling of tab-completion and history editing. -.. seealso:: - - :ref:`tut-invoking` +.. 
seealso:: :ref:`tut-invoking` Generic options @@ -365,7 +362,6 @@ thus equivalent to an omitted line number. .. seealso:: - :mod:`warnings` -- the warnings module :pep:`230` -- Warning framework -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Sep 27 23:01:43 2014 From: python-checkins at python.org (berker.peksag) Date: Sat, 27 Sep 2014 21:01:43 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogUmV2ZXJ0ICMyMjI1?= =?utf-8?q?1?= Message-ID: <20140927210141.26729.83073@mail.hg.python.org> https://hg.python.org/cpython/rev/0b84904c9471 changeset: 92613:0b84904c9471 branch: 3.4 parent: 92610:5e3f8bd33cf2 user: Berker Peksag date: Sun Sep 28 00:00:58 2014 +0300 summary: Revert #22251 files: Doc/extending/newtypes.rst | 2 -- Doc/faq/library.rst | 4 +--- Doc/library/argparse.rst | 1 - Doc/library/asyncio-eventloop.rst | 4 +--- Doc/library/asyncio-protocol.rst | 2 -- Doc/library/asyncio-task.rst | 1 - Doc/library/email.contentmanager.rst | 4 +--- Doc/library/logging.handlers.rst | 4 +--- Doc/library/multiprocessing.rst | 1 - Doc/library/os.rst | 1 - Doc/library/pathlib.rst | 3 --- Doc/library/pickle.rst | 1 - Doc/library/ssl.rst | 3 --- Doc/library/statistics.rst | 4 +--- Doc/library/stdtypes.rst | 1 - Doc/library/sys.rst | 1 - Doc/library/warnings.rst | 1 - Doc/using/cmdline.rst | 6 +----- 18 files changed, 6 insertions(+), 38 deletions(-) diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst --- a/Doc/extending/newtypes.rst +++ b/Doc/extending/newtypes.rst @@ -983,7 +983,6 @@ } .. note:: - There are limitations to what you can safely do in a deallocator function. First, if your type supports garbage collection (using :c:member:`~PyTypeObject.tp_traverse` and/or :c:member:`~PyTypeObject.tp_clear`), some of the object's members can have been @@ -998,7 +997,6 @@ :c:member:`~PyTypeObject.tp_finalize` type method. .. seealso:: - :pep:`442` explains the new finalization scheme. .. index:: diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -697,9 +697,7 @@ >>> urllib.parse.urlencode({'name': 'Guy Steele, Jr.'}) 'name=Guy+Steele%2C+Jr.' -.. seealso:: - - :ref:`urllib-howto` for extensive examples. +.. seealso:: :ref:`urllib-howto` for extensive examples. What module should I use to help with generating HTML? diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1894,7 +1894,6 @@ (Namespace(bar='BAR', foo=True), ['--badger', 'spam']) .. warning:: - :ref:`Prefix matching ` rules apply to :meth:`parse_known_args`. The parser may consume an option even if it's just a prefix of one of its known options, instead of leaving it in the remaining diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -197,9 +197,7 @@ the transport; if *ssl* is :const:`True`, a context with some unspecified default settings is used. - .. seealso:: - - :ref:`SSL/TLS security considerations ` + .. seealso:: :ref:`SSL/TLS security considerations ` * *server_hostname*, is only for use together with *ssl*, and sets or overrides the hostname that the target server's certificate diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -407,7 +407,6 @@ buffer size reaches the low-water mark. .. 
note:: - If the buffer size equals the high-water mark, :meth:`pause_writing` is not called -- it must go strictly over. Conversely, :meth:`resume_writing` is called when the buffer size is @@ -416,7 +415,6 @@ mark is zero. .. note:: - On BSD systems (OS X, FreeBSD, etc.) flow control is not supported for :class:`DatagramProtocol`, because send failures caused by writing too many packets cannot be detected easily. The socket diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -269,7 +269,6 @@ the future. .. note:: - The :meth:`~BaseEventLoop.run_until_complete` method uses internally the :meth:`~Future.add_done_callback` method to be notified when the future is done. diff --git a/Doc/library/email.contentmanager.rst b/Doc/library/email.contentmanager.rst --- a/Doc/library/email.contentmanager.rst +++ b/Doc/library/email.contentmanager.rst @@ -405,9 +405,7 @@ ``message/rfc822``, use ``8bit`` if *cte* is not specified. For all other values of *subtype*, use ``7bit``. - .. note:: - - A *cte* of ``binary`` does not actually work correctly yet. + .. note:: A *cte* of ``binary`` does not actually work correctly yet. The ``Message`` object as modified by ``set_content`` is correct, but :class:`~email.generator.BytesGenerator` does not serialize it correctly. diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -864,9 +864,7 @@ :meth:`mapLogRecord` method is used to convert the record to the dictionary to be sent. - .. note:: - - Since preparing a record for sending it to a Web server is not + .. note:: Since preparing a record for sending it to a Web server is not the same as a generic formatting operation, using :meth:`~logging.Handler.setFormatter` to specify a :class:`~logging.Formatter` for a :class:`HTTPHandler` has no effect. diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -851,7 +851,6 @@ :exc:`NotImplementedError`. .. seealso:: - :func:`os.cpu_count` .. function:: current_process() diff --git a/Doc/library/os.rst b/Doc/library/os.rst --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -57,7 +57,6 @@ ``'ce'``, ``'java'``. .. seealso:: - :attr:`sys.platform` has a finer granularity. :func:`os.uname` gives system-dependent version information. diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -32,18 +32,15 @@ useful since those simply don't have any OS-accessing operations. .. note:: - This module has been included in the standard library on a :term:`provisional basis `. Backwards incompatible changes (up to and including removal of the package) may occur if deemed necessary by the core developers. .. seealso:: - :pep:`428`: The pathlib module -- object-oriented filesystem paths. .. seealso:: - For low-level path manipulation on strings, you can also use the :mod:`os.path` module. diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -141,7 +141,6 @@ brought by protocol 4. .. 
note:: - Serialization is a more primitive notion than persistence; although :mod:`pickle` reads and writes file objects, it does not handle the issue of naming persistent objects, nor the (even more complicated) issue of concurrent diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -259,7 +259,6 @@ default CA certificates. .. note:: - The protocol, options, cipher and other settings may change to more restrictive values anytime without prior deprecation. The values represent a fair balance between compatibility and security. @@ -268,7 +267,6 @@ :class:`SSLContext` and apply the settings yourself. .. note:: - If you find that when certain older clients or servers attempt to connect with a :class:`SSLContext` created by this function that they get an error stating "Protocol or cipher suite mismatch", it may be that they @@ -934,7 +932,6 @@ interoperability. .. seealso:: - :func:`create_default_context` lets the :mod:`ssl` module choose security settings for a given purpose. diff --git a/Doc/library/statistics.rst b/Doc/library/statistics.rst --- a/Doc/library/statistics.rst +++ b/Doc/library/statistics.rst @@ -135,9 +135,7 @@ This is suited for when your data is discrete, and you don't mind that the median may not be an actual data point. - .. seealso:: - - :func:`median_low`, :func:`median_high`, :func:`median_grouped` + .. seealso:: :func:`median_low`, :func:`median_high`, :func:`median_grouped` .. function:: median_low(data) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -3872,7 +3872,6 @@ :ref:`documentation of view objects `. .. seealso:: - :class:`types.MappingProxyType` can be used to create a read-only view of a :class:`dict`. diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -798,7 +798,6 @@ .. seealso:: - Module :mod:`site` This describes how to use .pth files to extend :data:`sys.path`. diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst --- a/Doc/library/warnings.rst +++ b/Doc/library/warnings.rst @@ -41,7 +41,6 @@ custom implementations. .. seealso:: - :func:`logging.captureWarnings` allows you to handle all warnings with the standard logging infrastructure. diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst --- a/Doc/using/cmdline.rst +++ b/Doc/using/cmdline.rst @@ -107,7 +107,6 @@ python -mtimeit -h # for details .. seealso:: - :func:`runpy.run_module` Equivalent functionality directly available to Python code @@ -159,9 +158,7 @@ .. versionchanged:: 3.4 Automatic enabling of tab-completion and history editing. -.. seealso:: - - :ref:`tut-invoking` +.. seealso:: :ref:`tut-invoking` Generic options @@ -365,7 +362,6 @@ thus equivalent to an omitted line number. .. 
seealso:: - :mod:`warnings` -- the warnings module :pep:`230` -- Warning framework -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sun Sep 28 09:48:51 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 28 Sep 2014 09:48:51 +0200 Subject: [Python-checkins] Daily reference leaks (78ae78f967f1): sum=5 Message-ID: results for 78ae78f967f1 on branch "default" -------------------------------------------- test_collections leaked [0, 2, 0] references, sum=2 test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogYkZ7UX', '-x'] From python-checkins at python.org Sun Sep 28 10:29:46 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 28 Sep 2014 08:29:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Removed_redundant_casts_to?= =?utf-8?q?_=60char_*=60=2E?= Message-ID: <20140928082943.50516.91980@mail.hg.python.org> https://hg.python.org/cpython/rev/599a957038fa changeset: 92615:599a957038fa user: Serhiy Storchaka date: Sun Sep 28 11:27:24 2014 +0300 summary: Removed redundant casts to `char *`. Corresponding functions now accept `const char *` (issue #1772673). files: Modules/_cursesmodule.c | 2 +- Modules/_decimal/_decimal.c | 4 ++-- Modules/cjkcodecs/cjkcodecs.h | 2 +- Modules/cjkcodecs/multibytecodec.c | 4 ++-- Modules/pyexpat.c | 9 ++++----- Objects/bytesobject.c | 2 +- Objects/floatobject.c | 4 ++-- Objects/longobject.c | 2 +- Objects/unicodeobject.c | 4 ++-- Python/import.c | 2 +- 10 files changed, 17 insertions(+), 18 deletions(-) diff --git a/Modules/_cursesmodule.c b/Modules/_cursesmodule.c --- a/Modules/_cursesmodule.c +++ b/Modules/_cursesmodule.c @@ -2675,7 +2675,7 @@ } knp = keyname(ch); - return PyBytes_FromString((knp == NULL) ? "" : (char *)knp); + return PyBytes_FromString((knp == NULL) ? 
"" : knp); } #endif diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -5640,7 +5640,7 @@ goto error; /* GCOV_NOT_REACHED */ } - ASSIGN_PTR(cm->ex, PyErr_NewException((char *)cm->fqname, base, NULL)); + ASSIGN_PTR(cm->ex, PyErr_NewException(cm->fqname, base, NULL)); Py_DECREF(base); /* add to module */ @@ -5672,7 +5672,7 @@ goto error; /* GCOV_NOT_REACHED */ } - ASSIGN_PTR(cm->ex, PyErr_NewException((char *)cm->fqname, base, NULL)); + ASSIGN_PTR(cm->ex, PyErr_NewException(cm->fqname, base, NULL)); Py_DECREF(base); Py_INCREF(cm->ex); diff --git a/Modules/cjkcodecs/cjkcodecs.h b/Modules/cjkcodecs/cjkcodecs.h --- a/Modules/cjkcodecs/cjkcodecs.h +++ b/Modules/cjkcodecs/cjkcodecs.h @@ -362,7 +362,7 @@ if (mod == NULL) return -1; - o = PyObject_GetAttrString(mod, (char*)symbol); + o = PyObject_GetAttrString(mod, symbol); if (o == NULL) goto errorexit; else if (!PyCapsule_IsValid(o, PyMultibyteCodec_CAPSULE_NAME)) { diff --git a/Modules/cjkcodecs/multibytecodec.c b/Modules/cjkcodecs/multibytecodec.c --- a/Modules/cjkcodecs/multibytecodec.c +++ b/Modules/cjkcodecs/multibytecodec.c @@ -1269,10 +1269,10 @@ if (sizehint < 0) cres = PyObject_CallMethod(self->stream, - (char *)method, NULL); + method, NULL); else cres = PyObject_CallMethod(self->stream, - (char *)method, "i", sizehint); + method, "i", sizehint); if (cres == NULL) goto errorexit; diff --git a/Modules/pyexpat.c b/Modules/pyexpat.c --- a/Modules/pyexpat.c +++ b/Modules/pyexpat.c @@ -1478,7 +1478,7 @@ static PyObject * -newxmlparseobject(char *encoding, char *namespace_separator, PyObject *intern) +newxmlparseobject(const char *encoding, const char *namespace_separator, PyObject *intern) { int i; xmlparseobject *self; @@ -1932,8 +1932,7 @@ return NULL; } - result = newxmlparseobject((char *)encoding, (char *)namespace_separator, - intern); + result = newxmlparseobject(encoding, namespace_separator, intern); if (intern_decref) { Py_DECREF(intern); } @@ -2074,7 +2073,7 @@ PyModule_AddObject(m, "XMLParserType", (PyObject *) &Xmlparsetype); PyModule_AddStringConstant(m, "EXPAT_VERSION", - (char *) XML_ExpatVersion()); + XML_ExpatVersion()); { XML_Expat_Version info = XML_ExpatVersionInfo(); PyModule_AddObject(m, "version_info", @@ -2154,7 +2153,7 @@ #define MYCONST(name) \ if (PyModule_AddStringConstant(errors_module, #name, \ - (char *)XML_ErrorString(name)) < 0) \ + XML_ErrorString(name)) < 0) \ return NULL; \ tmpnum = PyLong_FromLong(name); \ if (tmpnum == NULL) return NULL; \ diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3403,7 +3403,7 @@ _Py_DEC_REFTOTAL; _Py_ForgetReference(v); *pv = (PyObject *) - PyObject_REALLOC((char *)v, PyBytesObject_SIZE + newsize); + PyObject_REALLOC(v, PyBytesObject_SIZE + newsize); if (*pv == NULL) { PyObject_Del(v); PyErr_NoMemory(); diff --git a/Objects/floatobject.c b/Objects/floatobject.c --- a/Objects/floatobject.c +++ b/Objects/floatobject.c @@ -2026,7 +2026,7 @@ } else { float y = (float)x; - const char *s = (char*)&y; + const unsigned char *s = (unsigned char*)&y; int i, incr = 1; if (Py_IS_INFINITY(y) && !Py_IS_INFINITY(x)) @@ -2162,7 +2162,7 @@ return -1; } else { - const char *s = (char*)&x; + const unsigned char *s = (unsigned char*)&x; int i, incr = 1; if ((double_format == ieee_little_endian_format && !le) diff --git a/Objects/longobject.c b/Objects/longobject.c --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -2312,7 +2312,7 @@ 
PyObject *result, *strobj; char *end = NULL; - result = PyLong_FromString((char*)s, &end, base); + result = PyLong_FromString(s, &end, base); if (end == NULL || (result != NULL && end == s + len)) return result; Py_XDECREF(result); diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -727,7 +727,7 @@ _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); - new_unicode = (PyObject *)PyObject_REALLOC((char *)unicode, new_size); + new_unicode = (PyObject *)PyObject_REALLOC(unicode, new_size); if (new_unicode == NULL) { _Py_NewReference(unicode); PyErr_NoMemory(); @@ -3483,7 +3483,7 @@ memset(&mbs, 0, sizeof mbs); while (len) { - converted = mbrtowc(&ch, (char*)str, len, &mbs); + converted = mbrtowc(&ch, str, len, &mbs); if (converted == 0) /* Reached end of string */ break; diff --git a/Python/import.c b/Python/import.c --- a/Python/import.c +++ b/Python/import.c @@ -2064,7 +2064,7 @@ memset(newtab, '\0', sizeof newtab); - newtab[0].name = (char *)name; + newtab[0].name = name; newtab[0].initfunc = initfunc; return PyImport_ExtendInittab(newtab); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 14:37:30 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 28 Sep 2014 12:37:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Removed_a_code_for_suport_?= =?utf-8?q?Python_version_=3C2=2E2=2E?= Message-ID: <20140928123730.51491.25125@mail.hg.python.org> https://hg.python.org/cpython/rev/a4752c32cc79 changeset: 92616:a4752c32cc79 user: Serhiy Storchaka date: Sun Sep 28 15:36:18 2014 +0300 summary: Removed a code for suport Python version <2.2. files: Lib/re.py | 23 ++++++++++++----------- 1 files changed, 12 insertions(+), 11 deletions(-) diff --git a/Lib/re.py b/Lib/re.py --- a/Lib/re.py +++ b/Lib/re.py @@ -124,10 +124,13 @@ import sre_parse # public symbols -__all__ = [ "match", "fullmatch", "search", "sub", "subn", "split", "findall", - "compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X", - "U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE", - "UNICODE", "error" ] +__all__ = [ + "match", "fullmatch", "search", "sub", "subn", "split", + "findall", "finditer", "compile", "purge", "template", "escape", + "error", "A", "I", "L", "M", "S", "X", "U", + "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE", + "UNICODE", +] __version__ = "2.2.1" @@ -205,14 +208,12 @@ Empty matches are included in the result.""" return _compile(pattern, flags).findall(string) -if sys.hexversion >= 0x02020000: - __all__.append("finditer") - def finditer(pattern, string, flags=0): - """Return an iterator over all non-overlapping matches in the - string. For each match, the iterator returns a match object. +def finditer(pattern, string, flags=0): + """Return an iterator over all non-overlapping matches in the + string. For each match, the iterator returns a match object. - Empty matches are included in the result.""" - return _compile(pattern, flags).finditer(string) + Empty matches are included in the result.""" + return _compile(pattern, flags).finditer(string) def compile(pattern, flags=0): "Compile a regular expression pattern, returning a pattern object." 
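For readers skimming the Lib/re.py diff above: the net effect is only that finditer is now defined unconditionally at module level, with the same docstring and behaviour as before. A minimal usage sketch follows; the pattern and sample string are illustrative only and are not taken from the patch or its tests.

    import re

    # finditer lazily yields one match object per non-overlapping match
    for m in re.finditer(r"\d+", "abc 12 de 345"):
        print(m.start(), m.group(0))
    # prints:
    # 4 12
    # 10 345
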
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 17:02:20 2014 From: python-checkins at python.org (r.david.murray) Date: Sun, 28 Sep 2014 15:02:20 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=2310510=3A_Fix_bug_in_forward_port_of_2=2E7_distutils_p?= =?utf-8?q?atch=2E?= Message-ID: <20140928150220.2397.39054@mail.hg.python.org> https://hg.python.org/cpython/rev/90b07d422bd9 changeset: 92618:90b07d422bd9 parent: 92616:a4752c32cc79 parent: 92617:6375bf34fff6 user: R David Murray date: Sun Sep 28 11:01:42 2014 -0400 summary: #10510: Fix bug in forward port of 2.7 distutils patch. Pointed out by Arfrever. files: Doc/faq/programming.rst | 77 +++++++++++++++++- Lib/distutils/command/upload.py | 1 - Lib/distutils/tests/test_upload.py | 2 +- 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -401,7 +401,7 @@ # Calculate the value result = ... expensive computation ... - _cache[(arg1, arg2)] = result # Store result in the cache + _cache[(arg1, arg2)] = result # Store result in the cache return result You could use a global variable containing a dictionary instead of the default @@ -448,6 +448,81 @@ the values ``42``, ``314``, and ``somevar`` are arguments. +Why did changing list 'y' also change list 'x'? +------------------------------------------------ + +If you wrote code like:: + + >>> x = [] + >>> y = x + >>> y.append(10) + >>> y + [10] + >>> x + [10] + +you might be wondering why appending an element to ``y`` changed ``x`` too. + +There are two factors that produce this result: + +1) Variables are simply names that refer to objects. Doing ``y = x`` doesn't + create a copy of the list -- it creates a new variable ``y`` that refers to + the same object ``x`` refers to. This means that there is only one object + (the list), and both ``x`` and ``y`` refer to it. +2) Lists are :term:`mutable`, which means that you can change their content. + +After the call to :meth:`~list.append`, the content of the mutable object has +changed from ``[]`` to ``[10]``. Since both the variables refer to the same +object, accessing either one of them accesses the modified value ``[10]``. + +If we instead assign an immutable object to ``x``:: + + >>> x = 5 # ints are immutable + >>> y = x + >>> x = x + 1 # 5 can't be mutated, we are creating a new object here + >>> x + 6 + >>> y + 5 + +we can see that in this case ``x`` and ``y`` are not equal anymore. This is +because integers are :term:`immutable`, and when we do ``x = x + 1`` we are not +mutating the int ``5`` by incrementing its value; instead, we are creating a +new object (the int ``6``) and assigning it to ``x`` (that is, changing which +object ``x`` refers to). After this assignment we have two objects (the ints +``6`` and ``5``) and two variables that refer to them (``x`` now refers to +``6`` but ``y`` still refers to ``5``). + +Some operations (for example ``y.append(10)`` and ``y.sort()``) mutate the +object, whereas superficially similar operations (for example ``y = y + [10]`` +and ``sorted(y)``) create a new object. In general in Python (and in all cases +in the standard library) a method that mutates an object will return ``None`` +to help avoid getting the two types of operations confused. 
So if you +mistakenly write ``y.sort()`` thinking it will give you a sorted copy of ``y``, +you'll instead end up with ``None``, which will likely cause your program to +generate an easily diagnosed error. + +However, there is one class of operations where the same operation sometimes +has different behaviors with different types: the augmented assignment +operators. For example, ``+=`` mutates lists but not tuples or ints (``a_list ++= [1, 2, 3]`` is equivalent to ``a_list.extend([1, 2, 3])`` and mutates +``a_list``, whereas ``some_tuple += (1, 2, 3)`` and ``some_int += 1`` create +new objects). + +In other words: + +* If we have a mutable object (:class:`list`, :class:`dict`, :class:`set`, + etc.), we can use some specific operations to mutate it and all the variables + that refer to it will see the change. +* If we have an immutable object (:class:`str`, :class:`int`, :class:`tuple`, + etc.), all the variables that refer to it will always see the same value, + but operations that transform that value into a new value always return a new + object. + +If you want to know if two variables refer to the same object or not, you can +use the :keyword:`is` operator, or the built-in function :func:`id`. + + How do I write a function with output parameters (call by reference)? --------------------------------------------------------------------- diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py --- a/Lib/distutils/command/upload.py +++ b/Lib/distutils/command/upload.py @@ -162,7 +162,6 @@ if value and value[-1:] == b'\r': body.write(b'\n') # write an extra newline (lurve Macs) body.write(end_boundary) - body.write(b"\r\n") body = body.getvalue() msg = "Submitting %s to %s" % (filename, self.repository) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py --- a/Lib/distutils/tests/test_upload.py +++ b/Lib/distutils/tests/test_upload.py @@ -127,7 +127,7 @@ # what did we send ? headers = dict(self.last_open.req.headers) - self.assertEqual(headers['Content-length'], '2163') + self.assertEqual(headers['Content-length'], '2161') content_type = headers['Content-type'] self.assertTrue(content_type.startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 17:02:23 2014 From: python-checkins at python.org (r.david.murray) Date: Sun, 28 Sep 2014 15:02:23 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzEwNTEwOiBGaXgg?= =?utf-8?q?bug_in_forward_port_of_2=2E7_distutils_patch=2E?= Message-ID: <20140928150220.51501.23977@mail.hg.python.org> https://hg.python.org/cpython/rev/6375bf34fff6 changeset: 92617:6375bf34fff6 branch: 3.4 parent: 92613:0b84904c9471 user: R David Murray date: Sun Sep 28 11:01:11 2014 -0400 summary: #10510: Fix bug in forward port of 2.7 distutils patch. Pointed out by Arfrever. files: Doc/faq/programming.rst | 77 +++++++++++++++++- Lib/distutils/command/upload.py | 1 - Lib/distutils/tests/test_upload.py | 2 +- 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -401,7 +401,7 @@ # Calculate the value result = ... expensive computation ... 
- _cache[(arg1, arg2)] = result # Store result in the cache + _cache[(arg1, arg2)] = result # Store result in the cache return result You could use a global variable containing a dictionary instead of the default @@ -448,6 +448,81 @@ the values ``42``, ``314``, and ``somevar`` are arguments. +Why did changing list 'y' also change list 'x'? +------------------------------------------------ + +If you wrote code like:: + + >>> x = [] + >>> y = x + >>> y.append(10) + >>> y + [10] + >>> x + [10] + +you might be wondering why appending an element to ``y`` changed ``x`` too. + +There are two factors that produce this result: + +1) Variables are simply names that refer to objects. Doing ``y = x`` doesn't + create a copy of the list -- it creates a new variable ``y`` that refers to + the same object ``x`` refers to. This means that there is only one object + (the list), and both ``x`` and ``y`` refer to it. +2) Lists are :term:`mutable`, which means that you can change their content. + +After the call to :meth:`~list.append`, the content of the mutable object has +changed from ``[]`` to ``[10]``. Since both the variables refer to the same +object, accessing either one of them accesses the modified value ``[10]``. + +If we instead assign an immutable object to ``x``:: + + >>> x = 5 # ints are immutable + >>> y = x + >>> x = x + 1 # 5 can't be mutated, we are creating a new object here + >>> x + 6 + >>> y + 5 + +we can see that in this case ``x`` and ``y`` are not equal anymore. This is +because integers are :term:`immutable`, and when we do ``x = x + 1`` we are not +mutating the int ``5`` by incrementing its value; instead, we are creating a +new object (the int ``6``) and assigning it to ``x`` (that is, changing which +object ``x`` refers to). After this assignment we have two objects (the ints +``6`` and ``5``) and two variables that refer to them (``x`` now refers to +``6`` but ``y`` still refers to ``5``). + +Some operations (for example ``y.append(10)`` and ``y.sort()``) mutate the +object, whereas superficially similar operations (for example ``y = y + [10]`` +and ``sorted(y)``) create a new object. In general in Python (and in all cases +in the standard library) a method that mutates an object will return ``None`` +to help avoid getting the two types of operations confused. So if you +mistakenly write ``y.sort()`` thinking it will give you a sorted copy of ``y``, +you'll instead end up with ``None``, which will likely cause your program to +generate an easily diagnosed error. + +However, there is one class of operations where the same operation sometimes +has different behaviors with different types: the augmented assignment +operators. For example, ``+=`` mutates lists but not tuples or ints (``a_list ++= [1, 2, 3]`` is equivalent to ``a_list.extend([1, 2, 3])`` and mutates +``a_list``, whereas ``some_tuple += (1, 2, 3)`` and ``some_int += 1`` create +new objects). + +In other words: + +* If we have a mutable object (:class:`list`, :class:`dict`, :class:`set`, + etc.), we can use some specific operations to mutate it and all the variables + that refer to it will see the change. +* If we have an immutable object (:class:`str`, :class:`int`, :class:`tuple`, + etc.), all the variables that refer to it will always see the same value, + but operations that transform that value into a new value always return a new + object. + +If you want to know if two variables refer to the same object or not, you can +use the :keyword:`is` operator, or the built-in function :func:`id`. 
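A minimal sketch tying together the points made in the new FAQ entry above: augmented assignment mutates a list but builds a new tuple, and is / id() reveal which names share an object. The variable names below are illustrative, not taken from the FAQ text itself.

    a_list = [1, 2]
    alias = a_list
    a_list += [3]                    # augmented assignment mutates the list in place
    print(alias)                     # [1, 2, 3] -- the other name sees the change
    print(alias is a_list)           # True: both names refer to one object
    print(id(alias) == id(a_list))   # True for the same reason

    a_tuple = (1, 2)
    old = a_tuple
    a_tuple += (3,)                  # tuples are immutable, so this builds a new object
    print(old)                       # (1, 2) -- the original tuple is unchanged
    print(old is a_tuple)            # False: a_tuple now names a different object
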
+ + How do I write a function with output parameters (call by reference)? --------------------------------------------------------------------- diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py --- a/Lib/distutils/command/upload.py +++ b/Lib/distutils/command/upload.py @@ -164,7 +164,6 @@ if value and value[-1:] == b'\r': body.write(b'\n') # write an extra newline (lurve Macs) body.write(end_boundary) - body.write(b"\r\n") body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py --- a/Lib/distutils/tests/test_upload.py +++ b/Lib/distutils/tests/test_upload.py @@ -127,7 +127,7 @@ # what did we send ? headers = dict(self.last_open.req.headers) - self.assertEqual(headers['Content-length'], '2163') + self.assertEqual(headers['Content-length'], '2161') content_type = headers['Content-type'] self.assertTrue(content_type.startswith('multipart/form-data')) self.assertEqual(self.last_open.req.get_method(), 'POST') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 18:57:29 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 28 Sep 2014 16:57:29 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_give_exception?= =?utf-8?q?_a_nice_message_=28closes_=2322379=29?= Message-ID: <20140928165729.2395.61474@mail.hg.python.org> https://hg.python.org/cpython/rev/0ad19246d16d changeset: 92619:0ad19246d16d branch: 2.7 parent: 92612:9ad78b4b169c user: Benjamin Peterson date: Sun Sep 28 12:48:46 2014 -0400 summary: give exception a nice message (closes #22379) Patch by Yongzhi Pan. files: Lib/test/string_tests.py | 13 ++++++------- Lib/test/test_string.py | 11 ++++------- Lib/test/test_userstring.py | 12 +++++------- Misc/ACKS | 1 + Misc/NEWS | 3 +++ Objects/stringobject.c | 2 +- 6 files changed, 20 insertions(+), 22 deletions(-) diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py --- a/Lib/test/string_tests.py +++ b/Lib/test/string_tests.py @@ -65,14 +65,12 @@ self.assertTrue(object is not realresult) # check that object.method(*args) raises exc - def checkraises(self, exc, object, methodname, *args): - object = self.fixtype(object) + def checkraises(self, exc, obj, methodname, *args): + obj = self.fixtype(obj) args = self.fixtype(args) - self.assertRaises( - exc, - getattr(object, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(cm.exception.message, '') # call object.method(*args) without any checks def checkcall(self, object, methodname, *args): @@ -1057,6 +1055,7 @@ self.checkequal('a b c', ' ', 'join', BadSeq2()) self.checkraises(TypeError, ' ', 'join') + self.checkraises(TypeError, ' ', 'join', None) self.checkraises(TypeError, ' ', 'join', 7) self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L])) try: diff --git a/Lib/test/test_string.py b/Lib/test/test_string.py --- a/Lib/test/test_string.py +++ b/Lib/test/test_string.py @@ -16,13 +16,10 @@ realresult ) - def checkraises(self, exc, object, methodname, *args): - self.assertRaises( - exc, - getattr(string, methodname), - object, - *args - ) + def checkraises(self, exc, obj, methodname, *args): + with self.assertRaises(exc) as cm: + getattr(string, methodname)(obj, *args) + self.assertNotEqual(cm.exception.message, '') def checkcall(self, object, methodname, *args): getattr(string, methodname)(object, *args) diff --git 
a/Lib/test/test_userstring.py b/Lib/test/test_userstring.py --- a/Lib/test/test_userstring.py +++ b/Lib/test/test_userstring.py @@ -28,14 +28,12 @@ realresult ) - def checkraises(self, exc, object, methodname, *args): - object = self.fixtype(object) + def checkraises(self, exc, obj, methodname, *args): + obj = self.fixtype(obj) # we don't fix the arguments, because UserString can't cope with it - self.assertRaises( - exc, - getattr(object, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(cm.exception.message, '') def checkcall(self, object, methodname, *args): object = self.fixtype(object) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1007,6 +1007,7 @@ Todd R. Palmer Juan David Ib??ez Palomar Jan Palus +Yongzhi Pan Mathias Panzenb?ck M. Papillon Peter Parente diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #22379: Fix empty exception message in a TypeError raised in + ``str.join``. + - Issue #22221: Now the source encoding declaration on the second line isn't effective if the first line contains anything except a comment. diff --git a/Objects/stringobject.c b/Objects/stringobject.c --- a/Objects/stringobject.c +++ b/Objects/stringobject.c @@ -1594,7 +1594,7 @@ Py_ssize_t i; PyObject *seq, *item; - seq = PySequence_Fast(orig, ""); + seq = PySequence_Fast(orig, "can only join an iterable"); if (seq == NULL) { return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 18:57:33 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 28 Sep 2014 16:57:33 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_check_that_exc?= =?utf-8?q?eption_messages_are_not_empty_=28=2322379=29?= Message-ID: <20140928165729.51483.17145@mail.hg.python.org> https://hg.python.org/cpython/rev/ab1570d0132d changeset: 92620:ab1570d0132d branch: 3.4 parent: 92617:6375bf34fff6 user: Benjamin Peterson date: Sun Sep 28 12:56:42 2014 -0400 summary: check that exception messages are not empty (#22379) Patch by Yongzhi Pan. files: Lib/test/string_tests.py | 14 ++++++-------- Lib/test/test_bytes.py | 1 + Lib/test/test_userstring.py | 12 +++++------- Misc/ACKS | 1 + 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py --- a/Lib/test/string_tests.py +++ b/Lib/test/string_tests.py @@ -1,5 +1,5 @@ """ -Common tests shared by test_str, test_unicode, test_userstring and test_string. +Common tests shared by test_unicode, test_userstring and test_string. 
""" import unittest, string, sys, struct @@ -79,11 +79,9 @@ def checkraises(self, exc, obj, methodname, *args): obj = self.fixtype(obj) args = self.fixtype(args) - self.assertRaises( - exc, - getattr(obj, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(str(cm.exception), '') # call obj.method(*args) without any checks def checkcall(self, obj, methodname, *args): @@ -1119,8 +1117,7 @@ def test_join(self): # join now works with any sequence type # moved here, because the argument order is - # different in string.join (see the test in - # test.test_string.StringTest.test_join) + # different in string.join self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd']) self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd')) self.checkequal('bd', '', 'join', ('', 'b', '', 'd')) @@ -1140,6 +1137,7 @@ self.checkequal('a b c', ' ', 'join', BadSeq2()) self.checkraises(TypeError, ' ', 'join') + self.checkraises(TypeError, ' ', 'join', None) self.checkraises(TypeError, ' ', 'join', 7) self.checkraises(TypeError, ' ', 'join', [1, 2, bytes()]) try: diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -298,6 +298,7 @@ seq = [b"abc"] * 1000 expected = b"abc" + b".:abc" * 999 self.assertEqual(dot_join(seq), expected) + self.assertRaises(TypeError, self.type2test(b" ").join, None) # Error handling and cleanup when some item in the middle of the # sequence has the wrong type. with self.assertRaises(TypeError): diff --git a/Lib/test/test_userstring.py b/Lib/test/test_userstring.py --- a/Lib/test/test_userstring.py +++ b/Lib/test/test_userstring.py @@ -28,14 +28,12 @@ realresult ) - def checkraises(self, exc, object, methodname, *args): - object = self.fixtype(object) + def checkraises(self, exc, obj, methodname, *args): + obj = self.fixtype(obj) # we don't fix the arguments, because UserString can't cope with it - self.assertRaises( - exc, - getattr(object, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(str(cm.exception), '') def checkcall(self, object, methodname, *args): object = self.fixtype(object) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1010,6 +1010,7 @@ Todd R. Palmer Juan David Ib??ez Palomar Jan Palus +Yongzhi Pan Martin Panter Mathias Panzenb?ck M. Papillon -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Sep 28 18:57:33 2014 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 28 Sep 2014 16:57:33 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjIzNzkp?= Message-ID: <20140928165730.50766.87970@mail.hg.python.org> https://hg.python.org/cpython/rev/78727a11b5ae changeset: 92621:78727a11b5ae parent: 92618:90b07d422bd9 parent: 92620:ab1570d0132d user: Benjamin Peterson date: Sun Sep 28 12:57:22 2014 -0400 summary: merge 3.4 (#22379) files: Lib/test/string_tests.py | 14 ++++++-------- Lib/test/test_bytes.py | 1 + Lib/test/test_userstring.py | 12 +++++------- Misc/ACKS | 1 + 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py --- a/Lib/test/string_tests.py +++ b/Lib/test/string_tests.py @@ -1,5 +1,5 @@ """ -Common tests shared by test_str, test_unicode, test_userstring and test_string. +Common tests shared by test_unicode, test_userstring and test_string. 
""" import unittest, string, sys, struct @@ -79,11 +79,9 @@ def checkraises(self, exc, obj, methodname, *args): obj = self.fixtype(obj) args = self.fixtype(args) - self.assertRaises( - exc, - getattr(obj, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(str(cm.exception), '') # call obj.method(*args) without any checks def checkcall(self, obj, methodname, *args): @@ -1119,8 +1117,7 @@ def test_join(self): # join now works with any sequence type # moved here, because the argument order is - # different in string.join (see the test in - # test.test_string.StringTest.test_join) + # different in string.join self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd']) self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd')) self.checkequal('bd', '', 'join', ('', 'b', '', 'd')) @@ -1140,6 +1137,7 @@ self.checkequal('a b c', ' ', 'join', BadSeq2()) self.checkraises(TypeError, ' ', 'join') + self.checkraises(TypeError, ' ', 'join', None) self.checkraises(TypeError, ' ', 'join', 7) self.checkraises(TypeError, ' ', 'join', [1, 2, bytes()]) try: diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -298,6 +298,7 @@ seq = [b"abc"] * 1000 expected = b"abc" + b".:abc" * 999 self.assertEqual(dot_join(seq), expected) + self.assertRaises(TypeError, self.type2test(b" ").join, None) # Error handling and cleanup when some item in the middle of the # sequence has the wrong type. with self.assertRaises(TypeError): diff --git a/Lib/test/test_userstring.py b/Lib/test/test_userstring.py --- a/Lib/test/test_userstring.py +++ b/Lib/test/test_userstring.py @@ -28,14 +28,12 @@ realresult ) - def checkraises(self, exc, object, methodname, *args): - object = self.fixtype(object) + def checkraises(self, exc, obj, methodname, *args): + obj = self.fixtype(obj) # we don't fix the arguments, because UserString can't cope with it - self.assertRaises( - exc, - getattr(object, methodname), - *args - ) + with self.assertRaises(exc) as cm: + getattr(obj, methodname)(*args) + self.assertNotEqual(str(cm.exception), '') def checkcall(self, object, methodname, *args): object = self.fixtype(object) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1019,6 +1019,7 @@ Todd R. Palmer Juan David Ib??ez Palomar Jan Palus +Yongzhi Pan Martin Panter Mathias Panzenb?ck M. Papillon -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 01:12:05 2014 From: python-checkins at python.org (alexander.belopolsky) Date: Sun, 28 Sep 2014 23:12:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_issue_=2320858=3A_E?= =?utf-8?q?nhancements/fixes_to_pure-python_datetime_module?= Message-ID: <20140928231205.51495.84360@mail.hg.python.org> https://hg.python.org/cpython/rev/5313b4c0bb6c changeset: 92622:5313b4c0bb6c user: Alexander Belopolsky date: Sun Sep 28 19:11:56 2014 -0400 summary: Closes issue #20858: Enhancements/fixes to pure-python datetime module This patch brings the pure-python datetime more in-line with the C module. Patch contributed by Brian Kearns, a PyPy developer. PyPy project has been running these modifications in PyPy2 stdlib. This commit includes: - General PEP8/cleanups; - Better testing of argument types passed to constructors; - Removal of duplicate operations; - Optimization of timedelta creation; - Caching the result of __hash__ like the C accelerator; - Enhancements/bug fixes in tests. 
files: Lib/datetime.py | 289 +++++++++++++----------- Lib/test/datetimetester.py | 94 +++++++- Misc/ACKS | 1 + 3 files changed, 241 insertions(+), 143 deletions(-) diff --git a/Lib/datetime.py b/Lib/datetime.py --- a/Lib/datetime.py +++ b/Lib/datetime.py @@ -12,7 +12,7 @@ MINYEAR = 1 MAXYEAR = 9999 -_MAXORDINAL = 3652059 # date.max.toordinal() +_MAXORDINAL = 3652059 # date.max.toordinal() # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in @@ -26,7 +26,7 @@ # -1 is a placeholder for indexing purposes. _DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] -_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes. +_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes. dbm = 0 for dim in _DAYS_IN_MONTH[1:]: _DAYS_BEFORE_MONTH.append(dbm) @@ -162,9 +162,9 @@ # Correctly substitute for %z and %Z escapes in strftime formats. def _wrap_strftime(object, format, timetuple): # Don't call utcoffset() or tzname() unless actually needed. - freplace = None # the string to use for %f - zreplace = None # the string to use for %z - Zreplace = None # the string to use for %Z + freplace = None # the string to use for %f + zreplace = None # the string to use for %z + Zreplace = None # the string to use for %Z # Scan format for %z and %Z escapes, replacing as needed. newformat = [] @@ -217,11 +217,6 @@ newformat = "".join(newformat) return _time.strftime(newformat, timetuple) -def _call_tzinfo_method(tzinfo, methname, tzinfoarg): - if tzinfo is None: - return None - return getattr(tzinfo, methname)(tzinfoarg) - # Just raise TypeError if the arg isn't None or a string. def _check_tzname(name): if name is not None and not isinstance(name, str): @@ -245,13 +240,31 @@ raise ValueError("tzinfo.%s() must return a whole number " "of minutes, got %s" % (name, offset)) if not -timedelta(1) < offset < timedelta(1): - raise ValueError("%s()=%s, must be must be strictly between" - " -timedelta(hours=24) and timedelta(hours=24)" - % (name, offset)) + raise ValueError("%s()=%s, must be must be strictly between " + "-timedelta(hours=24) and timedelta(hours=24)" % + (name, offset)) + +def _check_int_field(value): + if isinstance(value, int): + return value + if not isinstance(value, float): + try: + value = value.__int__() + except AttributeError: + pass + else: + if isinstance(value, int): + return value + raise TypeError('__int__ returned non-int (type %s)' % + type(value).__name__) + raise TypeError('an integer is required (got type %s)' % + type(value).__name__) + raise TypeError('integer argument expected, got float') def _check_date_fields(year, month, day): - if not isinstance(year, int): - raise TypeError('int expected') + year = _check_int_field(year) + month = _check_int_field(month) + day = _check_int_field(day) if not MINYEAR <= year <= MAXYEAR: raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year) if not 1 <= month <= 12: @@ -259,10 +272,13 @@ dim = _days_in_month(year, month) if not 1 <= day <= dim: raise ValueError('day must be in 1..%d' % dim, day) + return year, month, day def _check_time_fields(hour, minute, second, microsecond): - if not isinstance(hour, int): - raise TypeError('int expected') + hour = _check_int_field(hour) + minute = _check_int_field(minute) + second = _check_int_field(second) + microsecond = _check_int_field(microsecond) if not 0 <= hour <= 23: raise ValueError('hour must be in 0..23', hour) if not 0 <= minute <= 59: @@ -271,6 +287,7 @@ 
raise ValueError('second must be in 0..59', second) if not 0 <= microsecond <= 999999: raise ValueError('microsecond must be in 0..999999', microsecond) + return hour, minute, second, microsecond def _check_tzinfo_arg(tz): if tz is not None and not isinstance(tz, tzinfo): @@ -297,7 +314,7 @@ Representation: (days, seconds, microseconds). Why? Because I felt like it. """ - __slots__ = '_days', '_seconds', '_microseconds' + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0): @@ -363,38 +380,26 @@ # secondsfrac isn't referenced again if isinstance(microseconds, float): - microseconds += usdouble - microseconds = round(microseconds, 0) - seconds, microseconds = divmod(microseconds, 1e6) - assert microseconds == int(microseconds) - assert seconds == int(seconds) - days, seconds = divmod(seconds, 24.*3600.) - assert days == int(days) - assert seconds == int(seconds) - d += int(days) - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 3 * 24 * 3600 - else: + microseconds = round(microseconds + usdouble) seconds, microseconds = divmod(microseconds, 1000000) days, seconds = divmod(seconds, 24*3600) d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 3 * 24 * 3600 - microseconds = float(microseconds) - microseconds += usdouble - microseconds = round(microseconds, 0) + s += seconds + else: + microseconds = int(microseconds) + seconds, microseconds = divmod(microseconds, 1000000) + days, seconds = divmod(seconds, 24*3600) + d += days + s += seconds + microseconds = round(microseconds + usdouble) + assert isinstance(s, int) + assert isinstance(microseconds, int) assert abs(s) <= 3 * 24 * 3600 assert abs(microseconds) < 3.1e6 # Just a little bit of carrying possible for microseconds and seconds. - assert isinstance(microseconds, float) - assert int(microseconds) == microseconds - us = int(microseconds) - seconds, us = divmod(us, 1000000) - s += seconds # cant't overflow - assert isinstance(s, int) + seconds, us = divmod(microseconds, 1000000) + s += seconds days, s = divmod(s, 24*3600) d += days @@ -402,14 +407,14 @@ assert isinstance(s, int) and 0 <= s < 24*3600 assert isinstance(us, int) and 0 <= us < 1000000 + if abs(d) > 999999999: + raise OverflowError("timedelta # of days is too large: %d" % d) + self = object.__new__(cls) - self._days = d self._seconds = s self._microseconds = us - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) - + self._hashcode = -1 return self def __repr__(self): @@ -442,7 +447,7 @@ def total_seconds(self): """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds)*10**6 + + return ((self.days * 86400 + self.seconds) * 10**6 + self.microseconds) / 10**6 # Read-only field accessors @@ -597,7 +602,9 @@ return _cmp(self._getstate(), other._getstate()) def __hash__(self): - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode def __bool__(self): return (self._days != 0 or @@ -645,7 +652,7 @@ Properties (readonly): year, month, day """ - __slots__ = '_year', '_month', '_day' + __slots__ = '_year', '_month', '_day', '_hashcode' def __new__(cls, year, month=None, day=None): """Constructor. 
@@ -654,17 +661,19 @@ year, month, day (required, base 1) """ - if (isinstance(year, bytes) and len(year) == 4 and - 1 <= year[2] <= 12 and month is None): # Month is sane + if month is None and isinstance(year, bytes) and len(year) == 4 and \ + 1 <= year[2] <= 12: # Pickle support self = object.__new__(cls) self.__setstate(year) + self._hashcode = -1 return self - _check_date_fields(year, month, day) + year, month, day = _check_date_fields(year, month, day) self = object.__new__(cls) self._year = year self._month = month self._day = day + self._hashcode = -1 return self # Additional constructors @@ -728,6 +737,8 @@ return _wrap_strftime(self, fmt, self.timetuple()) def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) if len(fmt) != 0: return self.strftime(fmt) return str(self) @@ -784,7 +795,6 @@ month = self._month if day is None: day = self._day - _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -827,7 +837,9 @@ def __hash__(self): "Hash." - return hash(self._getstate()) + if self._hashcode == -1: + self._hashcode = hash(self._getstate()) + return self._hashcode # Computations @@ -897,8 +909,6 @@ return bytes([yhi, ylo, self._month, self._day]), def __setstate(self, string): - if len(string) != 4 or not (1 <= string[2] <= 12): - raise TypeError("not enough arguments") yhi, ylo, self._month, self._day = string self._year = yhi * 256 + ylo @@ -917,6 +927,7 @@ Subclasses must override the name(), utcoffset() and dst() methods. """ __slots__ = () + def tzname(self, dt): "datetime -> string name of time zone." raise NotImplementedError("tzinfo subclass must override tzname()") @@ -1003,6 +1014,7 @@ Properties (readonly): hour, minute, second, microsecond, tzinfo """ + __slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode' def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Constructor. 
@@ -1013,18 +1025,22 @@ second, microsecond (default to zero) tzinfo (default to None) """ + if isinstance(hour, bytes) and len(hour) == 6 and hour[0] < 24: + # Pickle support + self = object.__new__(cls) + self.__setstate(hour, minute or None) + self._hashcode = -1 + return self + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) + _check_tzinfo_arg(tzinfo) self = object.__new__(cls) - if isinstance(hour, bytes) and len(hour) == 6: - # Pickle support - self.__setstate(hour, minute or None) - return self - _check_tzinfo_arg(tzinfo) - _check_time_fields(hour, minute, second, microsecond) self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1109,8 +1125,8 @@ if base_compare: return _cmp((self._hour, self._minute, self._second, self._microsecond), - (other._hour, other._minute, other._second, - other._microsecond)) + (other._hour, other._minute, other._second, + other._microsecond)) if myoff is None or otoff is None: if allow_mixed: return 2 # arbitrary non-zero value @@ -1123,16 +1139,20 @@ def __hash__(self): """Hash.""" - tzoff = self.utcoffset() - if not tzoff: # zero or None - return hash(self._getstate()[0]) - h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, - timedelta(hours=1)) - assert not m % timedelta(minutes=1), "whole minute" - m //= timedelta(minutes=1) - if 0 <= h < 24: - return hash(time(h, m, self.second, self.microsecond)) - return hash((h, m, self.second, self.microsecond)) + if self._hashcode == -1: + tzoff = self.utcoffset() + if not tzoff: # zero or None + self._hashcode = hash(self._getstate()[0]) + else: + h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff, + timedelta(hours=1)) + assert not m % timedelta(minutes=1), "whole minute" + m //= timedelta(minutes=1) + if 0 <= h < 24: + self._hashcode = hash(time(h, m, self.second, self.microsecond)) + else: + self._hashcode = hash((h, m, self.second, self.microsecond)) + return self._hashcode # Conversion to string @@ -1195,6 +1215,8 @@ return _wrap_strftime(self, fmt, timetuple) def __format__(self, fmt): + if not isinstance(fmt, str): + raise TypeError("must be str, not %s" % type(fmt).__name__) if len(fmt) != 0: return self.strftime(fmt) return str(self) @@ -1251,8 +1273,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) # Pickle support. @@ -1268,15 +1288,11 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): - if len(string) != 6 or string[0] >= 24: - raise TypeError("an integer is required") - (self._hour, self._minute, self._second, - us1, us2, us3) = string + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") + self._hour, self._minute, self._second, us1, us2, us3 = string self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg %r" % tzinfo) + self._tzinfo = tzinfo def __reduce__(self): return (time, self._getstate()) @@ -1293,25 +1309,30 @@ The year, month and day arguments are required. tzinfo may be None, or an instance of a tzinfo subclass. The remaining arguments may be ints. 
""" + __slots__ = date.__slots__ + time.__slots__ - __slots__ = date.__slots__ + ( - '_hour', '_minute', '_second', - '_microsecond', '_tzinfo') def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): - if isinstance(year, bytes) and len(year) == 10: + if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2] <= 12: # Pickle support - self = date.__new__(cls, year[:4]) + self = object.__new__(cls) self.__setstate(year, month) + self._hashcode = -1 return self + year, month, day = _check_date_fields(year, month, day) + hour, minute, second, microsecond = _check_time_fields( + hour, minute, second, microsecond) _check_tzinfo_arg(tzinfo) - _check_time_fields(hour, minute, second, microsecond) - self = date.__new__(cls, year, month, day) + self = object.__new__(cls) + self._year = year + self._month = month + self._day = day self._hour = hour self._minute = minute self._second = second self._microsecond = microsecond self._tzinfo = tzinfo + self._hashcode = -1 return self # Read-only field accessors @@ -1346,7 +1367,6 @@ A timezone info object may be passed in as well. """ - _check_tzinfo_arg(tz) converter = _time.localtime if tz is None else _time.gmtime @@ -1385,11 +1405,6 @@ ss = min(ss, 59) # clamp out leap seconds if the platform has them return cls(y, m, d, hh, mm, ss, us) - # XXX This is supposed to do better than we *can* do by using time.time(), - # XXX if the platform supports a more accurate way. The C implementation - # XXX uses gettimeofday on platforms that have it, but that isn't - # XXX available from Python. So now() may return different results - # XXX across the implementations. @classmethod def now(cls, tz=None): "Construct a datetime from time.time() and optional time zone info." @@ -1476,11 +1491,8 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - _check_date_fields(year, month, day) - _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) - return datetime(year, month, day, hour, minute, second, - microsecond, tzinfo) + return datetime(year, month, day, hour, minute, second, microsecond, + tzinfo) def astimezone(self, tz=None): if tz is None: @@ -1550,10 +1562,9 @@ Optional argument sep specifies the separator between date and time, default 'T'. """ - s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, - sep) + - _format_time(self._hour, self._minute, self._second, - self._microsecond)) + s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + + _format_time(self._hour, self._minute, self._second, + self._microsecond)) off = self.utcoffset() if off is not None: if off.days < 0: @@ -1569,7 +1580,7 @@ def __repr__(self): """Convert to formal string, for repr().""" - L = [self._year, self._month, self._day, # These are never zero + L = [self._year, self._month, self._day, # These are never zero self._hour, self._minute, self._second, self._microsecond] if L[-1] == 0: del L[-1] @@ -1609,7 +1620,9 @@ it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. 
""" - name = _call_tzinfo_method(self._tzinfo, "tzname", self) + if self._tzinfo is None: + return None + name = self._tzinfo.tzname(self) _check_tzname(name) return name @@ -1695,9 +1708,9 @@ return _cmp((self._year, self._month, self._day, self._hour, self._minute, self._second, self._microsecond), - (other._year, other._month, other._day, - other._hour, other._minute, other._second, - other._microsecond)) + (other._year, other._month, other._day, + other._hour, other._minute, other._second, + other._microsecond)) if myoff is None or otoff is None: if allow_mixed: return 2 # arbitrary non-zero value @@ -1755,12 +1768,15 @@ return base + otoff - myoff def __hash__(self): - tzoff = self.utcoffset() - if tzoff is None: - return hash(self._getstate()[0]) - days = _ymd2ord(self.year, self.month, self.day) - seconds = self.hour * 3600 + self.minute * 60 + self.second - return hash(timedelta(days, seconds, self.microsecond) - tzoff) + if self._hashcode == -1: + tzoff = self.utcoffset() + if tzoff is None: + self._hashcode = hash(self._getstate()[0]) + else: + days = _ymd2ord(self.year, self.month, self.day) + seconds = self.hour * 3600 + self.minute * 60 + self.second + self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff) + return self._hashcode # Pickle support. @@ -1777,14 +1793,13 @@ return (basestate, self._tzinfo) def __setstate(self, string, tzinfo): + if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class): + raise TypeError("bad tzinfo state arg") (yhi, ylo, self._month, self._day, self._hour, self._minute, self._second, us1, us2, us3) = string self._year = yhi * 256 + ylo self._microsecond = (((us1 << 8) | us2) << 8) | us3 - if tzinfo is None or isinstance(tzinfo, _tzinfo_class): - self._tzinfo = tzinfo - else: - raise TypeError("bad tzinfo state arg %r" % tzinfo) + self._tzinfo = tzinfo def __reduce__(self): return (self.__class__, self._getstate()) @@ -1800,7 +1815,7 @@ # XXX This could be done more efficiently THURSDAY = 3 firstday = _ymd2ord(year, 1, 1) - firstweekday = (firstday + 6) % 7 # See weekday() above + firstweekday = (firstday + 6) % 7 # See weekday() above week1monday = firstday - firstweekday if firstweekday > THURSDAY: week1monday += 7 @@ -1821,13 +1836,12 @@ elif not isinstance(name, str): raise TypeError("name must be a string") if not cls._minoffset <= offset <= cls._maxoffset: - raise ValueError("offset must be a timedelta" - " strictly between -timedelta(hours=24) and" - " timedelta(hours=24).") - if (offset.microseconds != 0 or - offset.seconds % 60 != 0): - raise ValueError("offset must be a timedelta" - " representing a whole number of minutes") + raise ValueError("offset must be a timedelta " + "strictly between -timedelta(hours=24) and " + "timedelta(hours=24).") + if (offset.microseconds != 0 or offset.seconds % 60 != 0): + raise ValueError("offset must be a timedelta " + "representing a whole number of minutes") return cls._create(offset, name) @classmethod @@ -2124,14 +2138,13 @@ pass else: # Clean up unused names - del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, - _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES, - _build_struct_time, _call_tzinfo_method, _check_date_fields, - _check_time_fields, _check_tzinfo_arg, _check_tzname, - _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month, - _days_before_year, _days_in_month, _format_time, _is_leap, - _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class, - _wrap_strftime, _ymd2ord) + del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, 
_DI400Y, + _DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time, + _check_date_fields, _check_int_field, _check_time_fields, + _check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror, + _date_class, _days_before_month, _days_before_year, _days_in_month, + _format_time, _is_leap, _isoweek1monday, _math, _ord2ymd, + _time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord) # XXX Since import * above excludes names that start with _, # docstring does not get overwritten. In the future, it may be # appropriate to maintain a single module level docstring and diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py --- a/Lib/test/datetimetester.py +++ b/Lib/test/datetimetester.py @@ -50,6 +50,17 @@ self.assertEqual(datetime.MINYEAR, 1) self.assertEqual(datetime.MAXYEAR, 9999) + def test_name_cleanup(self): + if '_Fast' not in str(self): + return + datetime = datetime_module + names = set(name for name in dir(datetime) + if not name.startswith('__') and not name.endswith('__')) + allowed = set(['MAXYEAR', 'MINYEAR', 'date', 'datetime', + 'datetime_CAPI', 'time', 'timedelta', 'timezone', + 'tzinfo']) + self.assertEqual(names - allowed, set([])) + ############################################################################# # tzinfo tests @@ -616,8 +627,12 @@ # Single-field rounding. eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0 eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0 + eq(td(milliseconds=0.5/1000), td(microseconds=0)) + eq(td(milliseconds=-0.5/1000), td(microseconds=0)) eq(td(milliseconds=0.6/1000), td(microseconds=1)) eq(td(milliseconds=-0.6/1000), td(microseconds=-1)) + eq(td(seconds=0.5/10**6), td(microseconds=0)) + eq(td(seconds=-0.5/10**6), td(microseconds=0)) # Rounding due to contributions from more than one field. us_per_hour = 3600e6 @@ -1131,11 +1146,13 @@ #check that this standard extension works t.strftime("%f") - def test_format(self): dt = self.theclass(2007, 9, 10) self.assertEqual(dt.__format__(''), str(dt)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + dt.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -1391,9 +1408,10 @@ for month_byte in b'9', b'\0', b'\r', b'\xff': self.assertRaises(TypeError, self.theclass, base[:2] + month_byte + base[3:]) - # Good bytes, but bad tzinfo: - self.assertRaises(TypeError, self.theclass, - bytes([1] * len(base)), 'EST') + if issubclass(self.theclass, datetime): + # Good bytes, but bad tzinfo: + with self.assertRaisesRegex(TypeError, '^bad tzinfo state arg$'): + self.theclass(bytes([1] * len(base)), 'EST') for ord_byte in range(1, 13): # This shouldn't blow up because of the month byte alone. 
If @@ -1469,6 +1487,9 @@ dt = self.theclass(2007, 9, 10, 4, 5, 1, 123) self.assertEqual(dt.__format__(''), str(dt)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + dt.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -1789,6 +1810,7 @@ tzinfo=timezone(timedelta(hours=-5), 'EST')) self.assertEqual(t.timestamp(), 18000 + 3600 + 2*60 + 3 + 4*1e-6) + def test_microsecond_rounding(self): for fts in [self.theclass.fromtimestamp, self.theclass.utcfromtimestamp]: @@ -1839,6 +1861,7 @@ for insane in -1e200, 1e200: self.assertRaises(OverflowError, self.theclass.utcfromtimestamp, insane) + @unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps") def test_negative_float_fromtimestamp(self): # The result is tz-dependent; at least test that this doesn't @@ -2218,6 +2241,9 @@ t = self.theclass(1, 2, 3, 4) self.assertEqual(t.__format__(''), str(t)) + with self.assertRaisesRegex(TypeError, '^must be str, not int$'): + t.__format__(123) + # check that a derived class's __str__() gets called class A(self.theclass): def __str__(self): @@ -2347,6 +2373,9 @@ for hour_byte in ' ', '9', chr(24), '\xff': self.assertRaises(TypeError, self.theclass, hour_byte + base[1:]) + # Good bytes, but bad tzinfo: + with self.assertRaisesRegex(TypeError, '^bad tzinfo state arg$'): + self.theclass(bytes([1] * len(base)), 'EST') # A mixin for classes with a tzinfo= argument. Subclasses must define # theclass as a class atribute, and theclass(1, 1, 1, tzinfo=whatever) @@ -2606,7 +2635,7 @@ self.assertRaises(TypeError, t.strftime, "%Z") # Issue #6697: - if '_Fast' in str(type(self)): + if '_Fast' in str(self): Badtzname.tz = '\ud800' self.assertRaises(ValueError, t.strftime, "%Z") @@ -3768,6 +3797,61 @@ self.assertEqual(as_datetime, datetime_sc) self.assertEqual(datetime_sc, as_datetime) + def test_extra_attributes(self): + for x in [date.today(), + time(), + datetime.utcnow(), + timedelta(), + tzinfo(), + timezone(timedelta())]: + with self.assertRaises(AttributeError): + x.abc = 1 + + def test_check_arg_types(self): + import decimal + class Number: + def __init__(self, value): + self.value = value + def __int__(self): + return self.value + + for xx in [decimal.Decimal(10), + decimal.Decimal('10.9'), + Number(10)]: + self.assertEqual(datetime(10, 10, 10, 10, 10, 10, 10), + datetime(xx, xx, xx, xx, xx, xx, xx)) + + with self.assertRaisesRegex(TypeError, '^an integer is required ' + '\(got type str\)$'): + datetime(10, 10, '10') + + f10 = Number(10.9) + with self.assertRaisesRegex(TypeError, '^__int__ returned non-int ' + '\(type float\)$'): + datetime(10, 10, f10) + + class Float(float): + pass + s10 = Float(10.9) + with self.assertRaisesRegex(TypeError, '^integer argument expected, ' + 'got float$'): + datetime(10, 10, s10) + + with self.assertRaises(TypeError): + datetime(10., 10, 10) + with self.assertRaises(TypeError): + datetime(10, 10., 10) + with self.assertRaises(TypeError): + datetime(10, 10, 10.) + with self.assertRaises(TypeError): + datetime(10, 10, 10, 10.) + with self.assertRaises(TypeError): + datetime(10, 10, 10, 10, 10.) + with self.assertRaises(TypeError): + datetime(10, 10, 10, 10, 10, 10.) + with self.assertRaises(TypeError): + datetime(10, 10, 10, 10, 10, 10, 10.) 
+ def test_main(): support.run_unittest(__name__) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -689,6 +689,7 @@ Anton Kasyanov Lou Kates Hiroaki Kawai +Brian Kearns Sebastien Keim Ryan Kelly Dan Kenigsberg -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Mon Sep 29 08:49:55 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 29 Sep 2014 08:49:55 +0200 Subject: [Python-checkins] Daily reference leaks (5313b4c0bb6c): sum=3 Message-ID: results for 5313b4c0bb6c on branch "default" -------------------------------------------- test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogQjr_0j', '-x'] From python-checkins at python.org Mon Sep 29 16:24:22 2014 From: python-checkins at python.org (r.david.murray) Date: Mon, 29 Sep 2014 14:24:22 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2320135=3A_FAQ_entry_for_list_mutation=2E__=28?= =?utf-8?q?See_also_90b07d422bd9=2E=29?= Message-ID: <20140929142416.1233.14044@mail.hg.python.org> https://hg.python.org/cpython/rev/3d924bbfdcbc changeset: 92624:3d924bbfdcbc parent: 92622:5313b4c0bb6c parent: 92623:138f54622841 user: R David Murray date: Mon Sep 29 10:19:20 2014 -0400 summary: Merge: #20135: FAQ entry for list mutation. (See also 90b07d422bd9.) I accidentally merged this in 90b07d422bd9; this merge addresses the last review comments on the patch. files: Doc/faq/programming.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -473,7 +473,7 @@ After the call to :meth:`~list.append`, the content of the mutable object has changed from ``[]`` to ``[10]``. Since both the variables refer to the same -object, accessing either one of them accesses the modified value ``[10]``. +object, using either name accesses the modified value ``[10]``. If we instead assign an immutable object to ``x``:: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 16:24:22 2014 From: python-checkins at python.org (r.david.murray) Date: Mon, 29 Sep 2014 14:24:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzIwMTM1OiBGQVEg?= =?utf-8?q?entry_for_list_mutation=2E__=28See_also_6375bf34fff6=2E=29?= Message-ID: <20140929142416.100041.72707@mail.hg.python.org> https://hg.python.org/cpython/rev/138f54622841 changeset: 92623:138f54622841 branch: 3.4 parent: 92620:ab1570d0132d user: R David Murray date: Mon Sep 29 10:17:28 2014 -0400 summary: #20135: FAQ entry for list mutation. (See also 6375bf34fff6.) I accidentally committed this in 6375bf34fff6; this changeset addresses the last review comments on the patch. This is a perennial question and something someone opens a ticket for probably every other month or so, so I'm surprised we didn't already have a FAQ entry for it. The original patch was written by M. Votz, refined first by Ezio Melotti and further refined by me. files: Doc/faq/programming.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -473,7 +473,7 @@ After the call to :meth:`~list.append`, the content of the mutable object has changed from ``[]`` to ``[10]``. 
Since both the variables refer to the same -object, accessing either one of them accesses the modified value ``[10]``. +object, using either name accesses the modified value ``[10]``. If we instead assign an immutable object to ``x``:: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 16:24:22 2014 From: python-checkins at python.org (r.david.murray) Date: Mon, 29 Sep 2014 14:24:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogIzIwMTM1OiBGQVEg?= =?utf-8?q?entry_for_list_mutation=2E?= Message-ID: <20140929142416.1233.32896@mail.hg.python.org> https://hg.python.org/cpython/rev/2b9db1fce82e changeset: 92625:2b9db1fce82e branch: 2.7 parent: 92619:0ad19246d16d user: R David Murray date: Mon Sep 29 10:23:43 2014 -0400 summary: #20135: FAQ entry for list mutation. This is a perennial question and something someone opens a ticket for probably every other month or so, so I'm surprised we didn't already have a FAQ entry for it. The original patch was written by M. Votz, refined first by Ezio Melotti and further refined by me. files: Doc/faq/programming.rst | 77 ++++++++++++++++++++++++++++- 1 files changed, 76 insertions(+), 1 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -548,7 +548,7 @@ # Calculate the value result = ... expensive computation ... - _cache[(arg1, arg2)] = result # Store result in the cache + _cache[(arg1, arg2)] = result # Store result in the cache return result You could use a global variable containing a dictionary instead of the default @@ -604,6 +604,81 @@ the values ``42``, ``314``, and ``somevar`` are arguments. +Why did changing list 'y' also change list 'x'? +------------------------------------------------ + +If you wrote code like:: + + >>> x = [] + >>> y = x + >>> y.append(10) + >>> y + [10] + >>> x + [10] + +you might be wondering why appending an element to ``y`` changed ``x`` too. + +There are two factors that produce this result: + +1) Variables are simply names that refer to objects. Doing ``y = x`` doesn't + create a copy of the list -- it creates a new variable ``y`` that refers to + the same object ``x`` refers to. This means that there is only one object + (the list), and both ``x`` and ``y`` refer to it. +2) Lists are :term:`mutable`, which means that you can change their content. + +After the call to :meth:`~list.append`, the content of the mutable object has +changed from ``[]`` to ``[10]``. Since both the variables refer to the same +object, using either name accesses the modified value ``[10]``. + +If we instead assign an immutable object to ``x``:: + + >>> x = 5 # ints are immutable + >>> y = x + >>> x = x + 1 # 5 can't be mutated, we are creating a new object here + >>> x + 6 + >>> y + 5 + +we can see that in this case ``x`` and ``y`` are not equal anymore. This is +because integers are :term:`immutable`, and when we do ``x = x + 1`` we are not +mutating the int ``5`` by incrementing its value; instead, we are creating a +new object (the int ``6``) and assigning it to ``x`` (that is, changing which +object ``x`` refers to). After this assignment we have two objects (the ints +``6`` and ``5``) and two variables that refer to them (``x`` now refers to +``6`` but ``y`` still refers to ``5``). + +Some operations (for example ``y.append(10)`` and ``y.sort()``) mutate the +object, whereas superficially similar operations (for example ``y = y + [10]`` +and ``sorted(y)``) create a new object. 
In general in Python (and in all cases +in the standard library) a method that mutates an object will return ``None`` +to help avoid getting the two types of operations confused. So if you +mistakenly write ``y.sort()`` thinking it will give you a sorted copy of ``y``, +you'll instead end up with ``None``, which will likely cause your program to +generate an easily diagnosed error. + +However, there is one class of operations where the same operation sometimes +has different behaviors with different types: the augmented assignment +operators. For example, ``+=`` mutates lists but not tuples or ints (``a_list ++= [1, 2, 3]`` is equivalent to ``a_list.extend([1, 2, 3])`` and mutates +``a_list``, whereas ``some_tuple += (1, 2, 3)`` and ``some_int += 1`` create +new objects). + +In other words: + +* If we have a mutable object (:class:`list`, :class:`dict`, :class:`set`, + etc.), we can use some specific operations to mutate it and all the variables + that refer to it will see the change. +* If we have an immutable object (:class:`str`, :class:`int`, :class:`tuple`, + etc.), all the variables that refer to it will always see the same value, + but operations that transform that value into a new value always return a new + object. + +If you want to know if two variables refer to the same object or not, you can +use the :keyword:`is` operator, or the built-in function :func:`id`. + + How do I write a function with output parameters (call by reference)? --------------------------------------------------------------------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 17:15:02 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 29 Sep 2014 15:15:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322510=3A_Get_rid_?= =?utf-8?q?of_little_overhead_of_testing_re=2EDEBUG_flag=2E?= Message-ID: <20140929151421.100133.22525@mail.hg.python.org> https://hg.python.org/cpython/rev/565096a32ce4 changeset: 92626:565096a32ce4 parent: 92624:3d924bbfdcbc user: Serhiy Storchaka date: Mon Sep 29 18:13:02 2014 +0300 summary: Issue #22510: Get rid of little overhead of testing re.DEBUG flag. 
files: Lib/re.py | 12 +++++------- 1 files changed, 5 insertions(+), 7 deletions(-) diff --git a/Lib/re.py b/Lib/re.py --- a/Lib/re.py +++ b/Lib/re.py @@ -273,12 +273,10 @@ _MAXCACHE = 512 def _compile(pattern, flags): # internal: compile pattern - bypass_cache = flags & DEBUG - if not bypass_cache: - try: - return _cache[type(pattern), pattern, flags] - except KeyError: - pass + try: + return _cache[type(pattern), pattern, flags] + except KeyError: + pass if isinstance(pattern, _pattern_type): if flags: raise ValueError( @@ -287,7 +285,7 @@ if not sre_compile.isstring(pattern): raise TypeError("first argument must be string or compiled pattern") p = sre_compile.compile(pattern, flags) - if not bypass_cache: + if not (flags & DEBUG): if len(_cache) >= _MAXCACHE: _cache.clear() _cache[type(pattern), pattern, flags] = p -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 17:26:06 2014 From: python-checkins at python.org (r.david.murray) Date: Mon, 29 Sep 2014 15:26:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_=2317442=3A_Add_chained_tr?= =?utf-8?q?aceback_support_to_InteractiveInterpreter=2E?= Message-ID: <20140929152603.1217.90406@mail.hg.python.org> https://hg.python.org/cpython/rev/2b212a8186e0 changeset: 92627:2b212a8186e0 user: R David Murray date: Mon Sep 29 11:25:00 2014 -0400 summary: #17442: Add chained traceback support to InteractiveInterpreter. Patch by Claudiu Popa. files: Doc/library/code.rst | 3 ++ Doc/whatsnew/3.5.rst | 7 ++++ Lib/code.py | 34 +++++++++++++++-------- Lib/test/test_code_module.py | 35 ++++++++++++++++++++++++ Misc/NEWS | 3 ++ 5 files changed, 70 insertions(+), 12 deletions(-) diff --git a/Doc/library/code.rst b/Doc/library/code.rst --- a/Doc/library/code.rst +++ b/Doc/library/code.rst @@ -114,6 +114,9 @@ because it is within the interpreter object implementation. The output is written by the :meth:`write` method. + .. versionchanged:: 3.5 The full chained traceback is displayed instead + of just the primary traceback. + .. method:: InteractiveInterpreter.write(data) diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -134,6 +134,13 @@ Improved Modules ================ +code +---- + +* The :func:`code.InteractiveInterpreter.showtraceback` method now prints + the full chained traceback, just like the interactive interpreter + (contributed by Claudiu.Popa in :issue:`17442`). + compileall ---------- diff --git a/Lib/code.py b/Lib/code.py --- a/Lib/code.py +++ b/Lib/code.py @@ -137,25 +137,35 @@ The output is written by self.write(), below. 
""" + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb try: - type, value, tb = sys.exc_info() - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - tblist = traceback.extract_tb(tb) - del tblist[:1] - lines = traceback.format_list(tblist) - if lines: - lines.insert(0, "Traceback (most recent call last):\n") - lines.extend(traceback.format_exception_only(type, value)) + lines = [] + for value, tb in traceback._iter_chain(*ei[1:]): + if isinstance(value, str): + lines.append(value) + lines.append('\n') + continue + if tb: + tblist = traceback.extract_tb(tb) + if tb is last_tb: + # The last traceback includes the frame we + # exec'd in + del tblist[:1] + tblines = traceback.format_list(tblist) + if tblines: + lines.append("Traceback (most recent call last):\n") + lines.extend(tblines) + lines.extend(traceback.format_exception_only(type(value), + value)) finally: - tblist = tb = None + tblist = last_tb = ei = None if sys.excepthook is sys.__excepthook__: self.write(''.join(lines)) else: # If someone has set sys.excepthook, we let that take precedence # over self.write - sys.excepthook(type, value, tb) + sys.excepthook(type, value, last_tb) def write(self, data): """Write a string. diff --git a/Lib/test/test_code_module.py b/Lib/test/test_code_module.py --- a/Lib/test/test_code_module.py +++ b/Lib/test/test_code_module.py @@ -1,6 +1,7 @@ "Test InteractiveConsole and InteractiveInterpreter from code module" import sys import unittest +from textwrap import dedent from contextlib import ExitStack from unittest import mock from test import support @@ -78,6 +79,40 @@ self.console.interact(banner='') self.assertEqual(len(self.stderr.method_calls), 1) + def test_cause_tb(self): + self.infunc.side_effect = ["raise ValueError('') from AttributeError", + EOFError('Finished')] + self.console.interact() + output = ''.join(''.join(call[1]) for call in self.stderr.method_calls) + expected = dedent(""" + AttributeError + + The above exception was the direct cause of the following exception: + + Traceback (most recent call last): + File "", line 1, in + ValueError + """) + self.assertIn(expected, output) + + def test_context_tb(self): + self.infunc.side_effect = ["try: ham\nexcept: eggs\n", + EOFError('Finished')] + self.console.interact() + output = ''.join(''.join(call[1]) for call in self.stderr.method_calls) + expected = dedent(""" + Traceback (most recent call last): + File "", line 1, in + NameError: name 'ham' is not defined + + During handling of the above exception, another exception occurred: + + Traceback (most recent call last): + File "", line 2, in + NameError: name 'eggs' is not defined + """) + self.assertIn(expected, output) + def test_main(): support.run_unittest(TestInteractiveConsole) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -145,6 +145,9 @@ Library ------- +- Issue #17442: InteractiveInterpreter now displays the full chained traceback + in its showtraceback method, to match the built in interactive interpreter. + - Issue #10510: distutils register and upload methods now use HTML standards compliant CRLF line endings. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Sep 29 21:50:59 2014 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 29 Sep 2014 19:50:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322437=3A_Number_o?= =?utf-8?q?f_capturing_groups_in_regular_expression_is_no_longer?= Message-ID: <20140929195047.100041.16171@mail.hg.python.org> https://hg.python.org/cpython/rev/0b85ea4bd1af changeset: 92628:0b85ea4bd1af user: Serhiy Storchaka date: Mon Sep 29 22:49:23 2014 +0300 summary: Issue #22437: Number of capturing groups in regular expression is no longer limited by 100. files: Doc/whatsnew/3.5.rst | 6 +++ Lib/sre_compile.py | 6 --- Lib/sre_constants.py | 2 +- Lib/sre_parse.py | 10 +++++ Lib/test/test_re.py | 18 ++++++++- Misc/NEWS | 3 + Modules/_sre.c | 57 ++++++++++++++++++++++--------- Modules/sre.h | 7 +-- 8 files changed, 79 insertions(+), 30 deletions(-) diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -217,6 +217,12 @@ * :class:`os.stat_result` now has a :attr:`~os.stat_result.st_file_attributes` attribute on Windows (contributed by Ben Hoyt in :issue:`21719`). +re +-- + +* Number of capturing groups in regular expression is no longer limited by 100. + (Contributed by Serhiy Storchaka in :issue:`22437`.) + shutil ------ diff --git a/Lib/sre_compile.py b/Lib/sre_compile.py --- a/Lib/sre_compile.py +++ b/Lib/sre_compile.py @@ -470,12 +470,6 @@ # print code - # XXX: get rid of this limitation! - if p.pattern.groups > 100: - raise AssertionError( - "sorry, but this version only supports 100 named groups" - ) - # map in either direction groupindex = p.pattern.groupdict indexgroup = [None] * p.pattern.groups diff --git a/Lib/sre_constants.py b/Lib/sre_constants.py --- a/Lib/sre_constants.py +++ b/Lib/sre_constants.py @@ -15,7 +15,7 @@ MAGIC = 20031017 -from _sre import MAXREPEAT +from _sre import MAXREPEAT, MAXGROUPS # SRE standard exception (access as sre.error) # should this really be here? 
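A quick way to see the effect of the lifted limit, using a synthetic pattern rather than the ones from the test suite:

    import re

    # Before this change, any pattern with more than 100 capturing groups was
    # rejected in sre_compile with AssertionError("sorry, but this version
    # only supports 100 named groups"); it now compiles normally.
    pattern = re.compile('(a)' * 150)     # 150 numbered capturing groups
    match = pattern.match('a' * 150)
    print(pattern.groups)                 # 150
    print(match.group(150))               # 'a'

The new ceiling is MAXGROUPS, exported by _sre from SRE_MAXGROUPS in Modules/sre.h, rather than a hard-coded 100.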
diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py --- a/Lib/sre_parse.py +++ b/Lib/sre_parse.py @@ -72,6 +72,8 @@ def opengroup(self, name=None): gid = self.groups self.groups = gid + 1 + if self.groups > MAXGROUPS: + raise error("groups number is too large") if name is not None: ogid = self.groupdict.get(name, None) if ogid is not None: @@ -695,8 +697,14 @@ else: try: condgroup = int(condname) + if condgroup < 0: + raise ValueError except ValueError: raise error("bad character in group name") + if not condgroup: + raise error("bad group number") + if condgroup >= MAXGROUPS: + raise error("the group number is too large") else: # flags if not source.next in FLAGS: @@ -822,6 +830,8 @@ index = int(name) if index < 0: raise error("negative group number") + if index >= MAXGROUPS: + raise error("the group number is too large") except ValueError: if not name.isidentifier(): raise error("bad character in group name") diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -193,6 +193,7 @@ def test_symbolic_groups(self): re.compile('(?Px)(?P=a)(?(a)y)') re.compile('(?Px)(?P=a1)(?(a1)y)') + re.compile('(?Px)\1(?(1)y)') self.assertRaises(re.error, re.compile, '(?P)(?P)') self.assertRaises(re.error, re.compile, '(?Px)') self.assertRaises(re.error, re.compile, '(?P=)') @@ -212,6 +213,10 @@ re.compile('(?Px)(?P=?)(?(?)y)') re.compile('(?Px)(?P=???????)(?(???????)y)') self.assertRaises(re.error, re.compile, '(?Px)') + # Support > 100 groups. + pat = '|'.join('x(?P%x)y' % (i, i) for i in range(1, 200 + 1)) + pat = '(?:%s)(?(200)z|t)' % pat + self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5)) def test_symbolic_refs(self): self.assertRaises(re.error, re.sub, '(?Px)', '\gx)', r'\g', 'xx'), 'xx') self.assertEqual(re.sub('(?Px)', r'\g', 'xx'), 'xx') self.assertRaises(re.error, re.sub, '(?Px)', r'\g', 'xx') + # Support > 100 groups. + pat = '|'.join('x(?P%x)y' % (i, i) for i in range(1, 200 + 1)) + self.assertEqual(re.sub(pat, '\g<200>', 'xc8yzxc8y'), 'c8zc8') def test_re_subn(self): self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) @@ -404,6 +412,10 @@ self.assertIsNone(p.match('abd')) self.assertIsNone(p.match('ac')) + # Support > 100 groups. + pat = '|'.join('x(?P%x)y' % (i, i) for i in range(1, 200 + 1)) + pat = '(?:%s)(?(200)z)' % pat + self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5)) def test_re_groupref(self): self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(), @@ -1070,8 +1082,10 @@ # a RuntimeError is raised instead of OverflowError. long_overflow = 2**128 self.assertRaises(TypeError, re.finditer, "a", {}) - self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow]) - self.assertRaises(TypeError, _sre.compile, {}, 0, []) + with self.assertRaises(OverflowError): + _sre.compile("abc", 0, [long_overflow], 0, [], []) + with self.assertRaises(TypeError): + _sre.compile({}, 0, [], 0, [], []) def test_search_dot_unicode(self): self.assertTrue(re.search("123.*-", '123abc-')) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -145,6 +145,9 @@ Library ------- +- Issue #22437: Number of capturing groups in regular expression is no longer + limited by 100. + - Issue #17442: InteractiveInterpreter now displays the full chained traceback in its showtraceback method, to match the built in interactive interpreter. 
diff --git a/Modules/_sre.c b/Modules/_sre.c --- a/Modules/_sre.c +++ b/Modules/_sre.c @@ -357,6 +357,11 @@ memset(state, 0, sizeof(SRE_STATE)); + state->mark = PyMem_New(void *, pattern->groups * 2); + if (!state->mark) { + PyErr_NoMemory(); + goto err; + } state->lastmark = -1; state->lastindex = -1; @@ -409,6 +414,8 @@ return string; err: + PyMem_Del(state->mark); + state->mark = NULL; if (state->buffer.buf) PyBuffer_Release(&state->buffer); return NULL; @@ -421,6 +428,8 @@ PyBuffer_Release(&state->buffer); Py_XDECREF(state->string); data_stack_dealloc(state); + PyMem_Del(state->mark); + state->mark = NULL; } /* calculate offset from start of string */ @@ -560,6 +569,7 @@ PyObject *pattern = NULL; SRE_STATE state; Py_ssize_t status; + PyObject *match; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Onn$O:match", _keywords, @@ -579,12 +589,14 @@ status = sre_match(&state, PatternObject_GetCode(self), 0); TRACE(("|%p|%p|END\n", PatternObject_GetCode(self), state.ptr)); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { + state_fini(&state); return NULL; - + } + + match = pattern_new_match(self, &state, status); state_fini(&state); - - return (PyObject *)pattern_new_match(self, &state, status); + return match; } static PyObject* @@ -592,6 +604,7 @@ { SRE_STATE state; Py_ssize_t status; + PyObject *match; PyObject *string = NULL, *string2 = NULL; Py_ssize_t start = 0; @@ -616,12 +629,14 @@ status = sre_match(&state, PatternObject_GetCode(self), 1); TRACE(("|%p|%p|END\n", PatternObject_GetCode(self), state.ptr)); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { + state_fini(&state); return NULL; - + } + + match = pattern_new_match(self, &state, status); state_fini(&state); - - return pattern_new_match(self, &state, status); + return match; } static PyObject* @@ -629,6 +644,7 @@ { SRE_STATE state; Py_ssize_t status; + PyObject *match; PyObject *string = NULL, *string2 = NULL; Py_ssize_t start = 0; @@ -652,12 +668,14 @@ TRACE(("|%p|%p|END\n", PatternObject_GetCode(self), state.ptr)); + if (PyErr_Occurred()) { + state_fini(&state); + return NULL; + } + + match = pattern_new_match(self, &state, status); state_fini(&state); - - if (PyErr_Occurred()) - return NULL; - - return pattern_new_match(self, &state, status); + return match; } static PyObject* @@ -1417,7 +1435,7 @@ PyObject* groupindex = NULL; PyObject* indexgroup = NULL; - if (!PyArg_ParseTuple(args, "OiO!|nOO", &pattern, &flags, + if (!PyArg_ParseTuple(args, "OiO!nOO", &pattern, &flags, &PyList_Type, &code, &groups, &groupindex, &indexgroup)) return NULL; @@ -1933,10 +1951,9 @@ static int _validate_outer(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups) { - if (groups < 0 || groups > 100 || code >= end || end[-1] != SRE_OP_SUCCESS) + if (groups < 0 || (size_t)groups > SRE_MAXGROUPS || + code >= end || end[-1] != SRE_OP_SUCCESS) FAIL; - if (groups == 0) /* fix for simplejson */ - groups = 100; /* 100 groups should always be safe */ return _validate_inner(code, end-1, groups); } @@ -2747,6 +2764,12 @@ Py_DECREF(x); } + x = PyLong_FromUnsignedLong(SRE_MAXGROUPS); + if (x) { + PyDict_SetItemString(d, "MAXGROUPS", x); + Py_DECREF(x); + } + x = PyUnicode_FromString(copyright); if (x) { PyDict_SetItemString(d, "copyright", x); diff --git a/Modules/sre.h b/Modules/sre.h --- a/Modules/sre.h +++ b/Modules/sre.h @@ -18,8 +18,10 @@ #define SRE_CODE Py_UCS4 #if SIZEOF_SIZE_T > 4 # define SRE_MAXREPEAT (~(SRE_CODE)0) +# define SRE_MAXGROUPS ((~(SRE_CODE)0) / 2) #else # define SRE_MAXREPEAT ((SRE_CODE)PY_SSIZE_T_MAX) +# define SRE_MAXGROUPS 
((SRE_CODE)PY_SSIZE_T_MAX / SIZEOF_SIZE_T / 2) #endif typedef struct { @@ -52,9 +54,6 @@ typedef unsigned int (*SRE_TOLOWER_HOOK)(unsigned int ch); -/* FIXME: shouldn't be a constant, really... */ -#define SRE_MARK_SIZE 200 - typedef struct SRE_REPEAT_T { Py_ssize_t count; SRE_CODE* pattern; /* points to REPEAT operator arguments */ @@ -76,7 +75,7 @@ /* registers */ Py_ssize_t lastindex; Py_ssize_t lastmark; - void* mark[SRE_MARK_SIZE]; + void** mark; /* dynamically allocated stuff */ char* data_stack; size_t data_stack_size; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:09 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_NEWS_issue_for?= =?utf-8?q?_=2322518?= Message-ID: <20140929225509.75007.99857@mail.hg.python.org> https://hg.python.org/cpython/rev/88332ea4c140 changeset: 92632:88332ea4c140 branch: 3.3 user: Benjamin Peterson date: Mon Sep 29 18:42:35 2014 -0400 summary: NEWS issue for #22518 files: Misc/NEWS | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22518: Fix integer overflow issues in latin-1 encoding. + Library ------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:09 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_add_NEWS_note_?= =?utf-8?q?for_=2322518?= Message-ID: <20140929225509.23199.51699@mail.hg.python.org> https://hg.python.org/cpython/rev/3b7e93249700 changeset: 92630:3b7e93249700 branch: 2.7 user: Benjamin Peterson date: Mon Sep 29 18:41:48 2014 -0400 summary: add NEWS note for #22518 files: Misc/NEWS | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22518: Fix integer overflow issues in latin-1 encoding. + - Issue #22379: Fix empty exception message in a TypeError raised in ``str.join``. 
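The paths hardened by this fix (the unicodeobject.c changes in the changesets that follow) are the ones where a codec error handler substitutes a replacement for undecodable or unencodable data and the result buffer has to be resized; the visible behaviour of the handlers is unchanged. For example, with Python 3 syntax:

    # Unencodable characters routed through an error handler; the fix only
    # hardens the buffer-size arithmetic (requiredsize) on these code paths.
    '\u20acabc'.encode('latin-1', errors='replace')            # b'?abc'
    '\u20acabc'.encode('latin-1', errors='xmlcharrefreplace')  # b'&#8364;abc'
    '\u20acabc'.encode('latin-1', errors='backslashreplace')   # b'\\u20acabc'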
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:09 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_cleanup_overfl?= =?utf-8?q?owing_handling_in_unicode=5Fdecode=5Fcall=5Ferrorhandler_and?= Message-ID: <20140929225509.86187.20019@mail.hg.python.org> https://hg.python.org/cpython/rev/3c67d19c624f changeset: 92631:3c67d19c624f branch: 3.3 parent: 92453:a4e0aee1a9b5 user: Benjamin Peterson date: Mon Sep 29 18:18:57 2014 -0400 summary: cleanup overflowing handling in unicode_decode_call_errorhandler and unicode_encode_ucs1 (closes #22518) files: Objects/unicodeobject.c | 76 +++++++++++++++++++++------- 1 files changed, 57 insertions(+), 19 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4168,9 +4168,15 @@ at the new input position), so we won't have to check space when there are no errors in the rest of the string) */ Py_ssize_t replen = PyUnicode_GET_LENGTH(repunicode); - requiredsize = *outpos + replen + insize-newpos; + requiredsize = *outpos; + if (requiredsize > PY_SSIZE_T_MAX - replen) + goto overflow; + requiredsize += replen; + if (requiredsize > PY_SSIZE_T_MAX - (insize - newpos)) + goto overflow; + requiredsize += insize - newpos; if (requiredsize > outsize) { - if (requiredsize<2*outsize) + if (outsize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*outsize) requiredsize = 2*outsize; if (unicode_resize(output, requiredsize) < 0) goto onError; @@ -4191,9 +4197,15 @@ have+the replacement+the rest of the string (starting at the new input position), so we won't have to check space when there are no errors in the rest of the string) */ - requiredsize = *outpos + repwlen + insize-newpos; + requiredsize = *outpos; + if (requiredsize > PY_SSIZE_T_MAX - repwlen) + goto overflow; + requiredsize += repwlen; + if (requiredsize > PY_SSIZE_T_MAX - (insize - newpos)) + goto overflow; + requiredsize += insize - newpos; if (requiredsize > outsize) { - if (requiredsize < 2*outsize) + if (outsize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*outsize) requiredsize = 2*outsize; if (unicode_resize(output, requiredsize) < 0) goto onError; @@ -4210,6 +4222,11 @@ onError: Py_XDECREF(restuple); return res; + + overflow: + PyErr_SetString(PyExc_OverflowError, + "decoded result is too long for a Python string"); + goto onError; } /* --- UTF-7 Codec -------------------------------------------------------- */ @@ -6358,7 +6375,7 @@ Py_ssize_t collstart = pos; Py_ssize_t collend = pos; /* find all unecodable characters */ - while ((collend < size) && (PyUnicode_READ(kind, data, collend)>=limit)) + while ((collend < size) && (PyUnicode_READ(kind, data, collend) >= limit)) ++collend; /* cache callback name lookup (if not done yet, i.e. 
it's the first error) */ if (known_errorHandler==-1) { @@ -6378,36 +6395,43 @@ raise_encode_exception(&exc, encoding, unicode, collstart, collend, reason); goto onError; case 2: /* replace */ - while (collstart++ PY_SSIZE_T_MAX - incr) + goto overflow; + requiredsize += incr; + } + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) goto onError; @@ -6433,6 +6457,10 @@ if (repsize > 1) { /* Make room for all additional bytes. */ respos = str - PyBytes_AS_STRING(res); + if (ressize > PY_SSIZE_T_MAX - repsize - 1) { + Py_DECREF(repunicode); + goto overflow; + } if (_PyBytes_Resize(&res, ressize+repsize-1)) { Py_DECREF(repunicode); goto onError; @@ -6451,9 +6479,15 @@ we won't have to check space for encodable characters) */ respos = str - PyBytes_AS_STRING(res); repsize = PyUnicode_GET_LENGTH(repunicode); - requiredsize = respos+repsize+(size-collend); + requiredsize = respos; + if (requiredsize > PY_SSIZE_T_MAX - repsize) + goto overflow; + requiredsize += repsize; + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) { Py_DECREF(repunicode); @@ -6491,6 +6525,10 @@ Py_XDECREF(exc); return res; + overflow: + PyErr_SetString(PyExc_OverflowError, + "encoded result is too long for a Python string"); + onError: Py_XDECREF(res); Py_XDECREF(errorHandler); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:09 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3_=28closes_=2322518=29?= Message-ID: <20140929225509.23207.50599@mail.hg.python.org> https://hg.python.org/cpython/rev/7dab27fffff2 changeset: 92633:7dab27fffff2 branch: 3.4 parent: 92623:138f54622841 parent: 92632:88332ea4c140 user: Benjamin Peterson date: Mon Sep 29 18:50:06 2014 -0400 summary: merge 3.3 (closes #22518) files: Misc/NEWS | 2 + Objects/unicodeobject.c | 64 +++++++++++++++++++++------- 2 files changed, 49 insertions(+), 17 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,8 @@ Core and Builtins ----------------- +- Issue #22518: Fix integer overflow issues in latin-1 encoding. 
+ Library ------- diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4093,16 +4093,21 @@ have+the replacement+the rest of the string (starting at the new input position), so we won't have to check space when there are no errors in the rest of the string) */ - requiredsize = *outpos + repwlen + insize-newpos; + requiredsize = *outpos; + if (requiredsize > PY_SSIZE_T_MAX - repwlen) + goto overflow; + requiredsize += repwlen; + if (requiredsize > PY_SSIZE_T_MAX - (insize - newpos)) + goto overflow; + requiredsize += insize - newpos; if (requiredsize > outsize) { - if (requiredsize < 2*outsize) + if (outsize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*outsize) requiredsize = 2*outsize; if (unicode_resize(output, requiredsize) < 0) goto onError; } wcsncpy(_PyUnicode_WSTR(*output) + *outpos, repwstr, repwlen); *outpos += repwlen; - *endinpos = newpos; *inptr = *input + newpos; @@ -4110,6 +4115,10 @@ Py_XDECREF(restuple); return 0; + overflow: + PyErr_SetString(PyExc_OverflowError, + "decoded result is too long for a Python string"); + onError: Py_XDECREF(restuple); return -1; @@ -6502,7 +6511,7 @@ Py_ssize_t collstart = pos; Py_ssize_t collend = pos; /* find all unecodable characters */ - while ((collend < size) && (PyUnicode_READ(kind, data, collend)>=limit)) + while ((collend < size) && (PyUnicode_READ(kind, data, collend) >= limit)) ++collend; /* cache callback name lookup (if not done yet, i.e. it's the first error) */ if (known_errorHandler==-1) { @@ -6522,36 +6531,43 @@ raise_encode_exception(&exc, encoding, unicode, collstart, collend, reason); goto onError; case 2: /* replace */ - while (collstart++ PY_SSIZE_T_MAX - incr) + goto overflow; + requiredsize += incr; } - requiredsize = respos+repsize+(size-collend); + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) goto onError; @@ -6577,6 +6593,10 @@ if (repsize > 1) { /* Make room for all additional bytes. 
*/ respos = str - PyBytes_AS_STRING(res); + if (ressize > PY_SSIZE_T_MAX - repsize - 1) { + Py_DECREF(repunicode); + goto overflow; + } if (_PyBytes_Resize(&res, ressize+repsize-1)) { Py_DECREF(repunicode); goto onError; @@ -6595,9 +6615,15 @@ we won't have to check space for encodable characters) */ respos = str - PyBytes_AS_STRING(res); repsize = PyUnicode_GET_LENGTH(repunicode); - requiredsize = respos+repsize+(size-collend); + requiredsize = respos; + if (requiredsize > PY_SSIZE_T_MAX - repsize) + goto overflow; + requiredsize += repsize; + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) { Py_DECREF(repunicode); @@ -6635,6 +6661,10 @@ Py_XDECREF(exc); return res; + overflow: + PyErr_SetString(PyExc_OverflowError, + "encoded result is too long for a Python string"); + onError: Py_XDECREF(res); Py_XDECREF(errorHandler); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:10 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_cleanup_overfl?= =?utf-8?q?owing_handling_in_unicode=5Fdecode=5Fcall=5Ferrorhandler_and?= Message-ID: <20140929225508.23197.60418@mail.hg.python.org> https://hg.python.org/cpython/rev/b2e68274aa8e changeset: 92629:b2e68274aa8e branch: 2.7 parent: 92619:0ad19246d16d user: Benjamin Peterson date: Mon Sep 29 18:18:57 2014 -0400 summary: cleanup overflowing handling in unicode_decode_call_errorhandler and unicode_encode_ucs1 (closes #22518) files: Objects/unicodeobject.c | 69 ++++++++++++++++++++-------- 1 files changed, 48 insertions(+), 21 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -1510,9 +1510,15 @@ when there are no errors in the rest of the string) */ repptr = PyUnicode_AS_UNICODE(repunicode); repsize = PyUnicode_GET_SIZE(repunicode); - requiredsize = *outpos + repsize + insize-newpos; + requiredsize = *outpos; + if (requiredsize > PY_SSIZE_T_MAX - repsize) + goto overflow; + requiredsize += repsize; + if (requiredsize > PY_SSIZE_T_MAX - (insize - newpos)) + goto overflow; + requiredsize += insize - newpos; if (requiredsize > outsize) { - if (requiredsize<2*outsize) + if (outsize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*outsize) requiredsize = 2*outsize; if (_PyUnicode_Resize(output, requiredsize) < 0) goto onError; @@ -1529,6 +1535,11 @@ onError: Py_XDECREF(restuple); return res; + + overflow: + PyErr_SetString(PyExc_OverflowError, + "decoded result is too long for a Python string"); + goto onError; } /* --- UTF-7 Codec -------------------------------------------------------- */ @@ -3646,7 +3657,7 @@ const Py_UNICODE *collstart = p; const Py_UNICODE *collend = p; /* find all unecodable characters */ - while ((collend < endp) && ((*collend)>=limit)) + while ((collend < endp) && ((*collend) >= limit)) ++collend; /* cache callback name lookup (if not done yet, i.e. 
it's the first error) */ if (known_errorHandler==-1) { @@ -3666,34 +3677,41 @@ raise_encode_exception(&exc, encoding, startp, size, collstart-startp, collend-startp, reason); goto onError; case 2: /* replace */ - while (collstart++ PY_SSIZE_T_MAX - incr) + goto overflow; + requiredsize += incr; } - requiredsize = respos+repsize+(endp-collend); + if (requiredsize > PY_SSIZE_T_MAX - (endp - collend)) + goto overflow; + requiredsize += endp - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyString_Resize(&res, requiredsize)) goto onError; @@ -3716,11 +3734,16 @@ /* need more space? (at least enough for what we have+the replacement+the rest of the string, so we won't have to check space for encodable characters) */ - respos = str-PyString_AS_STRING(res); + respos = str - PyString_AS_STRING(res); repsize = PyUnicode_GET_SIZE(repunicode); - requiredsize = respos+repsize+(endp-collend); + if (respos > PY_SSIZE_T_MAX - repsize) + goto overflow; + requiredsize = respos + repsize; + if (requiredsize > PY_SSIZE_T_MAX - (endp - collend)) + goto overflow; + requiredsize += endp - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyString_Resize(&res, requiredsize)) { Py_DECREF(repunicode); @@ -3731,7 +3754,7 @@ } /* check if there is anything unencodable in the replacement and copy it to the output */ - for (uni2 = PyUnicode_AS_UNICODE(repunicode);repsize-->0; ++uni2, ++str) { + for (uni2 = PyUnicode_AS_UNICODE(repunicode); repsize-->0; ++uni2, ++str) { c = *uni2; if (c >= limit) { raise_encode_exception(&exc, encoding, startp, size, @@ -3747,14 +3770,18 @@ } } /* Resize if we allocated to much */ - respos = str-PyString_AS_STRING(res); - if (respos https://hg.python.org/cpython/rev/c55a75d4bcc7 changeset: 92635:c55a75d4bcc7 branch: 2.7 parent: 92630:3b7e93249700 parent: 92625:2b9db1fce82e user: Benjamin Peterson date: Mon Sep 29 18:55:02 2014 -0400 summary: merge heads files: Doc/faq/programming.rst | 77 ++++++++++++++++++++++++++++- 1 files changed, 76 insertions(+), 1 deletions(-) diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst --- a/Doc/faq/programming.rst +++ b/Doc/faq/programming.rst @@ -548,7 +548,7 @@ # Calculate the value result = ... expensive computation ... - _cache[(arg1, arg2)] = result # Store result in the cache + _cache[(arg1, arg2)] = result # Store result in the cache return result You could use a global variable containing a dictionary instead of the default @@ -604,6 +604,81 @@ the values ``42``, ``314``, and ``somevar`` are arguments. +Why did changing list 'y' also change list 'x'? +------------------------------------------------ + +If you wrote code like:: + + >>> x = [] + >>> y = x + >>> y.append(10) + >>> y + [10] + >>> x + [10] + +you might be wondering why appending an element to ``y`` changed ``x`` too. + +There are two factors that produce this result: + +1) Variables are simply names that refer to objects. Doing ``y = x`` doesn't + create a copy of the list -- it creates a new variable ``y`` that refers to + the same object ``x`` refers to. This means that there is only one object + (the list), and both ``x`` and ``y`` refer to it. +2) Lists are :term:`mutable`, which means that you can change their content. + +After the call to :meth:`~list.append`, the content of the mutable object has +changed from ``[]`` to ``[10]``. 
Since both the variables refer to the same +object, using either name accesses the modified value ``[10]``. + +If we instead assign an immutable object to ``x``:: + + >>> x = 5 # ints are immutable + >>> y = x + >>> x = x + 1 # 5 can't be mutated, we are creating a new object here + >>> x + 6 + >>> y + 5 + +we can see that in this case ``x`` and ``y`` are not equal anymore. This is +because integers are :term:`immutable`, and when we do ``x = x + 1`` we are not +mutating the int ``5`` by incrementing its value; instead, we are creating a +new object (the int ``6``) and assigning it to ``x`` (that is, changing which +object ``x`` refers to). After this assignment we have two objects (the ints +``6`` and ``5``) and two variables that refer to them (``x`` now refers to +``6`` but ``y`` still refers to ``5``). + +Some operations (for example ``y.append(10)`` and ``y.sort()``) mutate the +object, whereas superficially similar operations (for example ``y = y + [10]`` +and ``sorted(y)``) create a new object. In general in Python (and in all cases +in the standard library) a method that mutates an object will return ``None`` +to help avoid getting the two types of operations confused. So if you +mistakenly write ``y.sort()`` thinking it will give you a sorted copy of ``y``, +you'll instead end up with ``None``, which will likely cause your program to +generate an easily diagnosed error. + +However, there is one class of operations where the same operation sometimes +has different behaviors with different types: the augmented assignment +operators. For example, ``+=`` mutates lists but not tuples or ints (``a_list ++= [1, 2, 3]`` is equivalent to ``a_list.extend([1, 2, 3])`` and mutates +``a_list``, whereas ``some_tuple += (1, 2, 3)`` and ``some_int += 1`` create +new objects). + +In other words: + +* If we have a mutable object (:class:`list`, :class:`dict`, :class:`set`, + etc.), we can use some specific operations to mutate it and all the variables + that refer to it will see the change. +* If we have an immutable object (:class:`str`, :class:`int`, :class:`tuple`, + etc.), all the variables that refer to it will always see the same value, + but operations that transform that value into a new value always return a new + object. + +If you want to know if two variables refer to the same object or not, you can +use the :keyword:`is` operator, or the built-in function :func:`id`. + + How do I write a function with output parameters (call by reference)? --------------------------------------------------------------------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 00:55:15 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 22:55:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_3=2E4_=28closes_=2322518=29?= Message-ID: <20140929225510.23199.67514@mail.hg.python.org> https://hg.python.org/cpython/rev/f86fde20e9ce changeset: 92634:f86fde20e9ce parent: 92628:0b85ea4bd1af parent: 92633:7dab27fffff2 user: Benjamin Peterson date: Mon Sep 29 18:53:58 2014 -0400 summary: merge 3.4 (closes #22518) files: Misc/NEWS | 62 ++++++++++++++++++++++++++++ Objects/unicodeobject.c | 64 +++++++++++++++++++++------- 2 files changed, 109 insertions(+), 17 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22518: Fix integer overflow issues in latin-1 encoding. 
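The closing suggestion about :keyword:`is` and :func:`id` in the new entry can be illustrated with the same list example it opens with:

    >>> x = []
    >>> y = x          # y is just another name for the same list object
    >>> y is x
    True
    >>> id(x) == id(y)
    True
    >>> y = [10]       # rebinding y to a new list breaks the aliasing
    >>> y is x
    False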
+ - Issue #16324: _charset parameter of MIMEText now also accepts email.charset.Charset instances. Initial patch by Claude Paroz. @@ -27,6 +29,66 @@ argument contains not permitted null character or byte. - Issue #22258: Fix the internal function set_inheritable() on Illumos. + +Library +------- + +- Issue #22448: Improve canceled timer handles cleanup to prevent + unbound memory usage. Patch by Joshua Moore-Oliva. + +Build +----- + +- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by + Jonathan Hosmer. + + +What's New in Python 3.4.2? +=========================== + +Release date: 2014-10-06 + +Core and Builtins +----------------- + +Library +------- + +- Issue #10510: distutils register and upload methods now use HTML standards + compliant CRLF line endings. + +- Issue #9850: Fixed macpath.join() for empty first component. Patch by + Oleg Oshmyan. + +- Issue #22427: TemporaryDirectory no longer attempts to clean up twice when + used in the with statement in generator. + +- Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS + directory attributes. + +- Issue #21866: ZipFile.close() no longer writes ZIP64 central directory + records if allowZip64 is false. + +- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re + module. Removed trailing spaces in debugging output. + +- Issue #22423: Unhandled exception in thread no longer causes unhandled + AttributeError when sys.stderr is None. + +- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects + line buffering, rather than block buffering. Patch by Akira Li. + + +What's New in Python 3.4.2rc1? +============================== + +Release date: 2014-09-22 + +Core and Builtins +----------------- + +- Issue #22258: Fix the the internal function set_inheritable() on Illumos. +>>>>>>> other This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() now falls back to the slower ``fcntl()`` (``F_GETFD`` and then ``F_SETFD``). diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4093,16 +4093,21 @@ have+the replacement+the rest of the string (starting at the new input position), so we won't have to check space when there are no errors in the rest of the string) */ - requiredsize = *outpos + repwlen + insize-newpos; + requiredsize = *outpos; + if (requiredsize > PY_SSIZE_T_MAX - repwlen) + goto overflow; + requiredsize += repwlen; + if (requiredsize > PY_SSIZE_T_MAX - (insize - newpos)) + goto overflow; + requiredsize += insize - newpos; if (requiredsize > outsize) { - if (requiredsize < 2*outsize) + if (outsize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*outsize) requiredsize = 2*outsize; if (unicode_resize(output, requiredsize) < 0) goto onError; } wcsncpy(_PyUnicode_WSTR(*output) + *outpos, repwstr, repwlen); *outpos += repwlen; - *endinpos = newpos; *inptr = *input + newpos; @@ -4110,6 +4115,10 @@ Py_XDECREF(restuple); return 0; + overflow: + PyErr_SetString(PyExc_OverflowError, + "decoded result is too long for a Python string"); + onError: Py_XDECREF(restuple); return -1; @@ -6502,7 +6511,7 @@ Py_ssize_t collstart = pos; Py_ssize_t collend = pos; /* find all unecodable characters */ - while ((collend < size) && (PyUnicode_READ(kind, data, collend)>=limit)) + while ((collend < size) && (PyUnicode_READ(kind, data, collend) >= limit)) ++collend; /* cache callback name lookup (if not done yet, i.e. 
it's the first error) */ if (known_errorHandler==-1) { @@ -6522,36 +6531,43 @@ raise_encode_exception(&exc, encoding, unicode, collstart, collend, reason); goto onError; case 2: /* replace */ - while (collstart++ PY_SSIZE_T_MAX - incr) + goto overflow; + requiredsize += incr; } - requiredsize = respos+repsize+(size-collend); + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) goto onError; @@ -6577,6 +6593,10 @@ if (repsize > 1) { /* Make room for all additional bytes. */ respos = str - PyBytes_AS_STRING(res); + if (ressize > PY_SSIZE_T_MAX - repsize - 1) { + Py_DECREF(repunicode); + goto overflow; + } if (_PyBytes_Resize(&res, ressize+repsize-1)) { Py_DECREF(repunicode); goto onError; @@ -6595,9 +6615,15 @@ we won't have to check space for encodable characters) */ respos = str - PyBytes_AS_STRING(res); repsize = PyUnicode_GET_LENGTH(repunicode); - requiredsize = respos+repsize+(size-collend); + requiredsize = respos; + if (requiredsize > PY_SSIZE_T_MAX - repsize) + goto overflow; + requiredsize += repsize; + if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) + goto overflow; + requiredsize += size - collend; if (requiredsize > ressize) { - if (requiredsize<2*ressize) + if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) requiredsize = 2*ressize; if (_PyBytes_Resize(&res, requiredsize)) { Py_DECREF(repunicode); @@ -6635,6 +6661,10 @@ Py_XDECREF(exc); return res; + overflow: + PyErr_SetString(PyExc_OverflowError, + "encoded result is too long for a Python string"); + onError: Py_XDECREF(res); Py_XDECREF(errorHandler); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:11:12 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:11:12 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_fix_overflow_c?= =?utf-8?q?hecking_in_PyString=5FRepr_=28closes_=2322519=29?= Message-ID: <20140929231112.75003.44720@mail.hg.python.org> https://hg.python.org/cpython/rev/d9cd11eda152 changeset: 92636:d9cd11eda152 branch: 2.7 user: Benjamin Peterson date: Mon Sep 29 19:01:18 2014 -0400 summary: fix overflow checking in PyString_Repr (closes #22519) files: Misc/NEWS | 2 ++ Objects/stringobject.c | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22519: Fix overflow checking in PyString_Repr. + - Issue #22518: Fix integer overflow issues in latin-1 encoding. 
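The stringobject.c change that follows takes a slightly different route for issue #22519: instead of computing newsize = 2 + 4 * Py_SIZE(op) and then trying to detect a wrapped product, it bounds the length up front, since 2 + 4*n exceeds PY_SSIZE_T_MAX exactly when n > (PY_SSIZE_T_MAX - 2) / 4. A short Python check of that equivalence (the 64-bit limit is an assumption; in Python the product cannot wrap, so this only verifies the algebra)::

    PY_SSIZE_T_MAX = 2**63 - 1                      # assumed 64-bit Py_ssize_t

    def too_long(n):
        # The rewritten precondition: decided from n alone, no overflowing arithmetic.
        return n > (PY_SSIZE_T_MAX - 2) // 4

    boundary = (PY_SSIZE_T_MAX - 2) // 4
    for n in (0, 10, boundary, boundary + 1):
        assert too_long(n) == (2 + 4 * n > PY_SSIZE_T_MAX)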
- Issue #22379: Fix empty exception message in a TypeError raised in diff --git a/Objects/stringobject.c b/Objects/stringobject.c --- a/Objects/stringobject.c +++ b/Objects/stringobject.c @@ -926,13 +926,14 @@ PyString_Repr(PyObject *obj, int smartquotes) { register PyStringObject* op = (PyStringObject*) obj; - size_t newsize = 2 + 4 * Py_SIZE(op); + size_t newsize; PyObject *v; - if (newsize > PY_SSIZE_T_MAX || newsize / 4 != Py_SIZE(op)) { + if (Py_SIZE(op) > (PY_SSIZE_T_MAX - 2)/4) { PyErr_SetString(PyExc_OverflowError, "string is too large to make repr"); return NULL; } + newsize = 2 + 4*Py_SIZE(op); v = PyString_FromStringAndSize((char *)NULL, newsize); if (v == NULL) { return NULL; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:11:12 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:11:12 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_fix_overflow_c?= =?utf-8?q?hecking_in_PyBytes=5FRepr_=28closes_=2322519=29?= Message-ID: <20140929231112.86181.2790@mail.hg.python.org> https://hg.python.org/cpython/rev/f5c662a7f7e6 changeset: 92637:f5c662a7f7e6 branch: 3.3 parent: 92632:88332ea4c140 user: Benjamin Peterson date: Mon Sep 29 19:01:18 2014 -0400 summary: fix overflow checking in PyBytes_Repr (closes #22519) files: Misc/NEWS | 2 ++ Objects/bytesobject.c | 28 ++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22519: Fix overflow checking in PyBytes_Repr. + - Issue #22518: Fix integer overflow issues in latin-1 encoding. Library diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -593,28 +593,27 @@ newsize = 3; /* b'' */ s = (unsigned char*)op->ob_sval; for (i = 0; i < length; i++) { + Py_ssize_t incr = 1; switch(s[i]) { - case '\'': squotes++; newsize++; break; - case '"': dquotes++; newsize++; break; + case '\'': squotes++; break; + case '"': dquotes++; break; case '\\': case '\t': case '\n': case '\r': - newsize += 2; break; /* \C */ + incr = 2; break; /* \C */ default: if (s[i] < ' ' || s[i] >= 0x7f) - newsize += 4; /* \xHH */ - else - newsize++; + incr = 4; /* \xHH */ } + if (newsize > PY_SSIZE_T_MAX - incr) + goto overflow; + newsize += incr; } quote = '\''; if (smartquotes && squotes && !dquotes) quote = '"'; - if (squotes && quote == '\'') + if (squotes && quote == '\'') { + if (newsize > PY_SSIZE_T_MAX - squotes) + goto overflow; newsize += squotes; - - if (newsize > (PY_SSIZE_T_MAX - sizeof(PyUnicodeObject) - 1)) { - PyErr_SetString(PyExc_OverflowError, - "bytes object is too large to make repr"); - return NULL; } v = PyUnicode_New(newsize, 127); @@ -646,6 +645,11 @@ *p++ = quote; assert(_PyUnicode_CheckConsistency(v, 1)); return v; + + overflow: + PyErr_SetString(PyExc_OverflowError, + "bytes object is too large to make repr"); + return NULL; } static PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:11:14 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:11:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI1MTkp?= Message-ID: <20140929231113.23189.43114@mail.hg.python.org> https://hg.python.org/cpython/rev/0ddc5fc5f395 changeset: 92639:0ddc5fc5f395 parent: 92634:f86fde20e9ce parent: 
92638:ed31cdf11ac2 user: Benjamin Peterson date: Mon Sep 29 19:11:05 2014 -0400 summary: merge 3.4 (#22519) files: Misc/NEWS | 2 ++ Objects/bytesobject.c | 28 ++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #22519: Fix overflow checking in PyBytes_Repr. + - Issue #22518: Fix integer overflow issues in latin-1 encoding. - Issue #16324: _charset parameter of MIMEText now also accepts diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -629,28 +629,27 @@ newsize = 3; /* b'' */ s = (unsigned char*)op->ob_sval; for (i = 0; i < length; i++) { + Py_ssize_t incr = 1; switch(s[i]) { - case '\'': squotes++; newsize++; break; - case '"': dquotes++; newsize++; break; + case '\'': squotes++; break; + case '"': dquotes++; break; case '\\': case '\t': case '\n': case '\r': - newsize += 2; break; /* \C */ + incr = 2; break; /* \C */ default: if (s[i] < ' ' || s[i] >= 0x7f) - newsize += 4; /* \xHH */ - else - newsize++; + incr = 4; /* \xHH */ } + if (newsize > PY_SSIZE_T_MAX - incr) + goto overflow; + newsize += incr; } quote = '\''; if (smartquotes && squotes && !dquotes) quote = '"'; - if (squotes && quote == '\'') + if (squotes && quote == '\'') { + if (newsize > PY_SSIZE_T_MAX - squotes) + goto overflow; newsize += squotes; - - if (newsize > (PY_SSIZE_T_MAX - sizeof(PyUnicodeObject) - 1)) { - PyErr_SetString(PyExc_OverflowError, - "bytes object is too large to make repr"); - return NULL; } v = PyUnicode_New(newsize, 127); @@ -682,6 +681,11 @@ *p++ = quote; assert(_PyUnicode_CheckConsistency(v, 1)); return v; + + overflow: + PyErr_SetString(PyExc_OverflowError, + "bytes object is too large to make repr"); + return NULL; } static PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:11:14 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:11:14 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3_=28closes_=2322519=29?= Message-ID: <20140929231112.86177.52739@mail.hg.python.org> https://hg.python.org/cpython/rev/ed31cdf11ac2 changeset: 92638:ed31cdf11ac2 branch: 3.4 parent: 92633:7dab27fffff2 parent: 92637:f5c662a7f7e6 user: Benjamin Peterson date: Mon Sep 29 19:09:49 2014 -0400 summary: merge 3.3 (closes #22519) files: Misc/NEWS | 2 ++ Objects/bytesobject.c | 28 ++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,8 @@ Core and Builtins ----------------- +- Issue #22519: Fix overflow checking in PyBytes_Repr. + - Issue #22518: Fix integer overflow issues in latin-1 encoding. 
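The PyBytes_Repr rewrite below does the same accounting character by character: printable ASCII costs one byte, the short backslash escapes cost two, and everything else is rendered as \xHH and costs four, with each addition guarded before it happens. The sizing rule can be mirrored in Python and cross-checked against repr() for inputs that contain no quote characters (quote balancing is deliberately left out of this sketch, and the PY_SSIZE_T_MAX value is an assumption)::

    PY_SSIZE_T_MAX = 2**63 - 1

    def repr_size(data):
        size = 3                                   # the surrounding b''
        for byte in data:
            if byte in (ord('\\'), ord('\t'), ord('\n'), ord('\r')):
                incr = 2                           # \C escape
            elif byte < 0x20 or byte >= 0x7f:
                incr = 4                           # \xHH escape
            else:
                incr = 1                           # printable ASCII
            if size > PY_SSIZE_T_MAX - incr:
                raise OverflowError("bytes object is too large to make repr")
            size += incr
        return size

    sample = b"ab\n\xff"
    assert repr_size(sample) == len(repr(sample))  # 11 characters either way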
Library diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -603,28 +603,27 @@ newsize = 3; /* b'' */ s = (unsigned char*)op->ob_sval; for (i = 0; i < length; i++) { + Py_ssize_t incr = 1; switch(s[i]) { - case '\'': squotes++; newsize++; break; - case '"': dquotes++; newsize++; break; + case '\'': squotes++; break; + case '"': dquotes++; break; case '\\': case '\t': case '\n': case '\r': - newsize += 2; break; /* \C */ + incr = 2; break; /* \C */ default: if (s[i] < ' ' || s[i] >= 0x7f) - newsize += 4; /* \xHH */ - else - newsize++; + incr = 4; /* \xHH */ } + if (newsize > PY_SSIZE_T_MAX - incr) + goto overflow; + newsize += incr; } quote = '\''; if (smartquotes && squotes && !dquotes) quote = '"'; - if (squotes && quote == '\'') + if (squotes && quote == '\'') { + if (newsize > PY_SSIZE_T_MAX - squotes) + goto overflow; newsize += squotes; - - if (newsize > (PY_SSIZE_T_MAX - sizeof(PyUnicodeObject) - 1)) { - PyErr_SetString(PyExc_OverflowError, - "bytes object is too large to make repr"); - return NULL; } v = PyUnicode_New(newsize, 127); @@ -656,6 +655,11 @@ *p++ = quote; assert(_PyUnicode_CheckConsistency(v, 1)); return v; + + overflow: + PyErr_SetString(PyExc_OverflowError, + "bytes object is too large to make repr"); + return NULL; } static PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:12:57 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:12:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40?= Message-ID: <20140929231255.86171.26345@mail.hg.python.org> https://hg.python.org/cpython/rev/b1442a6832ab changeset: 92642:b1442a6832ab parent: 92639:0ddc5fc5f395 parent: 92641:158127da99ac user: Benjamin Peterson date: Mon Sep 29 19:12:44 2014 -0400 summary: merge 3.4 files: Objects/bytesobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -620,7 +620,7 @@ { PyBytesObject* op = (PyBytesObject*) obj; Py_ssize_t i, length = Py_SIZE(op); - size_t newsize, squotes, dquotes; + Py_ssize_t newsize, squotes, dquotes; PyObject *v; unsigned char quote, *s, *p; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:12:57 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 2014 23:12:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_these_variable?= =?utf-8?q?s_ought_to_be_Py=5Fssize=5Ft?= Message-ID: <20140929231254.23215.55373@mail.hg.python.org> https://hg.python.org/cpython/rev/226740b14f1c changeset: 92640:226740b14f1c branch: 3.3 parent: 92637:f5c662a7f7e6 user: Benjamin Peterson date: Mon Sep 29 19:12:26 2014 -0400 summary: these variables ought to be Py_ssize_t files: Objects/bytesobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -584,7 +584,7 @@ { register PyBytesObject* op = (PyBytesObject*) obj; Py_ssize_t i, length = Py_SIZE(op); - size_t newsize, squotes, dquotes; + Py_ssize_t newsize, squotes, dquotes; PyObject *v; unsigned char quote, *s, *p; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 01:12:57 2014 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 29 Sep 
2014 23:12:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3?= Message-ID: <20140929231254.23201.63065@mail.hg.python.org> https://hg.python.org/cpython/rev/158127da99ac changeset: 92641:158127da99ac branch: 3.4 parent: 92638:ed31cdf11ac2 parent: 92640:226740b14f1c user: Benjamin Peterson date: Mon Sep 29 19:12:37 2014 -0400 summary: merge 3.3 files: Objects/bytesobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -594,7 +594,7 @@ { PyBytesObject* op = (PyBytesObject*) obj; Py_ssize_t i, length = Py_SIZE(op); - size_t newsize, squotes, dquotes; + Py_ssize_t newsize, squotes, dquotes; PyObject *v; unsigned char quote, *s, *p; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 03:54:58 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 01:54:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_fix_windows_tests_=28=2316?= =?utf-8?q?662=29?= Message-ID: <20140930015455.23189.62797@mail.hg.python.org> https://hg.python.org/cpython/rev/090dc85f4226 changeset: 92643:090dc85f4226 user: Benjamin Peterson date: Mon Sep 29 21:54:28 2014 -0400 summary: fix windows tests (#16662) From Robert Collins. files: Lib/unittest/test/test_discovery.py | 29 ++++++++-------- 1 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Lib/unittest/test/test_discovery.py b/Lib/unittest/test/test_discovery.py --- a/Lib/unittest/test/test_discovery.py +++ b/Lib/unittest/test/test_discovery.py @@ -1,4 +1,5 @@ -import os +import os.path +from os.path import abspath import re import sys import types @@ -250,7 +251,7 @@ def restore_isdir(): os.path.isdir = original_isdir self.addCleanup(restore_isdir) - self.addCleanup(sys.path.remove, '/foo') + self.addCleanup(sys.path.remove, abspath('/foo')) # Test data: we expect the following: # a listdir to find our package, and a isfile and isdir check on it. @@ -263,8 +264,8 @@ # the module load tests for both package and plain module called, # and the plain module result nested by the package module load_tests # indicating that it was processed and could have been mutated. - vfs = {'/foo': ['my_package'], - '/foo/my_package': ['__init__.py', 'test_module.py']} + vfs = {abspath('/foo'): ['my_package'], + abspath('/foo/my_package'): ['__init__.py', 'test_module.py']} def list_dir(path): return list(vfs[path]) os.listdir = list_dir @@ -301,10 +302,10 @@ loader._get_module_from_name = lambda name: Module(name) loader.suiteClass = lambda thing: thing - loader._top_level_dir = '/foo' + loader._top_level_dir = abspath('/foo') # this time no '.py' on the pattern so that it can match # a test package - suite = list(loader._find_tests('/foo', 'test*.py')) + suite = list(loader._find_tests(abspath('/foo'), 'test*.py')) # We should have loaded tests from both my_package and # my_pacakge.test_module, and also run the load_tests hook in both. 
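The point of the abspath() churn in this test is easy to miss: the loader computes absolute paths internally, and on Windows os.path.abspath('/foo') resolves against the drive of the current directory (for example C:\foo), so a fake filesystem keyed on the literal string '/foo' never matches. Building the keys with abspath() keeps both sides consistent on every platform; the shape of the fixture, reduced to its essentials::

    import os
    from os.path import abspath

    # Keys must be whatever abspath() yields on this platform, not raw POSIX paths.
    vfs = {abspath('/foo'): ['my_package'],
           abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}

    def list_dir(path):
        # The patched os.listdir receives the loader's own abspath()-normalized
        # argument, so the lookup now succeeds on Windows as well as POSIX.
        return list(vfs[path])

    os.listdir = list_dir          # the real test restores this in a cleanup hook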
@@ -404,8 +405,8 @@ test.test_this_does_not_exist() def test_discover_with_init_modules_that_fail_to_import(self): - vfs = {'/foo': ['my_package'], - '/foo/my_package': ['__init__.py', 'test_module.py']} + vfs = {abspath('/foo'): ['my_package'], + abspath('/foo/my_package'): ['__init__.py', 'test_module.py']} self.setup_import_issue_package_tests(vfs) import_calls = [] def _get_module_from_name(name): @@ -413,9 +414,9 @@ raise ImportError("Cannot import Name") loader = unittest.TestLoader() loader._get_module_from_name = _get_module_from_name - suite = loader.discover('/foo') + suite = loader.discover(abspath('/foo')) - self.assertIn('/foo', sys.path) + self.assertIn(abspath('/foo'), sys.path) self.assertEqual(suite.countTestCases(), 1) test = list(list(suite)[0])[0] # extract test from suite with self.assertRaises(ImportError): @@ -439,8 +440,8 @@ self.assertEqual(len(result.skipped), 1) def test_discover_with_init_module_that_raises_SkipTest_on_import(self): - vfs = {'/foo': ['my_package'], - '/foo/my_package': ['__init__.py', 'test_module.py']} + vfs = {abspath('/foo'): ['my_package'], + abspath('/foo/my_package'): ['__init__.py', 'test_module.py']} self.setup_import_issue_package_tests(vfs) import_calls = [] def _get_module_from_name(name): @@ -448,9 +449,9 @@ raise unittest.SkipTest('skipperoo') loader = unittest.TestLoader() loader._get_module_from_name = _get_module_from_name - suite = loader.discover('/foo') + suite = loader.discover(abspath('/foo')) - self.assertIn('/foo', sys.path) + self.assertIn(abspath('/foo'), sys.path) self.assertEqual(suite.countTestCases(), 1) result = unittest.TestResult() suite.run(result) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 04:49:40 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 02:49:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_clear_Buffered?= =?utf-8?q?RWPair_weakrefs_on_deallocation_=28closes_=2322517=29?= Message-ID: <20140930024940.86187.50488@mail.hg.python.org> https://hg.python.org/cpython/rev/e834b32f0422 changeset: 92645:e834b32f0422 branch: 3.3 parent: 92640:226740b14f1c user: Benjamin Peterson date: Mon Sep 29 22:46:57 2014 -0400 summary: clear BufferedRWPair weakrefs on deallocation (closes #22517) files: Lib/test/test_io.py | 6 ++++++ Misc/NEWS | 3 +++ Modules/_io/bufferedio.c | 2 ++ 3 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1524,6 +1524,12 @@ pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) + def test_weakref_clearing(self): + brw = self.tp(self.MockRawIO(), self.MockRawIO()) + ref = weakref.ref(brw) + brw = None + ref = None # Shouldn't segfault. + class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -17,6 +17,9 @@ Library ------- +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + - Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. 
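The bufferedio.c hunk that follows is the actual fix for issue #22517: bufferedrwpair_dealloc() now calls PyObject_ClearWeakRefs() so that any outstanding weak references are cleared while the object is being torn down rather than outliving it. The regression test added above needs nothing more than a pair, a weak reference and a dropped strong reference; a standalone equivalent, with io.BytesIO standing in for the suite's MockRawIO objects (an assumption, any readable/writable raw pair will do)::

    import io
    import weakref

    pair = io.BufferedRWPair(io.BytesIO(), io.BytesIO())
    ref = weakref.ref(pair)
    del pair                    # must not segfault; dealloc clears the weakref list
    assert ref() is None        # the weak reference is now cleanly dead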
diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -2254,6 +2254,8 @@ bufferedrwpair_dealloc(rwpair *self) { _PyObject_GC_UNTRACK(self); + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); Py_CLEAR(self->reader); Py_CLEAR(self->writer); Py_CLEAR(self->dict); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 04:49:40 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 02:49:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_clear_Buffered?= =?utf-8?q?RWPair_weakrefs_on_deallocation_=28closes_=2322517=29?= Message-ID: <20140930024940.86187.32564@mail.hg.python.org> https://hg.python.org/cpython/rev/9b4673d7b046 changeset: 92644:9b4673d7b046 branch: 2.7 parent: 92636:d9cd11eda152 user: Benjamin Peterson date: Mon Sep 29 22:46:57 2014 -0400 summary: clear BufferedRWPair weakrefs on deallocation (closes #22517) files: Lib/test/test_io.py | 6 ++++++ Misc/NEWS | 3 +++ Modules/_io/bufferedio.c | 2 ++ 3 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1474,6 +1474,12 @@ pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) + def test_weakref_clearing(self): + brw = self.tp(self.MockRawIO(), self.MockRawIO()) + ref = weakref.ref(brw) + brw = None + ref = None # Shouldn't segfault. + class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -29,6 +29,9 @@ Library ------- +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + - Issue #10510: distutils register and upload methods now use HTML standards compliant CRLF line endings. diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -2120,6 +2120,8 @@ bufferedrwpair_dealloc(rwpair *self) { _PyObject_GC_UNTRACK(self); + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); Py_CLEAR(self->reader); Py_CLEAR(self->writer); Py_CLEAR(self->dict); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 04:49:41 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 02:49:41 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3_=28=2322517=29?= Message-ID: <20140930024940.75007.99509@mail.hg.python.org> https://hg.python.org/cpython/rev/e54da75100f6 changeset: 92646:e54da75100f6 branch: 3.4 parent: 92641:158127da99ac parent: 92645:e834b32f0422 user: Benjamin Peterson date: Mon Sep 29 22:48:51 2014 -0400 summary: merge 3.3 (#22517) files: Lib/test/test_io.py | 6 ++++++ Misc/NEWS | 3 +++ Modules/_io/bufferedio.c | 2 ++ 3 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1567,6 +1567,12 @@ pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) + def test_weakref_clearing(self): + brw = self.tp(self.MockRawIO(), self.MockRawIO()) + ref = weakref.ref(brw) + brw = None + ref = None # Shouldn't segfault. 
+ class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -16,6 +16,9 @@ Library ------- +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + - Issue #22448: Improve canceled timer handles cleanup to prevent unbound memory usage. Patch by Joshua Moore-Oliva. diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -2297,6 +2297,8 @@ bufferedrwpair_dealloc(rwpair *self) { _PyObject_GC_UNTRACK(self); + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); Py_CLEAR(self->reader); Py_CLEAR(self->writer); Py_CLEAR(self->dict); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 04:49:41 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 02:49:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI1MTcp?= Message-ID: <20140930024941.74997.25198@mail.hg.python.org> https://hg.python.org/cpython/rev/a2add97e28b9 changeset: 92647:a2add97e28b9 parent: 92643:090dc85f4226 parent: 92646:e54da75100f6 user: Benjamin Peterson date: Mon Sep 29 22:49:05 2014 -0400 summary: merge 3.4 (#22517) files: Lib/test/test_io.py | 6 ++++++ Misc/NEWS | 3 +++ Modules/_io/bufferedio.c | 2 ++ 3 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1628,6 +1628,12 @@ pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) + def test_weakref_clearing(self): + brw = self.tp(self.MockRawIO(), self.MockRawIO()) + ref = weakref.ref(brw) + brw = None + ref = None # Shouldn't segfault. + class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -35,6 +35,9 @@ Library ------- +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + - Issue #22448: Improve canceled timer handles cleanup to prevent unbound memory usage. Patch by Joshua Moore-Oliva. 
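The issue #22448 entry above is about asyncio's internal bookkeeping rather than any API change: a TimerHandle that is cancelled long before its deadline used to stay in the loop's scheduled-callback heap until that deadline arrived, so workloads that constantly schedule far-future timeouts and cancel them could hold on to a lot of memory. The workload in question looks like this (the count and the one-hour delay are arbitrary illustration, and nothing here needs to change in user code)::

    import asyncio

    loop = asyncio.new_event_loop()
    try:
        for _ in range(100000):
            handle = loop.call_later(3600, lambda: None)   # far-future callback
            handle.cancel()                                # cancelled almost immediately
        # With the fix, the loop prunes cancelled handles instead of keeping
        # them queued for the full hour.
    finally:
        loop.close()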
diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -2343,6 +2343,8 @@ bufferedrwpair_dealloc(rwpair *self) { _PyObject_GC_UNTRACK(self); + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); Py_CLEAR(self->reader); Py_CLEAR(self->writer); Py_CLEAR(self->dict); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 05:03:07 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 03:03:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_prevent_overfl?= =?utf-8?q?ow_in_unicode=5Frepr_=28closes_=2322520=29?= Message-ID: <20140930030307.86193.69048@mail.hg.python.org> https://hg.python.org/cpython/rev/8ba7e5f43952 changeset: 92648:8ba7e5f43952 branch: 3.3 parent: 92645:e834b32f0422 user: Benjamin Peterson date: Mon Sep 29 23:02:15 2014 -0400 summary: prevent overflow in unicode_repr (closes #22520) files: Misc/NEWS | 3 ++ Objects/unicodeobject.c | 30 +++++++++++++++++----------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #22520: Fix overflow checking when generating the repr of a unicode + object. + - Issue #22519: Fix overflow checking in PyBytes_Repr. - Issue #22518: Fix integer overflow issues in latin-1 encoding. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -12000,28 +12000,34 @@ ikind = PyUnicode_KIND(unicode); for (i = 0; i < isize; i++) { Py_UCS4 ch = PyUnicode_READ(ikind, idata, i); + Py_ssize_t incr = 1; switch (ch) { - case '\'': squote++; osize++; break; - case '"': dquote++; osize++; break; + case '\'': squote++; break; + case '"': dquote++; break; case '\\': case '\t': case '\r': case '\n': - osize += 2; break; + incr = 2; + break; default: /* Fast-path ASCII */ if (ch < ' ' || ch == 0x7f) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x7f) - osize++; - else if (Py_UNICODE_ISPRINTABLE(ch)) { - osize++; + ; + else if (Py_UNICODE_ISPRINTABLE(ch)) max = ch > max ? 
ch : max; - } else if (ch < 0x100) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x10000) - osize += 6; /* \uHHHH */ + incr = 6; /* \uHHHH */ else - osize += 10; /* \uHHHHHHHH */ - } + incr = 10; /* \uHHHHHHHH */ + } + if (osize > PY_SSIZE_T_MAX - incr) { + PyErr_SetString(PyExc_OverflowError, + "string is too long to generate repr"); + return NULL; + } + osize += incr; } quote = '\''; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 05:03:08 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 03:03:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI1MjAp?= Message-ID: <20140930030308.121785.89071@mail.hg.python.org> https://hg.python.org/cpython/rev/245d9679cd5b changeset: 92650:245d9679cd5b parent: 92647:a2add97e28b9 parent: 92649:6f54dfa675eb user: Benjamin Peterson date: Mon Sep 29 23:02:56 2014 -0400 summary: merge 3.4 (#22520) files: Misc/NEWS | 3 ++ Objects/unicodeobject.c | 30 +++++++++++++++++----------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #22520: Fix overflow checking when generating the repr of a unicode + object. + - Issue #22519: Fix overflow checking in PyBytes_Repr. - Issue #22518: Fix integer overflow issues in latin-1 encoding. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -12365,28 +12365,34 @@ ikind = PyUnicode_KIND(unicode); for (i = 0; i < isize; i++) { Py_UCS4 ch = PyUnicode_READ(ikind, idata, i); + Py_ssize_t incr = 1; switch (ch) { - case '\'': squote++; osize++; break; - case '"': dquote++; osize++; break; + case '\'': squote++; break; + case '"': dquote++; break; case '\\': case '\t': case '\r': case '\n': - osize += 2; break; + incr = 2; + break; default: /* Fast-path ASCII */ if (ch < ' ' || ch == 0x7f) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x7f) - osize++; - else if (Py_UNICODE_ISPRINTABLE(ch)) { - osize++; + ; + else if (Py_UNICODE_ISPRINTABLE(ch)) max = ch > max ? 
ch : max; - } else if (ch < 0x100) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x10000) - osize += 6; /* \uHHHH */ + incr = 6; /* \uHHHH */ else - osize += 10; /* \uHHHHHHHH */ - } + incr = 10; /* \uHHHHHHHH */ + } + if (osize > PY_SSIZE_T_MAX - incr) { + PyErr_SetString(PyExc_OverflowError, + "string is too long to generate repr"); + return NULL; + } + osize += incr; } quote = '\''; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 05:03:08 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 03:03:08 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3_=28=2322520=29?= Message-ID: <20140930030308.74993.85223@mail.hg.python.org> https://hg.python.org/cpython/rev/6f54dfa675eb changeset: 92649:6f54dfa675eb branch: 3.4 parent: 92646:e54da75100f6 parent: 92648:8ba7e5f43952 user: Benjamin Peterson date: Mon Sep 29 23:02:35 2014 -0400 summary: merge 3.3 (#22520) files: Misc/NEWS | 3 ++ Objects/unicodeobject.c | 30 +++++++++++++++++----------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,9 @@ Core and Builtins ----------------- +- Issue #22520: Fix overflow checking when generating the repr of a unicode + object. + - Issue #22519: Fix overflow checking in PyBytes_Repr. - Issue #22518: Fix integer overflow issues in latin-1 encoding. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -12341,28 +12341,34 @@ ikind = PyUnicode_KIND(unicode); for (i = 0; i < isize; i++) { Py_UCS4 ch = PyUnicode_READ(ikind, idata, i); + Py_ssize_t incr = 1; switch (ch) { - case '\'': squote++; osize++; break; - case '"': dquote++; osize++; break; + case '\'': squote++; break; + case '"': dquote++; break; case '\\': case '\t': case '\r': case '\n': - osize += 2; break; + incr = 2; + break; default: /* Fast-path ASCII */ if (ch < ' ' || ch == 0x7f) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x7f) - osize++; - else if (Py_UNICODE_ISPRINTABLE(ch)) { - osize++; + ; + else if (Py_UNICODE_ISPRINTABLE(ch)) max = ch > max ? 
ch : max; - } else if (ch < 0x100) - osize += 4; /* \xHH */ + incr = 4; /* \xHH */ else if (ch < 0x10000) - osize += 6; /* \uHHHH */ + incr = 6; /* \uHHHH */ else - osize += 10; /* \uHHHHHHHH */ - } + incr = 10; /* \uHHHHHHHH */ + } + if (osize > PY_SSIZE_T_MAX - incr) { + PyErr_SetString(PyExc_OverflowError, + "string is too long to generate repr"); + return NULL; + } + osize += incr; } quote = '\''; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 05:55:57 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 03:55:57 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_use_https?= Message-ID: <20140930035555.74991.49615@mail.hg.python.org> https://hg.python.org/peps/rev/2a717d45512c changeset: 5563:2a717d45512c user: Benjamin Peterson date: Mon Sep 29 23:55:52 2014 -0400 summary: use https files: Makefile | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -29,5 +29,4 @@ -rm *.html update: - hg pull --update http://hg.python.org/peps - + hg pull -u https://hg.python.org/peps -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 09:34:11 2014 From: python-checkins at python.org (gregory.p.smith) Date: Tue, 30 Sep 2014 07:34:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_=22warning=3A_comparis?= =?utf-8?q?on_between_signed_and_unsigned_integer_expressions=22?= Message-ID: <20140930073341.23193.20673@mail.hg.python.org> https://hg.python.org/cpython/rev/a404bf4db6a6 changeset: 92651:a404bf4db6a6 user: Gregory P. Smith date: Tue Sep 30 00:33:24 2014 -0700 summary: Fix "warning: comparison between signed and unsigned integer expressions" -Wsign-compare warnings in unicodeobject.c. These were all a result of sizeof() being unsigned and being compared to a Py_ssize_t. Not actual problems. files: Objects/unicodeobject.c | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -816,7 +816,7 @@ assert(_PyUnicode_WSTR(unicode) != NULL); /* check for integer overflow */ - if (length > PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) { + if (length > PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(wchar_t) - 1) { PyErr_NoMemory(); return -1; } @@ -888,7 +888,7 @@ } /* Ensure we won't overflow the size. */ - if (length > ((PY_SSIZE_T_MAX / sizeof(Py_UNICODE)) - 1)) { + if (length > ((PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(Py_UNICODE)) - 1)) { return (PyUnicodeObject *)PyErr_NoMemory(); } if (length < 0) { @@ -2239,7 +2239,7 @@ if (copy_null) targetlen++; if (!target) { - if (PY_SSIZE_T_MAX / sizeof(Py_UCS4) < targetlen) { + if (PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(Py_UCS4) < targetlen) { PyErr_NoMemory(); return NULL; } @@ -2852,7 +2852,7 @@ buflen = unicode_aswidechar(unicode, NULL, 0); if (buflen == -1) return NULL; - if (PY_SSIZE_T_MAX / sizeof(wchar_t) < buflen) { + if (PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(wchar_t) < buflen) { PyErr_NoMemory(); return NULL; } @@ -15430,7 +15430,7 @@ if (u == NULL) return NULL; /* Ensure we won't overflow the size. 
*/ - if (len > ((PY_SSIZE_T_MAX / sizeof(Py_UNICODE)) - 1)) { + if (len > ((PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(Py_UNICODE)) - 1)) { PyErr_NoMemory(); return NULL; } -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Tue Sep 30 09:48:10 2014 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 30 Sep 2014 09:48:10 +0200 Subject: [Python-checkins] Daily reference leaks (b1442a6832ab): sum=3 Message-ID: results for b1442a6832ab on branch "default" -------------------------------------------- test_functools leaked [0, 0, 3] memory blocks, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogfCoh1u', '-x'] From python-checkins at python.org Tue Sep 30 12:36:16 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 10:36:16 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyMzk2?= =?utf-8?q?=3A_On_32-bit_AIX_platform=2C_don=27t_expose_os=2Eposix=5Ffadvi?= =?utf-8?q?se=28=29_nor?= Message-ID: <20140930103612.86183.70378@mail.hg.python.org> https://hg.python.org/cpython/rev/8e5e19b3cd4e changeset: 92652:8e5e19b3cd4e branch: 3.4 parent: 92649:6f54dfa675eb user: Victor Stinner date: Tue Sep 30 12:20:05 2014 +0200 summary: Issue #22396: On 32-bit AIX platform, don't expose os.posix_fadvise() nor os.posix_fallocate() because their prototypes in system headers are wrong. files: Misc/NEWS | 3 +++ Modules/posixmodule.c | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,9 @@ Library ------- +- Issue #22396: On 32-bit AIX platform, don't expose os.posix_fadvise() nor + os.posix_fallocate() because their prototypes in system headers are wrong. + - Issue #22517: When a io.BufferedRWPair object is deallocated, clear its weakrefs. diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -8786,7 +8786,15 @@ } #endif -#ifdef HAVE_POSIX_FALLOCATE +/* Issue #22396: On 32-bit AIX platform, the prototypes of os.posix_fadvise() + and os.posix_fallocate() in system headers are wrong if _LARGE_FILES is + defined, which is the case in Python on AIX. 
AIX bug report: + http://www-01.ibm.com/support/docview.wss?uid=isg1IV56170 */ +#if defined(_AIX) && defined(_LARGE_FILES) && !defined(__64BIT__) +# define POSIX_FADVISE_AIX_BUG +#endif + +#if defined(HAVE_POSIX_FALLOCATE) && !defined(POSIX_FADVISE_AIX_BUG) PyDoc_STRVAR(posix_posix_fallocate__doc__, "posix_fallocate(fd, offset, len)\n\n\ Ensures that enough disk space is allocated for the file specified by fd\n\ @@ -8813,7 +8821,7 @@ } #endif -#ifdef HAVE_POSIX_FADVISE +#if defined(HAVE_POSIX_FADVISE) && !defined(POSIX_FADVISE_AIX_BUG) PyDoc_STRVAR(posix_posix_fadvise__doc__, "posix_fadvise(fd, offset, len, advice)\n\n\ Announces an intention to access data in a specific pattern thus allowing\n\ @@ -11485,10 +11493,10 @@ METH_VARARGS | METH_KEYWORDS, posix_truncate__doc__}, #endif -#ifdef HAVE_POSIX_FALLOCATE +#if defined(HAVE_POSIX_FALLOCATE) && !defined(POSIX_FADVISE_AIX_BUG) {"posix_fallocate", posix_posix_fallocate, METH_VARARGS, posix_posix_fallocate__doc__}, #endif -#ifdef HAVE_POSIX_FADVISE +#if defined(HAVE_POSIX_FADVISE) && !defined(POSIX_FADVISE_AIX_BUG) {"posix_fadvise", posix_posix_fadvise, METH_VARARGS, posix_posix_fadvise__doc__}, #endif #ifdef HAVE_PUTENV -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 12:36:16 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 10:36:16 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2322396=3A_On_32-bit_AIX_platfo?= =?utf-8?q?rm=2C_don=27t_expose?= Message-ID: <20140930103612.74997.64885@mail.hg.python.org> https://hg.python.org/cpython/rev/5ade1061fa3d changeset: 92653:5ade1061fa3d parent: 92651:a404bf4db6a6 parent: 92652:8e5e19b3cd4e user: Victor Stinner date: Tue Sep 30 12:35:58 2014 +0200 summary: (Merge 3.4) Issue #22396: On 32-bit AIX platform, don't expose os.posix_fadvise() nor os.posix_fallocate() because their prototypes in system headers are wrong. files: Misc/NEWS | 3 +++ Modules/posixmodule.c | 21 +++++++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -38,6 +38,9 @@ Library ------- +- Issue #22396: On 32-bit AIX platform, don't expose os.posix_fadvise() nor + os.posix_fallocate() because their prototypes in system headers are wrong. + - Issue #22517: When a io.BufferedRWPair object is deallocated, clear its weakrefs. diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -12708,7 +12708,16 @@ #endif /* HAVE_TRUNCATE */ -#ifdef HAVE_POSIX_FALLOCATE +/* Issue #22396: On 32-bit AIX platform, the prototypes of os.posix_fadvise() + and os.posix_fallocate() in system headers are wrong if _LARGE_FILES is + defined, which is the case in Python on AIX. 
AIX bug report: + http://www-01.ibm.com/support/docview.wss?uid=isg1IV56170 */ +#if defined(_AIX) && defined(_LARGE_FILES) && !defined(__64BIT__) +# define POSIX_FADVISE_AIX_BUG +#endif + + +#if defined(HAVE_POSIX_FALLOCATE) && !defined(POSIX_FADVISE_AIX_BUG) /*[clinic input] os.posix_fallocate @@ -12771,10 +12780,10 @@ } Py_RETURN_NONE; } -#endif /* HAVE_POSIX_FALLOCATE */ - - -#ifdef HAVE_POSIX_FADVISE +#endif /* HAVE_POSIX_FALLOCATE) && !POSIX_FADVISE_AIX_BUG */ + + +#if defined(HAVE_POSIX_FADVISE) && !defined(POSIX_FADVISE_AIX_BUG) /*[clinic input] os.posix_fadvise @@ -12849,7 +12858,7 @@ } Py_RETURN_NONE; } -#endif /* HAVE_POSIX_FADVISE */ +#endif /* HAVE_POSIX_FADVISE && !POSIX_FADVISE_AIX_BUG */ #ifdef HAVE_PUTENV -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 13:20:26 2014 From: python-checkins at python.org (nick.coghlan) Date: Tue, 30 Sep 2014 11:20:26 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_394=3A_Clarify_that_=22py?= =?utf-8?q?thon=22_should_mean_=22python2=22?= Message-ID: <20140930112022.86197.88383@mail.hg.python.org> https://hg.python.org/peps/rev/3d16b0cd10bc changeset: 5564:3d16b0cd10bc user: Nick Coghlan date: Tue Sep 30 21:18:22 2014 +1000 summary: PEP 394: Clarify that "python" should mean "python2" files: pep-0394.txt | 19 +++++++++++-------- 1 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pep-0394.txt b/pep-0394.txt --- a/pep-0394.txt +++ b/pep-0394.txt @@ -8,7 +8,7 @@ Type: Informational Content-Type: text/x-rst Created: 02-Mar-2011 -Post-History: 04-Mar-2011, 20-Jul-2011, 16-Feb-2012 +Post-History: 04-Mar-2011, 20-Jul-2011, 16-Feb-2012, 30-Sep-2012 Resolution: http://mail.python.org/pipermail/python-dev/2012-February/116594.html @@ -42,12 +42,11 @@ * When invoked, ``python2`` should run some version of the Python 2 interpreter, and ``python3`` should run some version of the Python 3 interpreter. -* Similarly, the more general ``python`` command should be installed whenever - any version of Python is installed and should invoke the same version of - Python as either ``python2`` or ``python3``. -* For the time being, it is recommended that ``python`` should refer to - ``python2`` (however, some distributions have already chosen otherwise; see - the `Rationale`_ and `Migration Notes`_ below). +* The more general ``python`` command should be installed whenever + any version of Python 2 is installed and should invoke the same version of + Python as the ``python2`` command (however, note that some distributions + have already chosen to make the have ``python`` implement the ``python3`` + command; see the `Rationale`_ and `Migration Notes`_ below). * The Python 2.x ``idle``, ``pydoc``, and ``python-config`` commands should likewise be available as ``idle2``, ``pydoc2``, and ``python2-config``, with the original commands invoking these versions by default, but possibly @@ -68,7 +67,8 @@ interpreter location remains the preferred approach. These recommendations are the outcome of the relevant python-dev discussions -in March and July 2011 ([1]_, [2]_) and February 2012 ([4]_). +in March and July 2011 ([1]_, [2]_), February 2012 ([4]_) and +September 2014 ([6]_). Rationale @@ -263,6 +263,9 @@ .. [5] Arch Linux announcement that their "python" link now refers Python 3 (https://www.archlinux.org/news/python-is-now-python-3/) +.. 
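For script authors, the practical reading of the PEP 394 recommendation being edited above is unchanged: a bare "python" shebang may land on either major version depending on the distribution, so version-specific scripts should name python2 or python3 explicitly and fail early if started under the wrong interpreter. A small defensive idiom along those lines (this is a common convention, not text taken from the PEP)::

    #!/usr/bin/env python2
    import sys

    if sys.version_info[0] != 2:
        sys.exit("This script expects Python 2; run it with the python2 command.")

    print("running under %d.%d" % sys.version_info[:2])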
[6] PEP 394 - Clarification of what "python" command should invoke + (https://mail.python.org/pipermail/python-dev/2014-September/136374.html) + Copyright =========== This document has been placed in the public domain. -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 13:20:26 2014 From: python-checkins at python.org (nick.coghlan) Date: Tue, 30 Sep 2014 11:20:26 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_394=3A_Change_verb_tense_?= =?utf-8?q?=26_cover_2=2E7_maintenance_extension?= Message-ID: <20140930112022.121785.50037@mail.hg.python.org> https://hg.python.org/peps/rev/0418f146b50f changeset: 5565:0418f146b50f user: Nick Coghlan date: Tue Sep 30 21:20:14 2014 +1000 summary: PEP 394: Change verb tense & cover 2.7 maintenance extension files: pep-0394.txt | 30 +++++++++++++++--------------- 1 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pep-0394.txt b/pep-0394.txt --- a/pep-0394.txt +++ b/pep-0394.txt @@ -76,12 +76,12 @@ This recommendation is needed as, even though the majority of distributions still alias the ``python`` command to Python 2, some now alias it to -Python 3 ([5]_). As some of the former distributions do not yet provide a -``python2`` command by default, there is currently no way for Python 2 code +Python 3 ([5]_). As some of the former distributions did not provide a +``python2`` command by default, there was previously no way for Python 2 code (or any code that invokes the Python 2 interpreter directly rather than via ``sys.executable``) to reliably run on all Unix-like systems without -modification, as the ``python`` command will invoke the wrong interpreter -version on some systems, and the ``python2`` command will fail completely +modification, as the ``python`` command would invoke the wrong interpreter +version on some systems, and the ``python2`` command would fail completely on others. The recommendations in this PEP provide a very simple mechanism to restore cross-platform support, with minimal additional work required on the part of distribution maintainers. @@ -98,7 +98,7 @@ This recommendation will be periodically reviewed over the next few years, and updated when the core development team judges it appropriate. As a point of reference, regular maintenance releases for the Python 2.7 series -will continue until at least 2015. +will continue until at least 2020. Migration Notes @@ -119,8 +119,9 @@ yet familiar with the backwards incompatible changes in Python 3. For example, while the change of ``print`` from a statement to a builtin function is relatively simple for automated converters to handle, the - SyntaxError from attempting to use the Python 2 notation in Python 3 is - thoroughly confusing if you aren't already aware of the change:: + SyntaxError from attempting to use the Python 2 notation in versions of + Python 3 prior to 3.4.2 is thoroughly confusing if you aren't already + aware of the change:: $ python3 -c 'print "Hello, world!"' File "", line 1 @@ -128,6 +129,8 @@ ^ SyntaxError: invalid syntax + (In Python 3.4.2+, that generic error message has been replaced with the + more explicit "SyntaxError: Missing parentheses in call to 'print'") * Avoiding breakage of such third party scripts is the key reason this PEP recommends that ``python`` continue to refer to ``python2`` for the time being. 
Until the conventions described in this PEP are more widely @@ -169,9 +172,6 @@ ``python`` command is only executed in an interactive manner as a user convenience, or to run scripts that are source compatible with both Python 2 and Python 3. -* one symbolic date being considered for a possible change to the official - recommendation in this PEP is the planned switch of Python 2.7 from full - maintenance to security update only status in 2015 (see PEP 373). Backwards Compatibility @@ -190,7 +190,7 @@ ================================================ While technically a new feature, the ``make install`` and ``make bininstall`` -command in the 2.7 version of CPython will be adjusted to create the +command in the 2.7 version of CPython were adjusted to create the following chains of symbolic links in the relevant ``bin`` directory (the final item listed in the chain is the actual installed binary, preceding items are relative symbolic links):: @@ -198,9 +198,9 @@ python -> python2 -> python2.7 python-config -> python2-config -> python2.7-config -Similar adjustments will be made to the Mac OS X binary installer. +Similar adjustments were made to the Mac OS X binary installer. -This feature will first appear in the default installation process in +This feature first appeared in the default installation process in CPython 2.7.3. The installation commands in the CPython 3.x series already create the @@ -211,7 +211,7 @@ pydoc3 -> pydoc3.2 python3-config -> python3.2-config -And CPython 3.3 will create:: +And CPython 3.3 creates:: python3 -> python3.3 idle3 -> idle3.3 @@ -219,7 +219,7 @@ python3-config -> python3.3-config pysetup3 -> pysetup3.3 -The implementation progress of these features in the default installers is +The implementation progress of these features in the default installers was managed on the tracker as issue #12627 ([3]_). -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 13:24:50 2014 From: python-checkins at python.org (nick.coghlan) Date: Tue, 30 Sep 2014 11:24:50 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_394=3A_fix_typo?= Message-ID: <20140930112447.23189.21@mail.hg.python.org> https://hg.python.org/peps/rev/32b6619e9259 changeset: 5566:32b6619e9259 user: Nick Coghlan date: Tue Sep 30 21:24:40 2014 +1000 summary: PEP 394: fix typo files: pep-0394.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0394.txt b/pep-0394.txt --- a/pep-0394.txt +++ b/pep-0394.txt @@ -45,7 +45,7 @@ * The more general ``python`` command should be installed whenever any version of Python 2 is installed and should invoke the same version of Python as the ``python2`` command (however, note that some distributions - have already chosen to make the have ``python`` implement the ``python3`` + have already chosen to have ``python`` implement the ``python3`` command; see the `Rationale`_ and `Migration Notes`_ below). 
* The Python 2.x ``idle``, ``pydoc``, and ``python-config`` commands should likewise be available as ``idle2``, ``pydoc2``, and ``python2-config``, -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 13:51:37 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 11:51:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_faulthandler=3A_suppress_c?= =?utf-8?q?rash_reporter_directly_in_test_functions_written_to?= Message-ID: <20140930114558.23217.89464@mail.hg.python.org> https://hg.python.org/cpython/rev/4f3ce83eff17 changeset: 92654:4f3ce83eff17 user: Victor Stinner date: Tue Sep 30 13:40:12 2014 +0200 summary: faulthandler: suppress crash reporter directly in test functions written to crash. files: Modules/faulthandler.c | 47 ++++++++++++++++++++++++++--- 1 files changed, 41 insertions(+), 6 deletions(-) diff --git a/Modules/faulthandler.c b/Modules/faulthandler.c --- a/Modules/faulthandler.c +++ b/Modules/faulthandler.c @@ -5,7 +5,13 @@ #include #include #if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK) -#include +# include +#endif +#ifdef MS_WINDOWS +# include +#endif +#ifdef HAVE_SYS_RESOURCE_H +# include #endif /* Allocate at maximum 100 MB of the stack to raise the stack overflow */ @@ -804,6 +810,34 @@ #endif /* FAULTHANDLER_USER */ +static void +faulthandler_suppress_crash_report(void) +{ +#ifdef MS_WINDOWS + UINT mode; + + /* Configure Windows to not display the Windows Error Reporting dialog */ + mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); + SetErrorMode(mode | SEM_NOGPFAULTERRORBOX); +#endif + +#ifdef HAVE_SYS_RESOURCE_H + struct rlimit rl; + + /* Disable creation of core dump */ + if (getrlimit(RLIMIT_CORE, &rl) != 0) { + rl.rlim_cur = 0; + setrlimit(RLIMIT_CORE, &rl); + } +#endif + +#ifdef _MSC_VER + /* Visual Studio: configure abort() to not display an error message nor + open a popup asking to report the fault. */ + _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); +#endif +} + static PyObject * faulthandler_read_null(PyObject *self, PyObject *args) { @@ -813,6 +847,7 @@ if (!PyArg_ParseTuple(args, "|i:_read_null", &release_gil)) return NULL; + faulthandler_suppress_crash_report(); x = NULL; if (release_gil) { Py_BEGIN_ALLOW_THREADS @@ -827,6 +862,7 @@ static PyObject * faulthandler_sigsegv(PyObject *self, PyObject *args) { + faulthandler_suppress_crash_report(); #if defined(MS_WINDOWS) /* For SIGSEGV, faulthandler_fatal_error() restores the previous signal handler and then gives back the execution flow to the program (without @@ -853,6 +889,7 @@ /* Do an integer division by zero: raise a SIGFPE on Intel CPU, but not on PowerPC. Use volatile to disable compile-time optimizations. */ volatile int x = 1, y = 0, z; + faulthandler_suppress_crash_report(); z = x / y; /* If the division by zero didn't raise a SIGFPE (e.g. on PowerPC), raise it manually. */ @@ -865,11 +902,7 @@ static PyObject * faulthandler_sigabrt(PyObject *self, PyObject *args) { -#ifdef _MSC_VER - /* Visual Studio: configure abort() to not display an error message nor - open a popup asking to report the fault. 
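These faulthandler test helpers crash the interpreter on purpose, which is why the tests always run them in a child process and why changeset 4f3ce83eff17 additionally suppresses the platform crash reporter (Windows error dialogs, core dumps, the Visual Studio abort popup) before triggering the fault. The same pattern, reduced to a runnable sketch that uses the private _sigsegv() helper shown in the diffs (expect roughly "Fatal Python error: Segmentation fault" plus a traceback on stderr; the exact wording and exit status vary by platform)::

    import subprocess
    import sys

    CHILD = "import faulthandler; faulthandler.enable(); faulthandler._sigsegv()"

    proc = subprocess.Popen([sys.executable, "-c", CHILD], stderr=subprocess.PIPE)
    _, err = proc.communicate()          # the child dies, the parent survives
    print(err.decode(errors="replace"))
    print("child exit status:", proc.returncode)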
*/ - _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); -#endif + faulthandler_suppress_crash_report(); abort(); Py_RETURN_NONE; } @@ -880,6 +913,7 @@ char *message; if (!PyArg_ParseTuple(args, "y:fatal_error", &message)) return NULL; + faulthandler_suppress_crash_report(); Py_FatalError(message); Py_RETURN_NONE; } @@ -905,6 +939,7 @@ size_t depth, size; char *sp = (char *)&depth, *stop; + faulthandler_suppress_crash_report(); depth = 0; stop = stack_overflow(sp - STACK_OVERFLOW_MAX_SIZE, sp + STACK_OVERFLOW_MAX_SIZE, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:06:35 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 12:06:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_faulthandler=3A_test=5Fgil=5Freleased?= =?utf-8?q?=28=29_now_uses_=5Fsigsegv=28=29_instead_of?= Message-ID: <20140930120106.47048.59644@mail.hg.python.org> https://hg.python.org/cpython/rev/6e7d923dfd2a changeset: 92657:6e7d923dfd2a parent: 92654:4f3ce83eff17 parent: 92656:eb2bf1c2f654 user: Victor Stinner date: Tue Sep 30 13:55:30 2014 +0200 summary: (Merge 3.4) faulthandler: test_gil_released() now uses _sigsegv() instead of _read_null(), because _read_null() cannot be used on AIX. On AIX, reading from NULL is allowed: the first page of memory is a mapped read-only on AIX. _read_null() and _sigabrt() don't accept parameters. files: Lib/test/test_faulthandler.py | 4 +- Modules/faulthandler.c | 38 +++++++++++++--------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py --- a/Lib/test/test_faulthandler.py +++ b/Lib/test/test_faulthandler.py @@ -194,10 +194,10 @@ self.check_fatal_error(""" import faulthandler faulthandler.enable() - faulthandler._read_null(True) + faulthandler._sigsegv(True) """, 3, - '(?:Segmentation fault|Bus error|Illegal instruction)') + 'Segmentation fault') def test_enable_file(self): with temporary_filename() as filename: diff --git a/Modules/faulthandler.c b/Modules/faulthandler.c --- a/Modules/faulthandler.c +++ b/Modules/faulthandler.c @@ -843,24 +843,16 @@ { volatile int *x; volatile int y; - int release_gil = 0; - if (!PyArg_ParseTuple(args, "|i:_read_null", &release_gil)) - return NULL; faulthandler_suppress_crash_report(); x = NULL; - if (release_gil) { - Py_BEGIN_ALLOW_THREADS - y = *x; - Py_END_ALLOW_THREADS - } else - y = *x; + y = *x; return PyLong_FromLong(y); } -static PyObject * -faulthandler_sigsegv(PyObject *self, PyObject *args) +static void +faulthandler_raise_sigsegv(void) { faulthandler_suppress_crash_report(); #if defined(MS_WINDOWS) @@ -880,6 +872,22 @@ #else raise(SIGSEGV); #endif +} + +static PyObject * +faulthandler_sigsegv(PyObject *self, PyObject *args) +{ + int release_gil = 0; + if (!PyArg_ParseTuple(args, "|i:_read_null", &release_gil)) + return NULL; + + if (release_gil) { + Py_BEGIN_ALLOW_THREADS + faulthandler_raise_sigsegv(); + Py_END_ALLOW_THREADS + } else { + faulthandler_raise_sigsegv(); + } Py_RETURN_NONE; } @@ -1020,12 +1028,12 @@ "'signum' registered by register()")}, #endif - {"_read_null", faulthandler_read_null, METH_VARARGS, - PyDoc_STR("_read_null(release_gil=False): read from NULL, raise " + {"_read_null", faulthandler_read_null, METH_NOARGS, + PyDoc_STR("_read_null(): read from NULL, raise " "a SIGSEGV or SIGBUS signal depending on the platform")}, {"_sigsegv", faulthandler_sigsegv, METH_VARARGS, - 
PyDoc_STR("_sigsegv(): raise a SIGSEGV signal")}, - {"_sigabrt", faulthandler_sigabrt, METH_VARARGS, + PyDoc_STR("_sigsegv(release_gil=False): raise a SIGSEGV signal")}, + {"_sigabrt", faulthandler_sigabrt, METH_NOARGS, PyDoc_STR("_sigabrt(): raise a SIGABRT signal")}, {"_sigfpe", (PyCFunction)faulthandler_sigfpe, METH_NOARGS, PyDoc_STR("_sigfpe(): raise a SIGFPE signal")}, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:06:35 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 12:06:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_faulthandler?= =?utf-8?q?=3A_=5Fsigsegv=28=29_and_=5Fsigabrt=28=29_don=27t_accept_parame?= =?utf-8?q?ters?= Message-ID: <20140930120105.86201.61451@mail.hg.python.org> https://hg.python.org/cpython/rev/8379f7021375 changeset: 92655:8379f7021375 branch: 3.4 parent: 92652:8e5e19b3cd4e user: Victor Stinner date: Tue Sep 30 13:49:09 2014 +0200 summary: faulthandler: _sigsegv() and _sigabrt() don't accept parameters files: Modules/faulthandler.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/faulthandler.c b/Modules/faulthandler.c --- a/Modules/faulthandler.c +++ b/Modules/faulthandler.c @@ -1006,9 +1006,9 @@ {"_read_null", faulthandler_read_null, METH_VARARGS, PyDoc_STR("_read_null(release_gil=False): read from NULL, raise " "a SIGSEGV or SIGBUS signal depending on the platform")}, - {"_sigsegv", faulthandler_sigsegv, METH_VARARGS, + {"_sigsegv", faulthandler_sigsegv, METH_NOARGS, PyDoc_STR("_sigsegv(): raise a SIGSEGV signal")}, - {"_sigabrt", faulthandler_sigabrt, METH_VARARGS, + {"_sigabrt", faulthandler_sigabrt, METH_NOARGS, PyDoc_STR("_sigabrt(): raise a SIGABRT signal")}, {"_sigfpe", (PyCFunction)faulthandler_sigfpe, METH_NOARGS, PyDoc_STR("_sigfpe(): raise a SIGFPE signal")}, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:06:35 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 12:06:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_faulthandler?= =?utf-8?q?=3A_test=5Fgil=5Freleased=28=29_now_uses_=5Fsigsegv=28=29_inste?= =?utf-8?b?YWQgb2YgX3JlYWRfbnVsbCgpLA==?= Message-ID: <20140930120105.23215.25520@mail.hg.python.org> https://hg.python.org/cpython/rev/eb2bf1c2f654 changeset: 92656:eb2bf1c2f654 branch: 3.4 user: Victor Stinner date: Tue Sep 30 13:54:14 2014 +0200 summary: faulthandler: test_gil_released() now uses _sigsegv() instead of _read_null(), because _read_null() cannot be used on AIX. On AIX, reading from NULL is allowed: the first page of memory is a mapped read-only on AIX. 
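The faulthandler_suppress_crash_report() helper introduced in this series turns off the Windows error-reporting dialog, core dump creation and the MSVC abort() popup before the module crashes the process on purpose. As a rough pure-Python sketch of the core-dump half only (assuming a POSIX system where the resource module is available; suppress_core_dump is an illustrative name), the same effect looks like:

    import resource

    def suppress_core_dump():
        # Lower the soft RLIMIT_CORE limit to 0 so a deliberate crash does not
        # leave a core file behind; the hard limit stays as it was.
        soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (0, hard))

In the patch itself the suppression is done in C, inside each crashing helper such as _sigsegv(), _sigabrt() and _sigfpe(), so every deliberate-crash code path goes through it.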
files: Lib/test/test_faulthandler.py | 4 +- Modules/faulthandler.c | 38 +++++++++++++--------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py --- a/Lib/test/test_faulthandler.py +++ b/Lib/test/test_faulthandler.py @@ -184,10 +184,10 @@ self.check_fatal_error(""" import faulthandler faulthandler.enable() - faulthandler._read_null(True) + faulthandler._sigsegv(True) """, 3, - '(?:Segmentation fault|Bus error|Illegal instruction)') + 'Segmentation fault') def test_enable_file(self): with temporary_filename() as filename: diff --git a/Modules/faulthandler.c b/Modules/faulthandler.c --- a/Modules/faulthandler.c +++ b/Modules/faulthandler.c @@ -809,23 +809,15 @@ { volatile int *x; volatile int y; - int release_gil = 0; - if (!PyArg_ParseTuple(args, "|i:_read_null", &release_gil)) - return NULL; x = NULL; - if (release_gil) { - Py_BEGIN_ALLOW_THREADS - y = *x; - Py_END_ALLOW_THREADS - } else - y = *x; + y = *x; return PyLong_FromLong(y); } -static PyObject * -faulthandler_sigsegv(PyObject *self, PyObject *args) +static void +faulthandler_raise_sigsegv(void) { #if defined(MS_WINDOWS) /* For SIGSEGV, faulthandler_fatal_error() restores the previous signal @@ -844,6 +836,22 @@ #else raise(SIGSEGV); #endif +} + +static PyObject * +faulthandler_sigsegv(PyObject *self, PyObject *args) +{ + int release_gil = 0; + if (!PyArg_ParseTuple(args, "|i:_read_null", &release_gil)) + return NULL; + + if (release_gil) { + Py_BEGIN_ALLOW_THREADS + faulthandler_raise_sigsegv(); + Py_END_ALLOW_THREADS + } else { + faulthandler_raise_sigsegv(); + } Py_RETURN_NONE; } @@ -1003,11 +1011,11 @@ "'signum' registered by register()")}, #endif - {"_read_null", faulthandler_read_null, METH_VARARGS, - PyDoc_STR("_read_null(release_gil=False): read from NULL, raise " + {"_read_null", faulthandler_read_null, METH_NOARGS, + PyDoc_STR("_read_null(): read from NULL, raise " "a SIGSEGV or SIGBUS signal depending on the platform")}, - {"_sigsegv", faulthandler_sigsegv, METH_NOARGS, - PyDoc_STR("_sigsegv(): raise a SIGSEGV signal")}, + {"_sigsegv", faulthandler_sigsegv, METH_VARARGS, + PyDoc_STR("_sigsegv(release_gil=False): raise a SIGSEGV signal")}, {"_sigabrt", faulthandler_sigabrt, METH_NOARGS, PyDoc_STR("_sigabrt(): raise a SIGABRT signal")}, {"_sigfpe", (PyCFunction)faulthandler_sigfpe, METH_NOARGS, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:46:57 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:46:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE2MDM4?= =?utf-8?q?=3A_CVE-2013-1752=3A_ftplib=3A_Limit_amount_of_data_read_by?= Message-ID: <20140930124657.75003.6058@mail.hg.python.org> https://hg.python.org/cpython/rev/783e7b4375ac changeset: 92660:783e7b4375ac branch: 3.2 user: Georg Brandl date: Tue Sep 30 14:12:24 2014 +0200 summary: Issue #16038: CVE-2013-1752: ftplib: Limit amount of data read by limiting the call to readline(). Original patch by Micha? Jastrz?bski and Giampaolo Rodola. 
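The fix follows one simple rule: never call readline() on a server response without a size argument, and treat anything longer than the cap as a protocol error. A standalone sketch of that pattern follows; bounded_readline and the ValueError are illustrative stand-ins, while the patch itself uses ftplib's module-level MAXLINE constant and raises ftplib.Error:

    MAXLINE = 8192  # the cap the patch gives ftplib

    def bounded_readline(fileobj, maxline=MAXLINE):
        # Ask readline() for at most maxline + 1 bytes; a longer result means
        # the peer sent an oversized line, so refuse to keep buffering it.
        line = fileobj.readline(maxline + 1)
        if len(line) > maxline:
            raise ValueError("got more than %d bytes" % maxline)
        return line

The same bounded-readline idiom reappears in the smtplib, poplib and imaplib changes later in this batch.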
files: Lib/ftplib.py | 23 ++++++++++++++++++----- Lib/test/test_ftplib.py | 22 +++++++++++++++++++++- Misc/NEWS | 4 ++++ 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/Lib/ftplib.py b/Lib/ftplib.py --- a/Lib/ftplib.py +++ b/Lib/ftplib.py @@ -49,6 +49,8 @@ # The standard FTP server control port FTP_PORT = 21 +# The sizehint parameter passed to readline() calls +MAXLINE = 8192 # Exception raised when an error or invalid response is received @@ -96,6 +98,7 @@ debugging = 0 host = '' port = FTP_PORT + maxline = MAXLINE sock = None file = None welcome = None @@ -190,7 +193,9 @@ # Internal: return one line from the server, stripping CRLF. # Raise EOFError if the connection is closed def getline(self): - line = self.file.readline() + line = self.file.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 1: print('*get*', self.sanitize(line)) if not line: raise EOFError @@ -444,7 +449,9 @@ with self.transfercmd(cmd) as conn, \ conn.makefile('r', encoding=self.encoding) as fp: while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print('*retr*', repr(line)) if not line: break @@ -494,7 +501,9 @@ self.voidcmd('TYPE A') with self.transfercmd(cmd) as conn: while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != B_CRLF: if buf[-1] in B_CRLF: buf = buf[:-1] @@ -741,7 +750,9 @@ fp = conn.makefile('r', encoding=self.encoding) try: while 1: - line = fp.readline() + line = fp.readline(self.maxline + 1) + if len(line) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if self.debugging > 2: print('*retr*', repr(line)) if not line: break @@ -779,7 +790,9 @@ conn = self.transfercmd(cmd) try: while 1: - buf = fp.readline() + buf = fp.readline(self.maxline + 1) + if len(buf) > self.maxline: + raise Error("got more than %d bytes" % self.maxline) if not buf: break if buf[-2:] != B_CRLF: if buf[-1] in B_CRLF: buf = buf[:-1] diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py --- a/Lib/test/test_ftplib.py +++ b/Lib/test/test_ftplib.py @@ -70,6 +70,7 @@ self.last_received_data = '' self.next_response = '' self.rest = None + self.next_retr_data = RETR_DATA self.push('220 welcome') def collect_incoming_data(self, data): @@ -199,7 +200,7 @@ offset = int(self.rest) else: offset = 0 - self.dtp.push(RETR_DATA[offset:]) + self.dtp.push(self.next_retr_data[offset:]) self.dtp.close_when_done() self.rest = None @@ -213,6 +214,11 @@ self.dtp.push(NLST_DATA) self.dtp.close_when_done() + def cmd_setlongretr(self, arg): + # For testing. Next RETR will return long line. 
+ self.next_retr_data = 'x' * int(arg) + self.push('125 setlongretr ok') + class DummyFTPServer(asyncore.dispatcher, threading.Thread): @@ -628,6 +634,20 @@ self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar') self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar') + def test_line_too_long(self): + self.assertRaises(ftplib.Error, self.client.sendcmd, + 'x' * self.client.maxline * 2) + + def test_retrlines_too_long(self): + self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2)) + received = [] + self.assertRaises(ftplib.Error, + self.client.retrlines, 'retr', received.append) + + def test_storlines_too_long(self): + f = io.BytesIO(b'x' * self.client.maxline * 2) + self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f) + class TestIPv6Environment(TestCase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Library ------- +- Issue #16038: CVE-2013-1752: ftplib: Limit amount of data read by + limiting the call to readline(). Original patch by Micha? + Jastrz?bski and Giampaolo Rodola. + - Issue #16037: HTTPMessage.readheaders() raises an HTTPException when more than 100 headers are read. Adapted from patch by Jyrki Pulliainen. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:46:57 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:46:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE2MDM3?= =?utf-8?q?=3A_HTTPMessage=2Ereadheaders=28=29_raises_an_HTTPException_whe?= =?utf-8?q?n_more_than?= Message-ID: <20140930124656.23193.16494@mail.hg.python.org> https://hg.python.org/cpython/rev/deee87d61436 changeset: 92659:deee87d61436 branch: 3.2 user: Georg Brandl date: Tue Sep 30 14:08:04 2014 +0200 summary: Issue #16037: HTTPMessage.readheaders() raises an HTTPException when more than 100 headers are read. Adapted from patch by Jyrki Pulliainen. files: Doc/library/http.client.rst | 2 +- Lib/http/client.py | 4 ++++ Lib/test/test_httplib.py | 9 +++++++++ Misc/NEWS | 5 ++++- 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/Doc/library/http.client.rst b/Doc/library/http.client.rst --- a/Doc/library/http.client.rst +++ b/Doc/library/http.client.rst @@ -169,9 +169,9 @@ A subclass of :exc:`HTTPException`. Raised if a server responds with a HTTP status code that we don't understand. + The constants defined in this module are: - .. data:: HTTP_PORT The default port for the HTTP protocol (always ``80``). diff --git a/Lib/http/client.py b/Lib/http/client.py --- a/Lib/http/client.py +++ b/Lib/http/client.py @@ -206,6 +206,8 @@ # maximal line length when calling readline(). 
_MAXLINE = 65536 +_MAXHEADERS = 100 + class HTTPMessage(email.message.Message): # XXX The only usage of this method is in @@ -253,6 +255,8 @@ if len(line) > _MAXLINE: raise LineTooLong("header line") headers.append(line) + if len(headers) > _MAXHEADERS: + raise HTTPException("got more than %d headers" % _MAXHEADERS) if line in (b'\r\n', b'\n', b''): break hstring = b''.join(headers).decode('iso-8859-1') diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py --- a/Lib/test/test_httplib.py +++ b/Lib/test/test_httplib.py @@ -272,6 +272,15 @@ if resp.read(): self.fail("Did not expect response from HEAD request") + def test_too_many_headers(self): + headers = '\r\n'.join('Header%d: foo' % i + for i in range(client._MAXHEADERS + 1)) + '\r\n' + text = ('HTTP/1.1 200 OK\r\n' + headers) + s = FakeSocket(text) + r = client.HTTPResponse(s) + self.assertRaisesRegex(client.HTTPException, + r"got more than \d+ headers", r.begin) + def test_send_file(self): expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' b'Accept-Encoding: identity\r\nContent-Length:') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1,4 +1,4 @@ -++++++++++ ++++++++++++ Python News +++++++++++ @@ -10,6 +10,9 @@ Library ------- +- Issue #16037: HTTPMessage.readheaders() raises an HTTPException when more than + 100 headers are read. Adapted from patch by Jyrki Pulliainen. + - Issue #18709: Fix CVE-2013-4238. The SSL module now handles NULL bytes inside subjectAltName correctly. Formerly the module has used OpenSSL's GENERAL_NAME_print() function to get the string represention of ASN.1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:46:57 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:46:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE4NzA5?= =?utf-8?q?=3A_Fix_CVE-2013-4238=2E_The_SSL_module_now_handles_NULL_bytes?= Message-ID: <20140930124656.86175.94406@mail.hg.python.org> https://hg.python.org/cpython/rev/386b0f478117 changeset: 92658:386b0f478117 branch: 3.2 parent: 91974:634f3fe8cbde user: Georg Brandl date: Tue Sep 30 14:04:51 2014 +0200 summary: Issue #18709: Fix CVE-2013-4238. The SSL module now handles NULL bytes inside subjectAltName correctly. Formerly the module has used OpenSSL's GENERAL_NAME_print() function to get the string represention of ASN.1 strings for ``rfc822Name`` (email), ``dNSName`` (DNS) and ``uniformResourceIdentifier`` (URI). 
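A short illustration, separate from the patch, of why the NUL byte matters: the commonName in the test certificate added below embeds '\x00', and code that stops reading at the first NUL sees a different host name than a byte-exact comparison does.

    cert_name = 'null.python.org\x00example.org'    # commonName from nullbytecert.pem below
    requested = 'null.python.org'

    print(cert_name == requested)                   # False: the NUL byte is significant
    print(cert_name.split('\x00')[0] == requested)  # True: truncating at NUL is the spoofing risk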
files: Lib/test/nullbytecert.pem | 90 +++++++++++++++++++++++++++ Lib/test/test_ssl.py | 29 ++++++++ Misc/NEWS | 6 + Modules/_ssl.c | 66 ++++++++++++++++++- 4 files changed, 185 insertions(+), 6 deletions(-) diff --git a/Lib/test/nullbytecert.pem b/Lib/test/nullbytecert.pem new file mode 100644 --- /dev/null +++ b/Lib/test/nullbytecert.pem @@ -0,0 +1,90 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 0 (0x0) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev at python.org + Validity + Not Before: Aug 7 13:11:52 2013 GMT + Not After : Aug 7 13:12:52 2013 GMT + Subject: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev at python.org + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b5:ea:ed:c9:fb:46:7d:6f:3b:76:80:dd:3a:f3: + 03:94:0b:a7:a6:db:ec:1d:df:ff:23:74:08:9d:97: + 16:3f:a3:a4:7b:3e:1b:0e:96:59:25:03:a7:26:e2: + 88:a9:cf:79:cd:f7:04:56:b0:ab:79:32:6e:59:c1: + 32:30:54:eb:58:a8:cb:91:f0:42:a5:64:27:cb:d4: + 56:31:88:52:ad:cf:bd:7f:f0:06:64:1f:cc:27:b8: + a3:8b:8c:f3:d8:29:1f:25:0b:f5:46:06:1b:ca:02: + 45:ad:7b:76:0a:9c:bf:bb:b9:ae:0d:16:ab:60:75: + ae:06:3e:9c:7c:31:dc:92:2f:29:1a:e0:4b:0c:91: + 90:6c:e9:37:c5:90:d7:2a:d7:97:15:a3:80:8f:5d: + 7b:49:8f:54:30:d4:97:2c:1c:5b:37:b5:ab:69:30: + 68:43:d3:33:78:4b:02:60:f5:3c:44:80:a1:8f:e7: + f0:0f:d1:5e:87:9e:46:cf:62:fc:f9:bf:0c:65:12: + f1:93:c8:35:79:3f:c8:ec:ec:47:f5:ef:be:44:d5: + ae:82:1e:2d:9a:9f:98:5a:67:65:e1:74:70:7c:cb: + d3:c2:ce:0e:45:49:27:dc:e3:2d:d4:fb:48:0e:2f: + 9e:77:b8:14:46:c0:c4:36:ca:02:ae:6a:91:8c:da: + 2f:85 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Subject Key Identifier: + 88:5A:55:C0:52:FF:61:CD:52:A3:35:0F:EA:5A:9C:24:38:22:F7:5C + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Subject Alternative Name: + ************************************************************* + WARNING: The values for DNS, email and URI are WRONG. OpenSSL + doesn't print the text after a NULL byte. 
+ ************************************************************* + DNS:altnull.python.org, email:null at python.org, URI:http://null.python.org, IP Address:192.0.2.1, IP Address:2001:DB8:0:0:0:0:0:1 + Signature Algorithm: sha1WithRSAEncryption + ac:4f:45:ef:7d:49:a8:21:70:8e:88:59:3e:d4:36:42:70:f5: + a3:bd:8b:d7:a8:d0:58:f6:31:4a:b1:a4:a6:dd:6f:d9:e8:44: + 3c:b6:0a:71:d6:7f:b1:08:61:9d:60:ce:75:cf:77:0c:d2:37: + 86:02:8d:5e:5d:f9:0f:71:b4:16:a8:c1:3d:23:1c:f1:11:b3: + 56:6e:ca:d0:8d:34:94:e6:87:2a:99:f2:ae:ae:cc:c2:e8:86: + de:08:a8:7f:c5:05:fa:6f:81:a7:82:e6:d0:53:9d:34:f4:ac: + 3e:40:fe:89:57:7a:29:a4:91:7e:0b:c6:51:31:e5:10:2f:a4: + 60:76:cd:95:51:1a:be:8b:a1:b0:fd:ad:52:bd:d7:1b:87:60: + d2:31:c7:17:c4:18:4f:2d:08:25:a3:a7:4f:b7:92:ca:e2:f5: + 25:f1:54:75:81:9d:b3:3d:61:a2:f7:da:ed:e1:c6:6f:2c:60: + 1f:d8:6f:c5:92:05:ab:c9:09:62:49:a9:14:ad:55:11:cc:d6: + 4a:19:94:99:97:37:1d:81:5f:8b:cf:a3:a8:96:44:51:08:3d: + 0b:05:65:12:eb:b6:70:80:88:48:72:4f:c6:c2:da:cf:cd:8e: + 5b:ba:97:2f:60:b4:96:56:49:5e:3a:43:76:63:04:be:2a:f6: + c1:ca:a9:94 +-----BEGIN CERTIFICATE----- +MIIE2DCCA8CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBxTELMAkGA1UEBhMCVVMx +DzANBgNVBAgMBk9yZWdvbjESMBAGA1UEBwwJQmVhdmVydG9uMSMwIQYDVQQKDBpQ +eXRob24gU29mdHdhcmUgRm91bmRhdGlvbjEgMB4GA1UECwwXUHl0aG9uIENvcmUg +RGV2ZWxvcG1lbnQxJDAiBgNVBAMMG251bGwucHl0aG9uLm9yZwBleGFtcGxlLm9y +ZzEkMCIGCSqGSIb3DQEJARYVcHl0aG9uLWRldkBweXRob24ub3JnMB4XDTEzMDgw +NzEzMTE1MloXDTEzMDgwNzEzMTI1MlowgcUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQI +DAZPcmVnb24xEjAQBgNVBAcMCUJlYXZlcnRvbjEjMCEGA1UECgwaUHl0aG9uIFNv +ZnR3YXJlIEZvdW5kYXRpb24xIDAeBgNVBAsMF1B5dGhvbiBDb3JlIERldmVsb3Bt +ZW50MSQwIgYDVQQDDBtudWxsLnB5dGhvbi5vcmcAZXhhbXBsZS5vcmcxJDAiBgkq +hkiG9w0BCQEWFXB5dGhvbi1kZXZAcHl0aG9uLm9yZzCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBALXq7cn7Rn1vO3aA3TrzA5QLp6bb7B3f/yN0CJ2XFj+j +pHs+Gw6WWSUDpybiiKnPec33BFawq3kyblnBMjBU61ioy5HwQqVkJ8vUVjGIUq3P +vX/wBmQfzCe4o4uM89gpHyUL9UYGG8oCRa17dgqcv7u5rg0Wq2B1rgY+nHwx3JIv +KRrgSwyRkGzpN8WQ1yrXlxWjgI9de0mPVDDUlywcWze1q2kwaEPTM3hLAmD1PESA +oY/n8A/RXoeeRs9i/Pm/DGUS8ZPINXk/yOzsR/XvvkTVroIeLZqfmFpnZeF0cHzL +08LODkVJJ9zjLdT7SA4vnne4FEbAxDbKAq5qkYzaL4UCAwEAAaOB0DCBzTAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSIWlXAUv9hzVKjNQ/qWpwkOCL3XDALBgNVHQ8E +BAMCBeAwgZAGA1UdEQSBiDCBhYIeYWx0bnVsbC5weXRob24ub3JnAGV4YW1wbGUu +Y29tgSBudWxsQHB5dGhvbi5vcmcAdXNlckBleGFtcGxlLm9yZ4YpaHR0cDovL251 +bGwucHl0aG9uLm9yZwBodHRwOi8vZXhhbXBsZS5vcmeHBMAAAgGHECABDbgAAAAA +AAAAAAAAAAEwDQYJKoZIhvcNAQEFBQADggEBAKxPRe99SaghcI6IWT7UNkJw9aO9 +i9eo0Fj2MUqxpKbdb9noRDy2CnHWf7EIYZ1gznXPdwzSN4YCjV5d+Q9xtBaowT0j +HPERs1ZuytCNNJTmhyqZ8q6uzMLoht4IqH/FBfpvgaeC5tBTnTT0rD5A/olXeimk +kX4LxlEx5RAvpGB2zZVRGr6LobD9rVK91xuHYNIxxxfEGE8tCCWjp0+3ksri9SXx +VHWBnbM9YaL32u3hxm8sYB/Yb8WSBavJCWJJqRStVRHM1koZlJmXNx2BX4vPo6iW +RFEIPQsFZRLrtnCAiEhyT8bC2s/Njlu6ly9gtJZWSV46Q3ZjBL4q9sHKqZQ= +-----END CERTIFICATE----- diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -52,6 +52,7 @@ WRONGCERT = data_file("XXXnonexisting.pem") BADKEY = data_file("badkey.pem") NOKIACERT = data_file("nokia.pem") +NULLBYTECERT = data_file("nullbytecert.pem") def handle_error(prefix): @@ -140,6 +141,27 @@ ('DNS', 'projects.forum.nokia.com')) ) + def test_parse_cert_CVE_2013_4073(self): + p = ssl._ssl._test_decode_cert(NULLBYTECERT) + if support.verbose: + sys.stdout.write("\n" + pprint.pformat(p) + "\n") + subject = ((('countryName', 'US'),), + (('stateOrProvinceName', 'Oregon'),), + (('localityName', 'Beaverton'),), + (('organizationName', 'Python Software Foundation'),), + 
(('organizationalUnitName', 'Python Core Development'),), + (('commonName', 'null.python.org\x00example.org'),), + (('emailAddress', 'python-dev at python.org'),)) + self.assertEqual(p['subject'], subject) + self.assertEqual(p['issuer'], subject) + self.assertEqual(p['subjectAltName'], + (('DNS', 'altnull.python.org\x00example.com'), + ('email', 'null at python.org\x00user at example.org'), + ('URI', 'http://null.python.org\x00http://example.org'), + ('IP Address', '192.0.2.1'), + ('IP Address', '2001:DB8:0:0:0:0:0:1\n')) + ) + def test_DER_to_PEM(self): with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: pem = f.read() @@ -271,6 +293,13 @@ fail(cert, 'foo.a.com') fail(cert, 'bar.foo.com') + # NULL bytes are bad, CVE-2013-4073 + cert = {'subject': ((('commonName', + 'null.python.org\x00example.org'),),)} + ok(cert, 'null.python.org\x00example.org') # or raise an error? + fail(cert, 'example.org') + fail(cert, 'null.python.org') + # Slightly fake real-world example cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT', 'subject': ((('commonName', 'linuxfrz.org'),),), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,12 @@ Library ------- +- Issue #18709: Fix CVE-2013-4238. The SSL module now handles NULL bytes + inside subjectAltName correctly. Formerly the module has used OpenSSL's + GENERAL_NAME_print() function to get the string represention of ASN.1 + strings for ``rfc822Name`` (email), ``dNSName`` (DNS) and + ``uniformResourceIdentifier`` (URI). + - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. diff --git a/Modules/_ssl.c b/Modules/_ssl.c --- a/Modules/_ssl.c +++ b/Modules/_ssl.c @@ -561,7 +561,7 @@ int i, j; PyObject *peer_alt_names = Py_None; - PyObject *v, *t; + PyObject *v = NULL, *t; X509_EXTENSION *ext = NULL; GENERAL_NAMES *names = NULL; GENERAL_NAME *name; @@ -616,12 +616,14 @@ ext->value->length)); for(j = 0; j < sk_GENERAL_NAME_num(names); j++) { - /* get a rendering of each name in the set of names */ + int gntype; + ASN1_STRING *as = NULL; name = sk_GENERAL_NAME_value(names, j); - if (name->type == GEN_DIRNAME) { - + gntype = name-> type; + switch (gntype) { + case GEN_DIRNAME: /* we special-case DirName as a tuple of tuples of attributes */ @@ -643,11 +645,62 @@ goto fail; } PyTuple_SET_ITEM(t, 1, v); + break; - } else { + case GEN_EMAIL: + case GEN_DNS: + case GEN_URI: + /* GENERAL_NAME_print() doesn't handle NUL bytes in ASN1_string + correctly. 
*/ + t = PyTuple_New(2); + if (t == NULL) + goto fail; + switch (gntype) { + case GEN_EMAIL: + v = PyUnicode_FromString("email"); + as = name->d.rfc822Name; + break; + case GEN_DNS: + v = PyUnicode_FromString("DNS"); + as = name->d.dNSName; + break; + case GEN_URI: + v = PyUnicode_FromString("URI"); + as = name->d.uniformResourceIdentifier; + break; + } + if (v == NULL) { + Py_DECREF(t); + goto fail; + } + PyTuple_SET_ITEM(t, 0, v); + v = PyUnicode_FromStringAndSize((char *)ASN1_STRING_data(as), + ASN1_STRING_length(as)); + if (v == NULL) { + Py_DECREF(t); + goto fail; + } + PyTuple_SET_ITEM(t, 1, v); + break; + default: /* for everything else, we use the OpenSSL print form */ - + switch (gntype) { + /* check for new general name type */ + case GEN_OTHERNAME: + case GEN_X400: + case GEN_EDIPARTY: + case GEN_IPADD: + case GEN_RID: + break; + default: + if (PyErr_WarnFormat(PyExc_RuntimeWarning, 1, + "Unknown general name type %d", + gntype) == -1) { + goto fail; + } + break; + } (void) BIO_reset(biobuf); GENERAL_NAME_print(biobuf, name); len = BIO_gets(biobuf, buf, sizeof(buf)-1); @@ -674,6 +727,7 @@ goto fail; } PyTuple_SET_ITEM(t, 1, v); + break; } /* and add that rendering to the list */ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:46:58 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:46:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE4NzQ3?= =?utf-8?q?=3A_document_issue_with_OpenSSL=27s_CPRNG_state_and_fork?= Message-ID: <20140930124657.75003.81993@mail.hg.python.org> https://hg.python.org/cpython/rev/bdf73458df5f changeset: 92661:bdf73458df5f branch: 3.2 user: Christian Heimes date: Tue Oct 29 21:08:56 2013 +0100 summary: Issue #18747: document issue with OpenSSL's CPRNG state and fork files: Doc/library/os.rst | 4 ++++ Doc/library/ssl.rst | 8 ++++++++ 2 files changed, 12 insertions(+), 0 deletions(-) diff --git a/Doc/library/os.rst b/Doc/library/os.rst --- a/Doc/library/os.rst +++ b/Doc/library/os.rst @@ -1809,6 +1809,10 @@ Note that some platforms including FreeBSD <= 6.3, Cygwin and OS/2 EMX have known issues when using fork() from a thread. + .. warning:: + + See :mod:`ssl` for applications that use the SSL module with fork(). + Availability: Unix. diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst --- a/Doc/library/ssl.rst +++ b/Doc/library/ssl.rst @@ -28,6 +28,14 @@ operating system socket APIs. The installed version of OpenSSL may also cause variations in behavior. +.. warning:: + + OpenSSL's internal random number generator does not properly handle fork. + Applications must change the PRNG state of the parent process if they use + any SSL feature with with :func:`os.fork`. Any successful call of + :func:`~ssl.RAND_add`, :func:`~ssl.RAND_bytes` or + :func:`~ssl.RAND_pseudo_bytes` is sufficient. + This section documents the objects and functions in the ``ssl`` module; for more general information about TLS, SSL, and certificates, the reader is referred to the documents in the "See Also" section at the bottom. 
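The warning added above amounts to one extra call in the child process. A minimal sketch, assuming a Unix platform where os.fork() is available:

    import os
    import ssl

    pid = os.fork()
    if pid == 0:
        # Child: mix fresh OS entropy into OpenSSL's PRNG so it stops sharing
        # the parent's generator state.
        ssl.RAND_add(os.urandom(32), 32.0)
        # ... child work that uses SSL ...
        os._exit(0)
    else:
        os.waitpid(pid, 0)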
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:47:04 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:47:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE2MDQy?= =?utf-8?q?=3A_CVE-2013-1752=3A_smtplib=3A_Limit_amount_of_data_read_by?= Message-ID: <20140930124657.86189.61730@mail.hg.python.org> https://hg.python.org/cpython/rev/0f362676460d changeset: 92662:0f362676460d branch: 3.2 user: Georg Brandl date: Tue Sep 30 14:18:02 2014 +0200 summary: Issue #16042: CVE-2013-1752: smtplib: Limit amount of data read by limiting the call to readline(). Original patch by Christian Heimes. files: Lib/smtplib.py | 5 +++- Lib/test/mock_socket.py | 9 ++++++- Lib/test/test_smtplib.py | 30 +++++++++++++++++++++++++++- Misc/NEWS | 3 ++ 4 files changed, 43 insertions(+), 4 deletions(-) diff --git a/Lib/smtplib.py b/Lib/smtplib.py --- a/Lib/smtplib.py +++ b/Lib/smtplib.py @@ -62,6 +62,7 @@ SMTP_SSL_PORT = 465 CRLF = "\r\n" bCRLF = b"\r\n" +_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3 OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I) @@ -363,7 +364,7 @@ self.file = self.sock.makefile('rb') while 1: try: - line = self.file.readline() + line = self.file.readline(_MAXLINE + 1) except socket.error as e: self.close() raise SMTPServerDisconnected("Connection unexpectedly closed: " @@ -373,6 +374,8 @@ raise SMTPServerDisconnected("Connection unexpectedly closed") if self.debuglevel > 0: print('reply:', repr(line), file=stderr) + if len(line) > _MAXLINE: + raise SMTPResponseException(500, "Line too long.") resp.append(line[4:].strip(b' \t\r\n')) code = line[:3] # Check that the error code is syntactically correct. diff --git a/Lib/test/mock_socket.py b/Lib/test/mock_socket.py --- a/Lib/test/mock_socket.py +++ b/Lib/test/mock_socket.py @@ -21,8 +21,13 @@ """ def __init__(self, lines): self.lines = lines - def readline(self): - return self.lines.pop(0) + b'\r\n' + def readline(self, limit=-1): + result = self.lines.pop(0) + b'\r\n' + if limit >= 0: + # Re-insert the line, removing the \r\n we added. + self.lines.insert(0, result[limit:-2]) + result = result[:limit] + return result def close(self): pass diff --git a/Lib/test/test_smtplib.py b/Lib/test/test_smtplib.py --- a/Lib/test/test_smtplib.py +++ b/Lib/test/test_smtplib.py @@ -537,6 +537,33 @@ HOST, self.port, 'localhost', 3) + at unittest.skipUnless(threading, 'Threading required for this test.') +class TooLongLineTests(unittest.TestCase): + respdata = b'250 OK' + (b'.' 
* smtplib._MAXLINE * 2) + b'\n' + + def setUp(self): + self.old_stdout = sys.stdout + self.output = io.StringIO() + sys.stdout = self.output + + self.evt = threading.Event() + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.settimeout(15) + self.port = support.bind_port(self.sock) + servargs = (self.evt, self.respdata, self.sock) + threading.Thread(target=server, args=servargs).start() + self.evt.wait() + self.evt.clear() + + def tearDown(self): + self.evt.wait() + sys.stdout = self.old_stdout + + def testLineTooLong(self): + self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP, + HOST, self.port, 'localhost', 3) + + sim_users = {'Mr.A at somewhere.com':'John A', 'Ms.B at xn--fo-fka.com':'Sally B', 'Mrs.C at somewhereesle.com':'Ruth C', @@ -826,7 +853,8 @@ def test_main(verbose=None): support.run_unittest(GeneralTests, DebuggingServerTests, NonConnectingTests, - BadHELOServerTests, SMTPSimTests) + BadHELOServerTests, SMTPSimTests, + TooLongLineTests) if __name__ == '__main__': test_main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Library ------- +- Issue #16042: CVE-2013-1752: smtplib: Limit amount of data read by + limiting the call to readline(). Original patch by Christian Heimes. + - Issue #16038: CVE-2013-1752: ftplib: Limit amount of data read by limiting the call to readline(). Original patch by Micha? Jastrz?bski and Giampaolo Rodola. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:47:04 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 12:47:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE2MDQx?= =?utf-8?q?=3A_CVE-2013-1752=3A_poplib=3A_Limit_maximum_line_lengths_to_20?= =?utf-8?q?48_to?= Message-ID: <20140930124657.23207.71743@mail.hg.python.org> https://hg.python.org/cpython/rev/76be07730f8d changeset: 92663:76be07730f8d branch: 3.2 user: Georg Brandl date: Tue Sep 30 14:45:39 2014 +0200 summary: Issue #16041: CVE-2013-1752: poplib: Limit maximum line lengths to 2048 to prevent readline() calls from consuming too much memory. Patch by Jyrki Pulliainen. files: Lib/poplib.py | 11 ++++++++++- Lib/test/test_poplib.py | 6 +++++- Misc/NEWS | 4 ++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/Lib/poplib.py b/Lib/poplib.py --- a/Lib/poplib.py +++ b/Lib/poplib.py @@ -32,6 +32,12 @@ LF = b'\n' CRLF = CR+LF +# maximal line length when calling readline(). This is to prevent +# reading arbitrary lenght lines. RFC 1939 limits POP3 line length to +# 512 characters, including CRLF. We have selected 2048 just to be on +# the safe side. +_MAXLINE = 2048 + class POP3: @@ -107,7 +113,10 @@ # Raise error_proto('-ERR EOF') if the connection is closed. 
def _getline(self): - line = self.file.readline() + line = self.file.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise error_proto('line too long') + if self._debugging > 1: print('*get*', repr(line)) if not line: raise error_proto('-ERR EOF') octets = len(line) diff --git a/Lib/test/test_poplib.py b/Lib/test/test_poplib.py --- a/Lib/test/test_poplib.py +++ b/Lib/test/test_poplib.py @@ -83,7 +83,7 @@ def cmd_list(self, arg): if arg: - self.push('+OK %s %s' %(arg, arg)) + self.push('+OK %s %s' % (arg, arg)) else: self.push('+OK') asynchat.async_chat.push(self, LIST_RESP) @@ -204,6 +204,10 @@ foo = self.client.retr('foo') self.assertEqual(foo, expected) + def test_too_long_lines(self): + self.assertRaises(poplib.error_proto, self.client._shortcmd, + 'echo +%s' % ((poplib._MAXLINE + 10) * 'a')) + def test_dele(self): self.assertOK(self.client.dele('foo')) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Library ------- +- Issue #16041: CVE-2013-1752: poplib: Limit maximum line lengths to 2048 to + prevent readline() calls from consuming too much memory. Patch by Jyrki + Pulliainen. + - Issue #16042: CVE-2013-1752: smtplib: Limit amount of data read by limiting the call to readline(). Original patch by Christian Heimes. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 14:59:15 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 30 Sep 2014 12:59:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Remove_pointless_=22vile_h?= =?utf-8?q?ack=22_that_can_cause_the_build_step_to_fail_when_some?= Message-ID: <20140930125828.86197.77315@mail.hg.python.org> https://hg.python.org/cpython/rev/94af1af93670 changeset: 92664:94af1af93670 parent: 92657:6e7d923dfd2a user: Antoine Pitrou date: Tue Sep 30 14:58:22 2014 +0200 summary: Remove pointless "vile hack" that can cause the build step to fail when some extension modules can't be imported. See issue #5309 for the build failures, issue #458343 for the original motivation. files: Lib/distutils/command/build_ext.py | 11 ++--------- setup.py | 11 ----------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py --- a/Lib/distutils/command/build_ext.py +++ b/Lib/distutils/command/build_ext.py @@ -545,15 +545,8 @@ extra_postargs=extra_args, depends=ext.depends) - # XXX -- this is a Vile HACK! - # - # The setup.py script for Python on Unix needs to be able to - # get this list so it can perform all the clean up needed to - # avoid keeping object files around when cleaning out a failed - # build of an extension module. Since Distutils does not - # track dependencies, we have to get rid of intermediates to - # ensure all the intermediates will be properly re-built. - # + # XXX outdated variable, kept here in case third-part code + # needs it. self._built_objects = objects[:] # Now link the object files together into a "shared object" -- diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -381,17 +381,6 @@ os.remove(newname) os.rename(ext_filename, newname) - # XXX -- This relies on a Vile HACK in - # distutils.command.build_ext.build_extension(). The - # _built_objects attribute is stored there strictly for - # use here. - # If there is a failure, _built_objects may not be there, - # so catch the AttributeError and move on. 
- try: - for filename in self._built_objects: - os.remove(filename) - except AttributeError: - self.announce('unable to remove files (ignored)') except: exc_type, why, tb = sys.exc_info() self.announce('*** WARNING: importing extension "%s" ' -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 15:00:17 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 13:00:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E2=29=3A_Lax_cookie_par?= =?utf-8?q?sing_in_http=2Ecookies_could_be_a_security_issue_when_combined?= Message-ID: <20140930125916.74989.78537@mail.hg.python.org> https://hg.python.org/cpython/rev/5cfe74a9bfa4 changeset: 92667:5cfe74a9bfa4 branch: 3.2 user: Antoine Pitrou date: Wed Sep 17 00:23:55 2014 +0200 summary: Lax cookie parsing in http.cookies could be a security issue when combined with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. files: Lib/http/cookies.py | 3 ++- Lib/test/test_http_cookies.py | 9 +++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 16 insertions(+), 1 deletions(-) diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -432,6 +432,7 @@ _LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]" _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern + \s* # Optional whitespace at start of cookie (?P # Start of group 'key' """ + _LegalCharsPatt + r"""+? # Any word of at least one letter ) # End of group 'key' @@ -532,7 +533,7 @@ while 0 <= i < n: # Start looking for a cookie - match = patt.search(str, i) + match = patt.match(str, i) if not match: # No more cookies break diff --git a/Lib/test/test_http_cookies.py b/Lib/test/test_http_cookies.py --- a/Lib/test/test_http_cookies.py +++ b/Lib/test/test_http_cookies.py @@ -132,6 +132,15 @@ """) + def test_invalid_cookies(self): + # Accepting these could be a security issue + C = cookies.SimpleCookie() + for s in (']foo=x', '[foo=x', 'blah]foo=x', 'blah[foo=x'): + C.load(s) + self.assertEqual(dict(C), {}) + self.assertEqual(C.output(), '') + + class MorselTests(unittest.TestCase): """Tests for the Morsel object.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -117,6 +117,7 @@ Pablo Bleyer Erik van Blokland Eric Blossom +Sergey Bobrov Finn Bock Paul Boddie Matthew Boedicker diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -37,6 +37,10 @@ strings for ``rfc822Name`` (email), ``dNSName`` (DNS) and ``uniformResourceIdentifier`` (URI). +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. + - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. 
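With the pattern now anchored at the start of each cookie, junk before the name makes the whole load() a no-op instead of resynchronising somewhere inside the string. The behaviour the new test expects, as a small sketch:

    from http.cookies import SimpleCookie

    c = SimpleCookie()
    c.load(']foo=x')           # leading junk: nothing is parsed
    print(dict(c))             # {}
    print(repr(c.output()))    # ''

    c.load('name=value')       # a well-formed cookie still parses normally
    print(c['name'].value)     # 'value'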
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 15:00:17 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 13:00:17 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzIyNDE5?= =?utf-8?q?=3A_Limit_the_length_of_incoming_HTTP_request_in_wsgiref_server?= =?utf-8?q?_to?= Message-ID: <20140930125916.86177.36760@mail.hg.python.org> https://hg.python.org/cpython/rev/0d115d14adfd changeset: 92666:0d115d14adfd branch: 3.2 user: Georg Brandl date: Tue Sep 30 14:56:46 2014 +0200 summary: Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. files: Lib/test/test_wsgiref.py | 5 +++++ Lib/wsgiref/simple_server.py | 9 ++++++++- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 18 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py --- a/Lib/test/test_wsgiref.py +++ b/Lib/test/test_wsgiref.py @@ -114,6 +114,11 @@ out, err = run_amock() self.check_hello(out) + def test_request_length(self): + out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n") + self.assertEqual(out.splitlines()[0], + b"HTTP/1.0 414 Request-URI Too Long") + def test_validated_hello(self): out, err = run_amock(validator(hello_app)) # the middleware doesn't support len(), so content-length isn't there diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py --- a/Lib/wsgiref/simple_server.py +++ b/Lib/wsgiref/simple_server.py @@ -114,7 +114,14 @@ def handle(self): """Handle a single HTTP request""" - self.raw_requestline = self.rfile.readline() + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.parse_request(): # An error code has been sent, just exit return diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -219,6 +219,7 @@ Geremy Condra Juan Jos? Conti Matt Conway +Devin Cook David M. Cooke Jason R. Coombs Garrett Cooper diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Library ------- +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + - Issue #22517: When a io.BufferedRWPair object is deallocated, clear its weakrefs. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 15:00:17 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 13:00:17 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzIyNTE3?= =?utf-8?q?=3A_When_a_io=2EBufferedRWPair_object_is_deallocated=2C_clear_i?= =?utf-8?q?ts?= Message-ID: <20140930125915.74995.79589@mail.hg.python.org> https://hg.python.org/cpython/rev/4fa5239624b8 changeset: 92665:4fa5239624b8 branch: 3.2 parent: 92663:76be07730f8d user: Georg Brandl date: Tue Sep 30 14:54:39 2014 +0200 summary: Issue #22517: When a io.BufferedRWPair object is deallocated, clear its weakrefs. 
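The problem is easy to trigger from pure Python once a weak reference is alive when the pair is deallocated. A minimal sketch, with io.BytesIO standing in for real raw streams:

    import io
    import weakref

    pair = io.BufferedRWPair(io.BytesIO(), io.BytesIO())
    ref = weakref.ref(pair)
    del pair        # with the fix, deallocation clears the weakref instead of crashing
    print(ref())    # None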
files: Lib/test/test_io.py | 6 ++++++ Misc/NEWS | 3 +++ Modules/_io/bufferedio.c | 2 ++ 3 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py --- a/Lib/test/test_io.py +++ b/Lib/test/test_io.py @@ -1454,6 +1454,12 @@ pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) + def test_weakref_clearing(self): + brw = self.tp(self.MockRawIO(), self.MockRawIO()) + ref = weakref.ref(brw) + brw = None + ref = None # Shouldn't segfault. + class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Library ------- +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + - Issue #16041: CVE-2013-1752: poplib: Limit maximum line lengths to 2048 to prevent readline() calls from consuming too much memory. Patch by Jyrki Pulliainen. diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c --- a/Modules/_io/bufferedio.c +++ b/Modules/_io/bufferedio.c @@ -2141,6 +2141,8 @@ bufferedrwpair_dealloc(rwpair *self) { _PyObject_GC_UNTRACK(self); + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); Py_CLEAR(self->reader); Py_CLEAR(self->writer); Py_CLEAR(self->dict); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 15:49:13 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 13:49:13 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzIyNDIx?= =?utf-8?q?_-_Secure_pydoc_server_run=2E_Bind_it_to_localhost_instead_of_a?= =?utf-8?q?ll?= Message-ID: <20140930134833.23215.78997@mail.hg.python.org> https://hg.python.org/cpython/rev/02dae04b3e2b changeset: 92668:02dae04b3e2b branch: 3.2 user: Georg Brandl date: Wed Sep 17 13:17:58 2014 +0800 summary: Issue #22421 - Secure pydoc server run. Bind it to localhost instead of all interfaces. files: Lib/pydoc.py | 4 ++-- Lib/test/test_pydoc.py | 2 ++ Misc/NEWS | 3 +++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -2431,8 +2431,8 @@ class DocServer(http.server.HTTPServer): def __init__(self, port, callback): - self.host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost' - self.address = ('', port) + self.host = 'localhost' + self.address = (self.host, port) self.callback = callback self.base.__init__(self, self.address, self.handler) self.quit = False diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -510,6 +510,8 @@ return text serverthread = pydoc._start_server(my_url_handler, port=0) + self.assertIn('localhost', serverthread.docserver.address) + starttime = time.time() timeout = 1 #seconds diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Library ------- +- Issue #22421: Fix a regression that caused the pydoc server to be bound to + all interfaces instead of only localhost. + - Issue #22419: Limit the length of incoming HTTP request in wsgiref server to 65536 bytes and send a 414 error code for higher lengths. Patch contributed by Devin Cook. 
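The regression came down to the bind address: passing '' to the server listens on every interface, while 'localhost' keeps the documentation server local. The difference is easy to see with the stock http.server classes (pydoc's DocServer subclasses the same HTTPServer machinery):

    from http.server import BaseHTTPRequestHandler, HTTPServer

    local_only = HTTPServer(('localhost', 0), BaseHTTPRequestHandler)
    print(local_only.server_address)   # typically ('127.0.0.1', some ephemeral port)
    local_only.server_close()

    everywhere = HTTPServer(('', 0), BaseHTTPRequestHandler)
    print(everywhere.server_address)   # ('0.0.0.0', some ephemeral port), reachable from outside
    everywhere.server_close()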
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 16:03:18 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 14:03:18 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE2MDM5?= =?utf-8?q?=3A_CVE-2013-1752=3A_Change_use_of_readline_in_imaplib_module_t?= =?utf-8?q?o_limit?= Message-ID: <20140930140217.74985.88042@mail.hg.python.org> https://hg.python.org/cpython/rev/5d1c03316af7 changeset: 92669:5d1c03316af7 branch: 3.2 user: Georg Brandl date: Tue Sep 30 16:00:09 2014 +0200 summary: Issue #16039: CVE-2013-1752: Change use of readline in imaplib module to limit line length. Patch by Emil Lind. files: Lib/imaplib.py | 14 +++++++++++++- Lib/test/test_imaplib.py | 11 +++++++++++ Misc/NEWS | 3 +++ 3 files changed, 27 insertions(+), 1 deletions(-) diff --git a/Lib/imaplib.py b/Lib/imaplib.py --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -42,6 +42,15 @@ IMAP4_SSL_PORT = 993 AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first +# Maximal line length when calling readline(). This is to prevent +# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1) +# don't specify a line length. RFC 2683 however suggests limiting client +# command lines to 1000 octets and server command lines to 8000 octets. +# We have selected 10000 for some extra margin and since that is supposedly +# also what UW and Panda IMAP does. +_MAXLINE = 10000 + + # Commands Commands = { @@ -263,7 +272,10 @@ def readline(self): """Read line from remote.""" - return self.file.readline() + line = self.file.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise self.error("got more than %d bytes" % _MAXLINE) + return line def send(self, data): diff --git a/Lib/test/test_imaplib.py b/Lib/test/test_imaplib.py --- a/Lib/test/test_imaplib.py +++ b/Lib/test/test_imaplib.py @@ -309,6 +309,17 @@ self.assertEqual(ret, "OK") + def test_linetoolong(self): + class TooLongHandler(SimpleIMAPHandler): + def handle(self): + # Send a very long response line + self.wfile.write(b'* OK ' + imaplib._MAXLINE*b'x' + b'\r\n') + + with self.reaped_server(TooLongHandler) as server: + self.assertRaises(imaplib.IMAP4.error, + self.imap_class, *server.server_address) + + class ThreadedNetworkedTests(BaseThreadedNetworkedTests): server_class = socketserver.TCPServer diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Library ------- +- Issue #16039: CVE-2013-1752: Change use of readline in imaplib module to limit + line length. Patch by Emil Lind. + - Issue #22421: Fix a regression that caused the pydoc server to be bound to all interfaces instead of only localhost. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 16:22:48 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 14:22:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzIwOTM5?= =?utf-8?q?=3A_Use_www=2Eexample=2Ecom_instead_of_www=2Epython=2Eorg_to_av?= =?utf-8?q?oid_test?= Message-ID: <20140930142223.75009.23647@mail.hg.python.org> https://hg.python.org/cpython/rev/97c329849ef3 changeset: 92670:97c329849ef3 branch: 3.2 user: Ned Deily date: Wed Mar 26 23:31:39 2014 -0700 summary: Issue #20939: Use www.example.com instead of www.python.org to avoid test failures when ssl is not present. 
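The test changes follow one pattern: wrap each network access in support.transient_internet(), which turns connectivity problems into skipped tests rather than failures, and point at a host that serves plain HTTP so nothing depends on the ssl module. Roughly, with fetch_front_page() as an illustrative name:

    import urllib.request
    from test import support

    def fetch_front_page():
        # transient_internet() converts transient network errors into test skips;
        # www.example.com serves plain HTTP, so the ssl module is not needed.
        with support.transient_internet("www.example.com"):
            with urllib.request.urlopen("http://www.example.com/") as f:
                return f.read()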
files: Lib/test/test_urllib2net.py | 10 ++++---- Lib/test/test_urllibnet.py | 26 ++++++++++++------------ Misc/NEWS | 7 ++++++ 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py old mode 100644 new mode 100755 --- a/Lib/test/test_urllib2net.py +++ b/Lib/test/test_urllib2net.py @@ -84,7 +84,7 @@ # calling .close() on urllib2's response objects should close the # underlying socket - response = _urlopen_with_retry("http://www.python.org/") + response = _urlopen_with_retry("http://www.example.com/") sock = response.fp self.assertTrue(not sock.closed) response.close() @@ -254,7 +254,7 @@ class TimeoutTest(unittest.TestCase): def test_http_basic(self): self.assertTrue(socket.getdefaulttimeout() is None) - url = "http://www.python.org" + url = "http://www.example.com" with support.transient_internet(url, timeout=None): u = _urlopen_with_retry(url) self.addCleanup(u.close) @@ -262,7 +262,7 @@ def test_http_default_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) - url = "http://www.python.org" + url = "http://www.example.com" with support.transient_internet(url): socket.setdefaulttimeout(60) try: @@ -274,7 +274,7 @@ def test_http_no_timeout(self): self.assertTrue(socket.getdefaulttimeout() is None) - url = "http://www.python.org" + url = "http://www.example.com" with support.transient_internet(url): socket.setdefaulttimeout(60) try: @@ -285,7 +285,7 @@ self.assertTrue(u.fp.raw._sock.gettimeout() is None) def test_http_timeout(self): - url = "http://www.python.org" + url = "http://www.example.com" with support.transient_internet(url): u = _urlopen_with_retry(url, timeout=120) self.addCleanup(u.close) diff --git a/Lib/test/test_urllibnet.py b/Lib/test/test_urllibnet.py old mode 100644 new mode 100755 --- a/Lib/test/test_urllibnet.py +++ b/Lib/test/test_urllibnet.py @@ -24,8 +24,8 @@ socket.setdefaulttimeout(None) def testURLread(self): - with support.transient_internet("www.python.org"): - f = urllib.request.urlopen("http://www.python.org/") + with support.transient_internet("www.example.com"): + f = urllib.request.urlopen("http://www.example.com/") x = f.read() @@ -38,7 +38,7 @@ for transparent redirection have been written. setUp is not used for always constructing a connection to - http://www.python.org/ since there a few tests that don't use that address + http://www.example.com/ since there a few tests that don't use that address and making a connection is expensive enough to warrant minimizing unneeded connections. @@ -56,7 +56,7 @@ def test_basic(self): # Simple test expected to pass. - with self.urlopen("http://www.python.org/") as open_url: + with self.urlopen("http://www.example.com/") as open_url: for attr in ("read", "readline", "readlines", "fileno", "close", "info", "geturl"): self.assertTrue(hasattr(open_url, attr), "object returned from " @@ -65,7 +65,7 @@ def test_readlines(self): # Test both readline and readlines. - with self.urlopen("http://www.python.org/") as open_url: + with self.urlopen("http://www.example.com/") as open_url: self.assertIsInstance(open_url.readline(), bytes, "readline did not return a string") self.assertIsInstance(open_url.readlines(), list, @@ -73,7 +73,7 @@ def test_info(self): # Test 'info'. 
- with self.urlopen("http://www.python.org/") as open_url: + with self.urlopen("http://www.example.com/") as open_url: info_obj = open_url.info() self.assertIsInstance(info_obj, email.message.Message, "object returned by 'info' is not an " @@ -82,14 +82,14 @@ def test_geturl(self): # Make sure same URL as opened is returned by geturl. - URL = "http://www.python.org/" + URL = "http://www.example.com/" with self.urlopen(URL) as open_url: gotten_url = open_url.geturl() self.assertEqual(gotten_url, URL) def test_getcode(self): # test getcode() with the fancy opener to get 404 error codes - URL = "http://www.python.org/XXXinvalidXXX" + URL = "http://www.example.com/XXXinvalidXXX" with support.transient_internet(URL): open_url = urllib.request.FancyURLopener().open(URL) try: @@ -104,7 +104,7 @@ # test can't pass on Windows. return # Make sure fd returned by fileno is valid. - with self.urlopen("http://www.python.org/", timeout=None) as open_url: + with self.urlopen("http://www.example.com/", timeout=None) as open_url: fd = open_url.fileno() with os.fdopen(fd, 'rb') as f: self.assertTrue(f.read(), "reading from file created using fd " @@ -148,7 +148,7 @@ def test_basic(self): # Test basic functionality. - with self.urlretrieve("http://www.python.org/") as (file_location, info): + with self.urlretrieve("http://www.example.com/") as (file_location, info): self.assertTrue(os.path.exists(file_location), "file location returned by" " urlretrieve is not a valid path") with open(file_location, 'rb') as f: @@ -157,7 +157,7 @@ def test_specified_path(self): # Make sure that specifying the location of the file to write to works. - with self.urlretrieve("http://www.python.org/", + with self.urlretrieve("http://www.example.com/", support.TESTFN) as (file_location, info): self.assertEqual(file_location, support.TESTFN) self.assertTrue(os.path.exists(file_location)) @@ -166,12 +166,12 @@ def test_header(self): # Make sure header returned as 2nd value from urlretrieve is good. - with self.urlretrieve("http://www.python.org/") as (file_location, info): + with self.urlretrieve("http://www.example.com/") as (file_location, info): self.assertIsInstance(info, email.message.Message, "info is not an instance of email.message.Message") def test_data_header(self): - logo = "http://www.python.org/static/community_logos/python-logo-master-v3-TM.png" + logo = "http://www.example.com/" with self.urlretrieve(logo) as (file_location, fileheaders): datevalue = fileheaders.get('Date') dateformat = '%a, %d %b %Y %H:%M:%S GMT' diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -76,6 +76,13 @@ - Issue #21323: Fix http.server to again handle scripts in CGI subdirectories, broken by the fix for security issue #19435. Patch by Zach Byrne. +Tests +----- + +- Issue #20939: Avoid various network test failures due to new + redirect of http://www.python.org/ to https://www.python.org: + use http://www.example.com instead. + What's New in Python 3.2.5? 
=========================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 16:32:10 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 14:32:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E2=29=3A_Fix-up_for_0f3?= =?utf-8?q?62676460d=3A_add_missing_size_argument_to_SSLFakeFile=2Ereadlin?= =?utf-8?b?ZSgpLA==?= Message-ID: <20140930143202.74985.59678@mail.hg.python.org> https://hg.python.org/cpython/rev/4065c4539fcb changeset: 92671:4065c4539fcb branch: 3.2 user: Georg Brandl date: Tue Sep 30 16:31:21 2014 +0200 summary: Fix-up for 0f362676460d: add missing size argument to SSLFakeFile.readline(), as in 2.6 backport 8a6def3add5b files: Lib/smtplib.py | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Lib/smtplib.py b/Lib/smtplib.py old mode 100644 new mode 100755 --- a/Lib/smtplib.py +++ b/Lib/smtplib.py @@ -189,10 +189,14 @@ def __init__(self, sslobj): self.sslobj = sslobj - def readline(self): + def readline(self, size=-1): + if size < 0: + size = None str = b"" chr = None while chr != b"\n": + if size is not None and len(str) > size: + break chr = self.sslobj.read(1) if not chr: break -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 16:41:33 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 14:41:33 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E2=29=3A_Add_a_dummy_?= =?utf-8?q?=22touch=22_target_to_the_Makefile_so_that_the_custom_buildbots?= =?utf-8?q?_can?= Message-ID: <20140930144117.74999.65120@mail.hg.python.org> https://hg.python.org/cpython/rev/89b2d5d0f23d changeset: 92672:89b2d5d0f23d branch: 3.2 user: Georg Brandl date: Tue Sep 30 16:41:11 2014 +0200 summary: Add a dummy "touch" target to the Makefile so that the custom buildbots can test this branch. 
files: Makefile.pre.in | 5 ++++- 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1371,6 +1371,9 @@ Python/thread.o: @THREADHEADERS@ +# A dummy target so that we are "buildbot step compatible" with newer versions +touch: + # Declare targets that aren't real files .PHONY: all build_all sharedmods oldsharedmods test quicktest memtest .PHONY: install altinstall oldsharedinstall bininstall altbininstall @@ -1378,7 +1381,7 @@ .PHONY: frameworkinstall frameworkinstallframework frameworkinstallstructure .PHONY: frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools .PHONY: frameworkaltinstallunixtools recheck autoconf clean clobber distclean -.PHONY: smelly funny patchcheck altmaninstall +.PHONY: smelly funny patchcheck altmaninstall touch .PHONY: gdbhooks # IF YOU PUT ANYTHING HERE IT WILL GO AWAY -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 17:00:58 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 15:00:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_remove_merge_goop_from_f86?= =?utf-8?q?fde20e9ce?= Message-ID: <20140930150049.23219.69923@mail.hg.python.org> https://hg.python.org/cpython/rev/8145f25f26aa changeset: 92673:8145f25f26aa parent: 92664:94af1af93670 user: Benjamin Peterson date: Tue Sep 30 11:00:46 2014 -0400 summary: remove merge goop from f86fde20e9ce files: Misc/NEWS | 65 ------------------------------------------- 1 files changed, 0 insertions(+), 65 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -35,71 +35,6 @@ - Issue #22258: Fix the internal function set_inheritable() on Illumos. -Library -------- - -- Issue #22396: On 32-bit AIX platform, don't expose os.posix_fadvise() nor - os.posix_fallocate() because their prototypes in system headers are wrong. - -- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its - weakrefs. - -- Issue #22448: Improve canceled timer handles cleanup to prevent - unbound memory usage. Patch by Joshua Moore-Oliva. - -Build ------ - -- Issue #16537: Check whether self.extensions is empty in setup.py. Patch by - Jonathan Hosmer. - - -What's New in Python 3.4.2? -=========================== - -Release date: 2014-10-06 - -Core and Builtins ------------------ - -Library -------- - -- Issue #10510: distutils register and upload methods now use HTML standards - compliant CRLF line endings. - -- Issue #9850: Fixed macpath.join() for empty first component. Patch by - Oleg Oshmyan. - -- Issue #22427: TemporaryDirectory no longer attempts to clean up twice when - used in the with statement in generator. - -- Issue #20912: Now directories added to ZIP file have correct Unix and MS-DOS - directory attributes. - -- Issue #21866: ZipFile.close() no longer writes ZIP64 central directory - records if allowZip64 is false. - -- Issue #22415: Fixed debugging output of the GROUPREF_EXISTS opcode in the re - module. Removed trailing spaces in debugging output. - -- Issue #22423: Unhandled exception in thread no longer causes unhandled - AttributeError when sys.stderr is None. - -- Issue #21332: Ensure that ``bufsize=1`` in subprocess.Popen() selects - line buffering, rather than block buffering. Patch by Akira Li. - - -What's New in Python 3.4.2rc1? 
-============================== - -Release date: 2014-09-22 - -Core and Builtins ------------------ - -- Issue #22258: Fix the the internal function set_inheritable() on Illumos. ->>>>>>> other This platform exposes the function ``ioctl(FIOCLEX)``, but calling it fails with errno is ENOTTY: "Inappropriate ioctl for device". set_inheritable() now falls back to the slower ``fcntl()`` (``F_GETFD`` and then ``F_SETFD``). -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 17:30:36 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 15:30:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E2=29=3A_Backport_b533c?= =?utf-8?q?c11d114_to_fix_intermittent_test=5Furllibnet_failures=2E?= Message-ID: <20140930153023.75001.16910@mail.hg.python.org> https://hg.python.org/cpython/rev/2802f5cd1384 changeset: 92674:2802f5cd1384 branch: 3.2 parent: 92672:89b2d5d0f23d user: Georg Brandl date: Tue Sep 30 17:30:18 2014 +0200 summary: Backport b533cc11d114 to fix intermittent test_urllibnet failures. files: Lib/test/test_urllibnet.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_urllibnet.py b/Lib/test/test_urllibnet.py --- a/Lib/test/test_urllibnet.py +++ b/Lib/test/test_urllibnet.py @@ -104,7 +104,7 @@ # test can't pass on Windows. return # Make sure fd returned by fileno is valid. - with self.urlopen("http://www.example.com/", timeout=None) as open_url: + with self.urlopen("http://www.google.com/", timeout=None) as open_url: fd = open_url.fileno() with os.fdopen(fd, 'rb') as f: self.assertTrue(f.read(), "reading from file created using fd " -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 17:49:30 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 15:49:30 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Add_new_security_releases=2E?= Message-ID: <20140930154921.86171.15298@mail.hg.python.org> https://hg.python.org/peps/rev/c63dce652248 changeset: 5567:c63dce652248 user: Georg Brandl date: Tue Sep 30 17:49:17 2014 +0200 summary: Add new security releases. 
files: pep-0392.txt | 6 ++++++ pep-0398.txt | 6 ++++++ 2 files changed, 12 insertions(+), 0 deletions(-) diff --git a/pep-0392.txt b/pep-0392.txt --- a/pep-0392.txt +++ b/pep-0392.txt @@ -97,6 +97,12 @@ -- Only security releases after 3.2.5 -- +3.2.6 schedule +-------------- + +- 3.2.6 candidate 1 (source-only release): October 4, 2014 +- 3.2.6 final (source-only release): October 11, 2014 + Features for 3.2 ================ diff --git a/pep-0398.txt b/pep-0398.txt --- a/pep-0398.txt +++ b/pep-0398.txt @@ -101,6 +101,12 @@ - 3.3.5 candidate 2: March 1, 2014 - 3.3.5 final: March 8, 2014 +3.3.6 schedule +-------------- + +- 3.3.6 candidate 1 (source-only release): October 4, 2014 +- 3.3.6 final (source-only release): October 11, 2014 + Features for 3.3 ================ -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 18:12:01 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 16:12:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDQ4?= =?utf-8?q?=3A_asyncio=2C_cleanup_=5Frun=5Fonce=28=29=2C_only_iterate_once?= =?utf-8?q?_to_remove_delayed?= Message-ID: <20140930161143.59794.84491@mail.hg.python.org> https://hg.python.org/cpython/rev/b85ed8bb7523 changeset: 92675:b85ed8bb7523 branch: 3.4 parent: 92656:eb2bf1c2f654 user: Victor Stinner date: Tue Sep 30 18:08:36 2014 +0200 summary: Issue #22448: asyncio, cleanup _run_once(), only iterate once to remove delayed calls that were cancelled. files: Lib/asyncio/base_events.py | 11 +++++++---- 1 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -994,19 +994,22 @@ 'call_later' callbacks. """ - # Remove delayed calls that were cancelled if their number is too high sched_count = len(self._scheduled) if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and self._timer_cancelled_count / sched_count > _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] for handle in self._scheduled: if handle._cancelled: handle._scheduled = False + else: + new_scheduled.append(handle) - self._scheduled = [x for x in self._scheduled if not x._cancelled] + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled self._timer_cancelled_count = 0 - - heapq.heapify(self._scheduled) else: # Remove delayed calls that were cancelled from head of queue. while self._scheduled and self._scheduled[0]._cancelled: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 18:12:01 2014 From: python-checkins at python.org (victor.stinner) Date: Tue, 30 Sep 2014 16:12:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E4=29_Issue_=2322448=3A_asyncio=2C_cleanup_?= =?utf-8?q?=5Frun=5Fonce=28=29=2C_only_iterate_once_to?= Message-ID: <20140930161143.74985.32584@mail.hg.python.org> https://hg.python.org/cpython/rev/8e9df3414185 changeset: 92676:8e9df3414185 parent: 92673:8145f25f26aa parent: 92675:b85ed8bb7523 user: Victor Stinner date: Tue Sep 30 18:11:00 2014 +0200 summary: (Merge 3.4) Issue #22448: asyncio, cleanup _run_once(), only iterate once to remove delayed calls that were cancelled. 
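A note on the pattern in Issue #22448 (the 3.4 diff above; the merge changeset's identical diff follows below): rather than marking cancelled timer handles, filtering self._scheduled with a list comprehension, and then re-heapifying, _run_once() now performs the whole cleanup in a single pass. A minimal standalone sketch of that pattern — the Handle class, the prune_cancelled name and the threshold values are illustrative stand-ins, not asyncio's private TimerHandle API or _MIN_* constants:

    import heapq

    class Handle:
        """Stand-in for asyncio.TimerHandle: ordered by its deadline."""
        def __init__(self, when):
            self.when = when
            self.cancelled = False
            self.scheduled = True

        def __lt__(self, other):
            return self.when < other.when

    MIN_SCHEDULED = 100            # illustrative thresholds; base_events.py
    MIN_CANCELLED_FRACTION = 0.5   # keeps its own private constants for these

    def prune_cancelled(scheduled, cancelled_count):
        # Single pass: detach cancelled handles, keep live ones, heapify once.
        count = len(scheduled)
        if (count > MIN_SCHEDULED and
                cancelled_count / count > MIN_CANCELLED_FRACTION):
            kept = []
            for handle in scheduled:
                if handle.cancelled:
                    handle.scheduled = False   # dropped from the loop's heap
                else:
                    kept.append(handle)
            heapq.heapify(kept)                # restore the heap invariant once
            return kept, 0                     # cancellation counter resets
        return scheduled, cancelled_count

The old code traversed the list once to clear the _scheduled flags and again in the comprehension; building the kept list while flagging cancelled handles removes the second traversal, which is what the summary's "only iterate once" refers to.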
files: Lib/asyncio/base_events.py | 11 +++++++---- 1 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -994,19 +994,22 @@ 'call_later' callbacks. """ - # Remove delayed calls that were cancelled if their number is too high sched_count = len(self._scheduled) if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and self._timer_cancelled_count / sched_count > _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] for handle in self._scheduled: if handle._cancelled: handle._scheduled = False + else: + new_scheduled.append(handle) - self._scheduled = [x for x in self._scheduled if not x._cancelled] + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled self._timer_cancelled_count = 0 - - heapq.heapify(self._scheduled) else: # Remove delayed calls that were cancelled from head of queue. while self._scheduled and self._scheduled[0]._cancelled: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 19:35:00 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 17:35:00 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE5ODU1?= =?utf-8?q?=3A_uuid=2Egetnode=28=29_on_Unix_now_looks_on_the_PATH_for_the?= Message-ID: <20140930173446.74985.75515@mail.hg.python.org> https://hg.python.org/cpython/rev/f9cd915410d2 changeset: 92677:f9cd915410d2 branch: 3.2 parent: 92674:2802f5cd1384 user: Georg Brandl date: Tue Sep 30 19:34:19 2014 +0200 summary: Issue #19855: uuid.getnode() on Unix now looks on the PATH for the executables used to find the mac address, with /sbin and /usr/sbin as fallbacks. Issue #11508: Fixed uuid.getnode() and uuid.uuid1() on environment with virtual interface. Original patch by Kent Frazier. Issue #18784: The uuid module no more attempts to load libc via ctypes.CDLL, if all necessary functions are already found in libuuid. Patch by Evgeny Sologubov. Issue #16102: Make uuid._netbios_getnode() work again on Python 3. files: Lib/test/test_uuid.py | 21 +++++++++++++ Lib/uuid.py | 47 ++++++++++++++++++++---------- Misc/ACKS | 2 + Misc/NEWS | 13 ++++++++ 4 files changed, 67 insertions(+), 16 deletions(-) diff --git a/Lib/test/test_uuid.py b/Lib/test/test_uuid.py --- a/Lib/test/test_uuid.py +++ b/Lib/test/test_uuid.py @@ -1,6 +1,8 @@ from unittest import TestCase from test import support import builtins +import io +import os import uuid def importable(name): @@ -360,6 +362,25 @@ self.assertEqual(node1, node2) + def test_find_mac(self): + data = '''\ + +fake hwaddr +cscotun0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 +eth0 Link encap:Ethernet HWaddr 12:34:56:78:90:ab +''' + def mock_popen(cmd): + return io.StringIO(data) + + with support.swap_attr(os, 'popen', mock_popen): + mac = uuid._find_mac( + command='ifconfig', + args='', + hw_identifiers=['hwaddr'], + get_index=lambda x: x + 1, + ) + self.assertEqual(mac, 0x1234567890ab) + def test_uuid1(self): # uuid1 requires ctypes. 
try: diff --git a/Lib/uuid.py b/Lib/uuid.py --- a/Lib/uuid.py +++ b/Lib/uuid.py @@ -313,25 +313,38 @@ def _find_mac(command, args, hw_identifiers, get_index): import os - for dir in ['', '/sbin/', '/usr/sbin']: + path = os.environ.get("PATH", os.defpath).split(os.pathsep) + path.extend(('/sbin', '/usr/sbin')) + for dir in path: executable = os.path.join(dir, command) - if not os.path.exists(executable): - continue + if (os.path.exists(executable) and + os.access(executable, os.F_OK | os.X_OK) and + not os.path.isdir(executable)): + break + else: + return None - try: - # LC_ALL to get English output, 2>/dev/null to - # prevent output on stderr - cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args) - with os.popen(cmd) as pipe: - for line in pipe: - words = line.lower().split() - for i in range(len(words)): - if words[i] in hw_identifiers: + try: + # LC_ALL to ensure English output, 2>/dev/null to + # prevent output on stderr + cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args) + with os.popen(cmd) as pipe: + for line in pipe: + words = line.lower().split() + for i in range(len(words)): + if words[i] in hw_identifiers: + try: return int( words[get_index(i)].replace(':', ''), 16) - except IOError: - continue - return None + except (ValueError, IndexError): + # Virtual interfaces, such as those provided by + # VPNs, do not have a colon-delimited MAC address + # as expected, but a 16-byte HWAddr separated by + # dashes. These should be ignored in favor of a + # real MAC address + pass + except IOError: + pass def _ifconfig_getnode(): """Get the hardware address on Unix by running ifconfig.""" @@ -406,7 +419,7 @@ if win32wnet.Netbios(ncb) != 0: continue status._unpack() - bytes = map(ord, status.adapter_address) + bytes = status.adapter_address return ((bytes[0]<<40) + (bytes[1]<<32) + (bytes[2]<<24) + (bytes[3]<<16) + (bytes[4]<<8) + bytes[5]) @@ -429,6 +442,8 @@ _uuid_generate_random = lib.uuid_generate_random if hasattr(lib, 'uuid_generate_time'): _uuid_generate_time = lib.uuid_generate_time + if _uuid_generate_random is not None: + break # found everything we were looking for # The uuid_generate_* functions are broken on MacOS X 10.5, as noted # in issue #8621 the function generates the same sequence of values diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -353,6 +353,7 @@ John Fouhy Stefan Franke Martin Franklin +Kent Frazier Robin Friedrich Bradley Froehle Ivan Frohne @@ -1028,6 +1029,7 @@ Rafal Smotrzyk Dirk Soede Paul Sokolovsky +Evgeny Sologubov Cody Somerville Clay Spence Stefan Sperling diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -47,6 +47,19 @@ with non-standard cookie handling in some Web browsers. Reported by Sergey Bobrov. +- Issue #19855: uuid.getnode() on Unix now looks on the PATH for the + executables used to find the mac address, with /sbin and /usr/sbin as + fallbacks. + +- Issue #11508: Fixed uuid.getnode() and uuid.uuid1() on environment with + virtual interface. Original patch by Kent Frazier. + +- Issue #18784: The uuid module no more attempts to load libc via ctypes.CDLL, + if all necessary functions are already found in libuuid. + Patch by Evgeny Sologubov. + +- Issue #16102: Make uuid._netbios_getnode() work again on Python 3. + - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. 
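On the Issue #19855 part of the uuid change above: the core of the patch is the executable lookup that now runs before os.popen() in _find_mac() — honour $PATH first, then fall back to /sbin and /usr/sbin, and accept only executable non-directories. A standalone sketch of just that lookup; the find_tool helper name is mine, not part of the uuid module:

    import os

    def find_tool(command, fallbacks=('/sbin', '/usr/sbin')):
        # Search the user's PATH first, then the sbin fallbacks, mirroring
        # the lookup this changeset adds to uuid._find_mac().
        path = os.environ.get('PATH', os.defpath).split(os.pathsep)
        path.extend(fallbacks)
        for dir in path:
            executable = os.path.join(dir, command)
            if (os.path.exists(executable)
                    and os.access(executable, os.F_OK | os.X_OK)
                    and not os.path.isdir(executable)):
                return executable
        return None

    # find_tool('ifconfig') might return something like '/sbin/ifconfig',
    # or None when the command is not installed.

Returning None instead of raising mirrors _find_mac(), which reports failure as None so that uuid.getnode() can move on to its other strategies.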
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 21:17:11 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 30 Sep 2014 19:17:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2318711=3A_Add_a_ne?= =?utf-8?q?w_=60PyErr=5FFormatV=60_function=2C_similar_to_=60PyErr=5FForma?= =?utf-8?q?t=60_but?= Message-ID: <20140930191633.86183.96415@mail.hg.python.org> https://hg.python.org/cpython/rev/5629481cd26d changeset: 92678:5629481cd26d parent: 92676:8e9df3414185 user: Antoine Pitrou date: Tue Sep 30 21:16:27 2014 +0200 summary: Issue #18711: Add a new `PyErr_FormatV` function, similar to `PyErr_Format` but accepting a `va_list` argument. files: Doc/c-api/exceptions.rst | 8 ++++++++ Doc/data/refcounts.dat | 5 +++++ Include/pyerrors.h | 6 ++++++ Misc/NEWS | 3 +++ PC/python3.def | 1 + PC/python35stub.def | 1 + Python/errors.c | 25 +++++++++++++++---------- 7 files changed, 39 insertions(+), 10 deletions(-) diff --git a/Doc/c-api/exceptions.rst b/Doc/c-api/exceptions.rst --- a/Doc/c-api/exceptions.rst +++ b/Doc/c-api/exceptions.rst @@ -197,6 +197,14 @@ string. +.. c:function:: PyObject* PyErr_FormatV(PyObject *exception, const char *format, va_list vargs) + + Same as :c:func:`PyErr_Format`, but taking a `va_list` argument rather + than a variable number of arguments. + + .. versionadded:: 3.5 + + .. c:function:: void PyErr_SetNone(PyObject *type) This is a shorthand for ``PyErr_SetObject(type, Py_None)``. diff --git a/Doc/data/refcounts.dat b/Doc/data/refcounts.dat --- a/Doc/data/refcounts.dat +++ b/Doc/data/refcounts.dat @@ -349,6 +349,11 @@ PyErr_Format:const char*:format:: PyErr_Format::...:: +PyErr_FormatV:PyObject*::null: +PyErr_FormatV:PyObject*:exception:+1: +PyErr_FormatV:const char*:format:: +PyErr_FormatV:va_list:vargs:: + PyErr_WarnEx:int::: PyErr_WarnEx:PyObject*:category:0: PyErr_WarnEx:const char*:message:: diff --git a/Include/pyerrors.h b/Include/pyerrors.h --- a/Include/pyerrors.h +++ b/Include/pyerrors.h @@ -242,6 +242,12 @@ const char *format, /* ASCII-encoded string */ ... ); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000 +PyAPI_FUNC(PyObject *) PyErr_FormatV( + PyObject *exception, + const char *format, + va_list vargs); +#endif #ifdef MS_WINDOWS PyAPI_FUNC(PyObject *) PyErr_SetFromWindowsErrWithFilename( diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #18711: Add a new `PyErr_FormatV` function, similar to `PyErr_Format` + but accepting a `va_list` argument. + - Issue #22520: Fix overflow checking when generating the repr of a unicode object. 
diff --git a/PC/python3.def b/PC/python3.def --- a/PC/python3.def +++ b/PC/python3.def @@ -120,6 +120,7 @@ PyErr_ExceptionMatches=python35.PyErr_ExceptionMatches PyErr_Fetch=python35.PyErr_Fetch PyErr_Format=python35.PyErr_Format + PyErr_FormatV=python35.PyErr_FormatV PyErr_GivenExceptionMatches=python35.PyErr_GivenExceptionMatches PyErr_NewException=python35.PyErr_NewException PyErr_NewExceptionWithDoc=python35.PyErr_NewExceptionWithDoc diff --git a/PC/python35stub.def b/PC/python35stub.def --- a/PC/python35stub.def +++ b/PC/python35stub.def @@ -119,6 +119,7 @@ PyErr_ExceptionMatches PyErr_Fetch PyErr_Format +PyErr_FormatV PyErr_GivenExceptionMatches PyErr_NewException PyErr_NewExceptionWithDoc diff --git a/Python/errors.c b/Python/errors.c --- a/Python/errors.c +++ b/Python/errors.c @@ -749,19 +749,11 @@ #define PyErr_BadInternalCall() _PyErr_BadInternalCall(__FILE__, __LINE__) - PyObject * -PyErr_Format(PyObject *exception, const char *format, ...) +PyErr_FormatV(PyObject *exception, const char *format, va_list vargs) { - va_list vargs; PyObject* string; -#ifdef HAVE_STDARG_PROTOTYPES - va_start(vargs, format); -#else - va_start(vargs); -#endif - #ifdef Py_DEBUG /* in debug mode, PyEval_EvalFrameEx() fails with an assertion error if an exception is set when it is called */ @@ -771,12 +763,25 @@ string = PyUnicode_FromFormatV(format, vargs); PyErr_SetObject(exception, string); Py_XDECREF(string); + return NULL; +} + + +PyObject * +PyErr_Format(PyObject *exception, const char *format, ...) +{ + va_list vargs; +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, format); +#else + va_start(vargs); +#endif + PyErr_FormatV(exception, format, vargs); va_end(vargs); return NULL; } - PyObject * PyErr_NewException(const char *name, PyObject *base, PyObject *dict) { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 21:56:22 2014 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 30 Sep 2014 19:56:22 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Reorganize_C_API_docs_of_t?= =?utf-8?q?he_exception_API?= Message-ID: <20140930195615.75005.19161@mail.hg.python.org> https://hg.python.org/cpython/rev/0c126a29d1f9 changeset: 92679:0c126a29d1f9 user: Antoine Pitrou date: Tue Sep 30 21:56:10 2014 +0200 summary: Reorganize C API docs of the exception API files: Doc/c-api/exceptions.rst | 654 ++++++++++++++------------ 1 files changed, 347 insertions(+), 307 deletions(-) diff --git a/Doc/c-api/exceptions.rst b/Doc/c-api/exceptions.rst --- a/Doc/c-api/exceptions.rst +++ b/Doc/c-api/exceptions.rst @@ -9,13 +9,19 @@ The functions described in this chapter will let you handle and raise Python exceptions. It is important to understand some of the basics of Python -exception handling. It works somewhat like the Unix :c:data:`errno` variable: +exception handling. It works somewhat like the POSIX :c:data:`errno` variable: there is a global indicator (per thread) of the last error that occurred. Most -functions don't clear this on success, but will set it to indicate the cause of -the error on failure. Most functions also return an error indicator, usually -*NULL* if they are supposed to return a pointer, or ``-1`` if they return an -integer (exception: the :c:func:`PyArg_\*` functions return ``1`` for success and -``0`` for failure). +C API functions don't clear this on success, but will set it to indicate the +cause of the error on failure. 
Most C API functions also return an error +indicator, usually *NULL* if they are supposed to return a pointer, or ``-1`` +if they return an integer (exception: the :c:func:`PyArg_\*` functions +return ``1`` for success and ``0`` for failure). + +Concretely, the error indicator consists of three object pointers: the +exception's type, the exception's value, and the traceback object. Any +of those pointers can be NULL if non-set (although some combinations are +forbidden, for example you can't have a non-NULL traceback if the exception +type is NULL). When a function must fail because some function it called failed, it generally doesn't set the error indicator; the function it called already set it. It is @@ -27,12 +33,21 @@ propagated, additional calls into the Python/C API may not behave as intended and may fail in mysterious ways. -The error indicator consists of three Python objects corresponding to the result -of ``sys.exc_info()``. API functions exist to interact with the error indicator -in various ways. There is a separate error indicator for each thread. +.. note:: + The error indicator is **not** the result of :func:`sys.exc_info()`. + The former corresponds to an exception that is not yet caught (and is + therefore still propagating), while the latter returns an exception after + it is caught (and has therefore stopped propagating). -.. XXX Order of these should be more thoughtful. - Either alphabetical or some kind of structure. + +Printing and clearing +===================== + + +.. c:function:: void PyErr_Clear() + + Clear the error indicator. If the error indicator is not set, there is no + effect. .. c:function:: void PyErr_PrintEx(int set_sys_last_vars) @@ -51,6 +66,277 @@ Alias for ``PyErr_PrintEx(1)``. +.. c:function:: void PyErr_WriteUnraisable(PyObject *obj) + + This utility function prints a warning message to ``sys.stderr`` when an + exception has been set but it is impossible for the interpreter to actually + raise the exception. It is used, for example, when an exception occurs in an + :meth:`__del__` method. + + The function is called with a single argument *obj* that identifies the context + in which the unraisable exception occurred. The repr of *obj* will be printed in + the warning message. + + +Raising exceptions +================== + +These functions help you set the current thread's error indicator. +For convenience, some of these functions will always return a +NULL pointer for use in a ``return`` statement. + + +.. c:function:: void PyErr_SetString(PyObject *type, const char *message) + + This is the most common way to set the error indicator. The first argument + specifies the exception type; it is normally one of the standard exceptions, + e.g. :c:data:`PyExc_RuntimeError`. You need not increment its reference count. + The second argument is an error message; it is decoded from ``'utf-8``'. + + +.. c:function:: void PyErr_SetObject(PyObject *type, PyObject *value) + + This function is similar to :c:func:`PyErr_SetString` but lets you specify an + arbitrary Python object for the "value" of the exception. + + +.. c:function:: PyObject* PyErr_Format(PyObject *exception, const char *format, ...) + + This function sets the error indicator and returns *NULL*. *exception* + should be a Python exception class. The *format* and subsequent + parameters help format the error message; they have the same meaning and + values as in :c:func:`PyUnicode_FromFormat`. *format* is an ASCII-encoded + string. + + +.. 
c:function:: PyObject* PyErr_FormatV(PyObject *exception, const char *format, va_list vargs) + + Same as :c:func:`PyErr_Format`, but taking a `va_list` argument rather + than a variable number of arguments. + + .. versionadded:: 3.5 + + +.. c:function:: void PyErr_SetNone(PyObject *type) + + This is a shorthand for ``PyErr_SetObject(type, Py_None)``. + + +.. c:function:: int PyErr_BadArgument() + + This is a shorthand for ``PyErr_SetString(PyExc_TypeError, message)``, where + *message* indicates that a built-in operation was invoked with an illegal + argument. It is mostly for internal use. + + +.. c:function:: PyObject* PyErr_NoMemory() + + This is a shorthand for ``PyErr_SetNone(PyExc_MemoryError)``; it returns *NULL* + so an object allocation function can write ``return PyErr_NoMemory();`` when it + runs out of memory. + + +.. c:function:: PyObject* PyErr_SetFromErrno(PyObject *type) + + .. index:: single: strerror() + + This is a convenience function to raise an exception when a C library function + has returned an error and set the C variable :c:data:`errno`. It constructs a + tuple object whose first item is the integer :c:data:`errno` value and whose + second item is the corresponding error message (gotten from :c:func:`strerror`), + and then calls ``PyErr_SetObject(type, object)``. On Unix, when the + :c:data:`errno` value is :const:`EINTR`, indicating an interrupted system call, + this calls :c:func:`PyErr_CheckSignals`, and if that set the error indicator, + leaves it set to that. The function always returns *NULL*, so a wrapper + function around a system call can write ``return PyErr_SetFromErrno(type);`` + when the system call returns an error. + + +.. c:function:: PyObject* PyErr_SetFromErrnoWithFilenameObject(PyObject *type, PyObject *filenameObject) + + Similar to :c:func:`PyErr_SetFromErrno`, with the additional behavior that if + *filenameObject* is not *NULL*, it is passed to the constructor of *type* as + a third parameter. In the case of :exc:`OSError` exception, + this is used to define the :attr:`filename` attribute of the + exception instance. + + +.. c:function:: PyObject* PyErr_SetFromErrnoWithFilenameObjects(PyObject *type, PyObject *filenameObject, PyObject *filenameObject2) + + Similar to :c:func:`PyErr_SetFromErrnoWithFilenameObject`, but takes a second + filename object, for raising errors when a function that takes two filenames + fails. + + .. versionadded:: 3.4 + + +.. c:function:: PyObject* PyErr_SetFromErrnoWithFilename(PyObject *type, const char *filename) + + Similar to :c:func:`PyErr_SetFromErrnoWithFilenameObject`, but the filename + is given as a C string. *filename* is decoded from the filesystem encoding + (:func:`os.fsdecode`). + + +.. c:function:: PyObject* PyErr_SetFromWindowsErr(int ierr) + + This is a convenience function to raise :exc:`WindowsError`. If called with + *ierr* of :c:data:`0`, the error code returned by a call to :c:func:`GetLastError` + is used instead. It calls the Win32 function :c:func:`FormatMessage` to retrieve + the Windows description of error code given by *ierr* or :c:func:`GetLastError`, + then it constructs a tuple object whose first item is the *ierr* value and whose + second item is the corresponding error message (gotten from + :c:func:`FormatMessage`), and then calls ``PyErr_SetObject(PyExc_WindowsError, + object)``. This function always returns *NULL*. Availability: Windows. + + +.. 
c:function:: PyObject* PyErr_SetExcFromWindowsErr(PyObject *type, int ierr) + + Similar to :c:func:`PyErr_SetFromWindowsErr`, with an additional parameter + specifying the exception type to be raised. Availability: Windows. + + +.. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, const char *filename) + + Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, but the + filename is given as a C string. *filename* is decoded from the filesystem + encoding (:func:`os.fsdecode`). Availability: Windows. + + +.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObject(PyObject *type, int ierr, PyObject *filename) + + Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, with an + additional parameter specifying the exception type to be raised. + Availability: Windows. + + +.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObjects(PyObject *type, int ierr, PyObject *filename, PyObject *filename2) + + Similar to :c:func:`PyErr_SetExcFromWindowsErrWithFilenameObject`, + but accepts a second filename object. + Availability: Windows. + + .. versionadded:: 3.4 + + +.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilename(PyObject *type, int ierr, const char *filename) + + Similar to :c:func:`PyErr_SetFromWindowsErrWithFilename`, with an additional + parameter specifying the exception type to be raised. Availability: Windows. + + +.. c:function:: PyObject* PyErr_SetImportError(PyObject *msg, PyObject *name, PyObject *path) + + This is a convenience function to raise :exc:`ImportError`. *msg* will be + set as the exception's message string. *name* and *path*, both of which can + be ``NULL``, will be set as the :exc:`ImportError`'s respective ``name`` + and ``path`` attributes. + + .. versionadded:: 3.3 + + +.. c:function:: void PyErr_SyntaxLocationObject(PyObject *filename, int lineno, int col_offset) + + Set file, line, and offset information for the current exception. If the + current exception is not a :exc:`SyntaxError`, then it sets additional + attributes, which make the exception printing subsystem think the exception + is a :exc:`SyntaxError`. + + .. versionadded:: 3.4 + + +.. c:function:: void PyErr_SyntaxLocationEx(char *filename, int lineno, int col_offset) + + Like :c:func:`PyErr_SyntaxLocationObject`, but *filename* is a byte string + decoded from the filesystem encoding (:func:`os.fsdecode`). + + .. versionadded:: 3.2 + + +.. c:function:: void PyErr_SyntaxLocation(char *filename, int lineno) + + Like :c:func:`PyErr_SyntaxLocationEx`, but the col_offset parameter is + omitted. + + +.. c:function:: void PyErr_BadInternalCall() + + This is a shorthand for ``PyErr_SetString(PyExc_SystemError, message)``, + where *message* indicates that an internal operation (e.g. a Python/C API + function) was invoked with an illegal argument. It is mostly for internal + use. + + +Issuing warnings +================ + +Use these functions to issue warnings from C code. They mirror similar +functions exported by the Python :mod:`warnings` module. They normally +print a warning message to *sys.stderr*; however, it is +also possible that the user has specified that warnings are to be turned into +errors, and in that case they will raise an exception. It is also possible that +the functions raise an exception because of a problem with the warning machinery. +The return value is ``0`` if no exception is raised, or ``-1`` if an exception +is raised. 
(It is not possible to determine whether a warning message is +actually printed, nor what the reason is for the exception; this is +intentional.) If an exception is raised, the caller should do its normal +exception handling (for example, :c:func:`Py_DECREF` owned references and return +an error value). + +.. c:function:: int PyErr_WarnEx(PyObject *category, char *message, int stack_level) + + Issue a warning message. The *category* argument is a warning category (see + below) or *NULL*; the *message* argument is an UTF-8 encoded string. *stack_level* is a + positive number giving a number of stack frames; the warning will be issued from + the currently executing line of code in that stack frame. A *stack_level* of 1 + is the function calling :c:func:`PyErr_WarnEx`, 2 is the function above that, + and so forth. + + Warning categories must be subclasses of :c:data:`Warning`; the default warning + category is :c:data:`RuntimeWarning`. The standard Python warning categories are + available as global variables whose names are ``PyExc_`` followed by the Python + exception name. These have the type :c:type:`PyObject\*`; they are all class + objects. Their names are :c:data:`PyExc_Warning`, :c:data:`PyExc_UserWarning`, + :c:data:`PyExc_UnicodeWarning`, :c:data:`PyExc_DeprecationWarning`, + :c:data:`PyExc_SyntaxWarning`, :c:data:`PyExc_RuntimeWarning`, and + :c:data:`PyExc_FutureWarning`. :c:data:`PyExc_Warning` is a subclass of + :c:data:`PyExc_Exception`; the other warning categories are subclasses of + :c:data:`PyExc_Warning`. + + For information about warning control, see the documentation for the + :mod:`warnings` module and the :option:`-W` option in the command line + documentation. There is no C API for warning control. + + +.. c:function:: int PyErr_WarnExplicitObject(PyObject *category, PyObject *message, PyObject *filename, int lineno, PyObject *module, PyObject *registry) + + Issue a warning message with explicit control over all warning attributes. This + is a straightforward wrapper around the Python function + :func:`warnings.warn_explicit`, see there for more information. The *module* + and *registry* arguments may be set to *NULL* to get the default effect + described there. + + .. versionadded:: 3.4 + + +.. c:function:: int PyErr_WarnExplicit(PyObject *category, const char *message, const char *filename, int lineno, const char *module, PyObject *registry) + + Similar to :c:func:`PyErr_WarnExplicitObject` except that *message* and + *module* are UTF-8 encoded strings, and *filename* is decoded from the + filesystem encoding (:func:`os.fsdecode`). + + +.. c:function:: int PyErr_WarnFormat(PyObject *category, Py_ssize_t stack_level, const char *format, ...) + + Function similar to :c:func:`PyErr_WarnEx`, but use + :c:func:`PyUnicode_FromFormat` to format the warning message. *format* is + an ASCII-encoded string. + + .. versionadded:: 3.2 + + +Querying the error indicator +============================ + .. c:function:: PyObject* PyErr_Occurred() Test whether the error indicator is set. If set, return the exception *type* @@ -76,12 +362,53 @@ .. c:function:: int PyErr_GivenExceptionMatches(PyObject *given, PyObject *exc) - Return true if the *given* exception matches the exception in *exc*. If + Return true if the *given* exception matches the exception type in *exc*. If *exc* is a class object, this also returns true when *given* is an instance - of a subclass. If *exc* is a tuple, all exceptions in the tuple (and + of a subclass. 
If *exc* is a tuple, all exception types in the tuple (and recursively in subtuples) are searched for a match. +.. c:function:: void PyErr_Fetch(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback) + + Retrieve the error indicator into three variables whose addresses are passed. + If the error indicator is not set, set all three variables to *NULL*. If it is + set, it will be cleared and you own a reference to each object retrieved. The + value and traceback object may be *NULL* even when the type object is not. + + .. note:: + + This function is normally only used by code that needs to catch exceptions or + by code that needs to save and restore the error indicator temporarily, e.g.:: + + { + PyObject **type, **value, **traceback; + PyErr_Fetch(&type, &value, &traceback); + + /* ... code that might produce other errors ... */ + + PyErr_Restore(type, value, traceback); + } + + +.. c:function:: void PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback) + + Set the error indicator from the three objects. If the error indicator is + already set, it is cleared first. If the objects are *NULL*, the error + indicator is cleared. Do not pass a *NULL* type and non-*NULL* value or + traceback. The exception type should be a class. Do not pass an invalid + exception type or value. (Violating these rules will cause subtle problems + later.) This call takes away a reference to each object: you must own a + reference to each object before the call and after the call you no longer own + these references. (If you don't understand this, don't use this function. I + warned you.) + + .. note:: + + This function is normally only used by code that needs to save and restore the + error indicator temporarily. Use :c:func:`PyErr_Fetch` to save the current + error indicator. + + .. c:function:: void PyErr_NormalizeException(PyObject**exc, PyObject**val, PyObject**tb) Under certain circumstances, the values returned by :c:func:`PyErr_Fetch` below @@ -101,48 +428,10 @@ } -.. c:function:: void PyErr_Clear() - - Clear the error indicator. If the error indicator is not set, there is no - effect. - - -.. c:function:: void PyErr_Fetch(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback) - - Retrieve the error indicator into three variables whose addresses are passed. - If the error indicator is not set, set all three variables to *NULL*. If it is - set, it will be cleared and you own a reference to each object retrieved. The - value and traceback object may be *NULL* even when the type object is not. - - .. note:: - - This function is normally only used by code that needs to handle exceptions or - by code that needs to save and restore the error indicator temporarily. - - -.. c:function:: void PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback) - - Set the error indicator from the three objects. If the error indicator is - already set, it is cleared first. If the objects are *NULL*, the error - indicator is cleared. Do not pass a *NULL* type and non-*NULL* value or - traceback. The exception type should be a class. Do not pass an invalid - exception type or value. (Violating these rules will cause subtle problems - later.) This call takes away a reference to each object: you must own a - reference to each object before the call and after the call you no longer own - these references. (If you don't understand this, don't use this function. I - warned you.) - - .. 
note:: - - This function is normally only used by code that needs to save and restore the - error indicator temporarily; use :c:func:`PyErr_Fetch` to save the current - exception state. - - .. c:function:: void PyErr_GetExcInfo(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback) Retrieve the exception info, as known from ``sys.exc_info()``. This refers - to an exception that was already caught, not to an exception that was + to an exception that was *already caught*, not to an exception that was freshly raised. Returns new references for the three objects, any of which may be *NULL*. Does not modify the exception info state. @@ -159,7 +448,7 @@ .. c:function:: void PyErr_SetExcInfo(PyObject *type, PyObject *value, PyObject *traceback) Set the exception info, as known from ``sys.exc_info()``. This refers - to an exception that was already caught, not to an exception that was + to an exception that was *already caught*, not to an exception that was freshly raised. This function steals the references of the arguments. To clear the exception state, pass *NULL* for all three arguments. For general rules about the three arguments, see :c:func:`PyErr_Restore`. @@ -174,248 +463,8 @@ .. versionadded:: 3.3 -.. c:function:: void PyErr_SetString(PyObject *type, const char *message) - - This is the most common way to set the error indicator. The first argument - specifies the exception type; it is normally one of the standard exceptions, - e.g. :c:data:`PyExc_RuntimeError`. You need not increment its reference count. - The second argument is an error message; it is decoded from ``'utf-8``'. - - -.. c:function:: void PyErr_SetObject(PyObject *type, PyObject *value) - - This function is similar to :c:func:`PyErr_SetString` but lets you specify an - arbitrary Python object for the "value" of the exception. - - -.. c:function:: PyObject* PyErr_Format(PyObject *exception, const char *format, ...) - - This function sets the error indicator and returns *NULL*. *exception* - should be a Python exception class. The *format* and subsequent - parameters help format the error message; they have the same meaning and - values as in :c:func:`PyUnicode_FromFormat`. *format* is an ASCII-encoded - string. - - -.. c:function:: PyObject* PyErr_FormatV(PyObject *exception, const char *format, va_list vargs) - - Same as :c:func:`PyErr_Format`, but taking a `va_list` argument rather - than a variable number of arguments. - - .. versionadded:: 3.5 - - -.. c:function:: void PyErr_SetNone(PyObject *type) - - This is a shorthand for ``PyErr_SetObject(type, Py_None)``. - - -.. c:function:: int PyErr_BadArgument() - - This is a shorthand for ``PyErr_SetString(PyExc_TypeError, message)``, where - *message* indicates that a built-in operation was invoked with an illegal - argument. It is mostly for internal use. - - -.. c:function:: PyObject* PyErr_NoMemory() - - This is a shorthand for ``PyErr_SetNone(PyExc_MemoryError)``; it returns *NULL* - so an object allocation function can write ``return PyErr_NoMemory();`` when it - runs out of memory. - - -.. c:function:: PyObject* PyErr_SetFromErrno(PyObject *type) - - .. index:: single: strerror() - - This is a convenience function to raise an exception when a C library function - has returned an error and set the C variable :c:data:`errno`. It constructs a - tuple object whose first item is the integer :c:data:`errno` value and whose - second item is the corresponding error message (gotten from :c:func:`strerror`), - and then calls ``PyErr_SetObject(type, object)``. 
On Unix, when the - :c:data:`errno` value is :const:`EINTR`, indicating an interrupted system call, - this calls :c:func:`PyErr_CheckSignals`, and if that set the error indicator, - leaves it set to that. The function always returns *NULL*, so a wrapper - function around a system call can write ``return PyErr_SetFromErrno(type);`` - when the system call returns an error. - - -.. c:function:: PyObject* PyErr_SetFromErrnoWithFilenameObject(PyObject *type, PyObject *filenameObject) - - Similar to :c:func:`PyErr_SetFromErrno`, with the additional behavior that if - *filenameObject* is not *NULL*, it is passed to the constructor of *type* as - a third parameter. In the case of :exc:`OSError` exception, - this is used to define the :attr:`filename` attribute of the - exception instance. - - -.. c:function:: PyObject* PyErr_SetFromErrnoWithFilenameObjects(PyObject *type, PyObject *filenameObject, PyObject *filenameObject2) - - Similar to :c:func:`PyErr_SetFromErrnoWithFilenameObject`, but takes a second - filename object, for raising errors when a function that takes two filenames - fails. - - .. versionadded:: 3.4 - - -.. c:function:: PyObject* PyErr_SetFromErrnoWithFilename(PyObject *type, const char *filename) - - Similar to :c:func:`PyErr_SetFromErrnoWithFilenameObject`, but the filename - is given as a C string. *filename* is decoded from the filesystem encoding - (:func:`os.fsdecode`). - - -.. c:function:: PyObject* PyErr_SetFromWindowsErr(int ierr) - - This is a convenience function to raise :exc:`WindowsError`. If called with - *ierr* of :c:data:`0`, the error code returned by a call to :c:func:`GetLastError` - is used instead. It calls the Win32 function :c:func:`FormatMessage` to retrieve - the Windows description of error code given by *ierr* or :c:func:`GetLastError`, - then it constructs a tuple object whose first item is the *ierr* value and whose - second item is the corresponding error message (gotten from - :c:func:`FormatMessage`), and then calls ``PyErr_SetObject(PyExc_WindowsError, - object)``. This function always returns *NULL*. Availability: Windows. - - -.. c:function:: PyObject* PyErr_SetExcFromWindowsErr(PyObject *type, int ierr) - - Similar to :c:func:`PyErr_SetFromWindowsErr`, with an additional parameter - specifying the exception type to be raised. Availability: Windows. - - -.. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, const char *filename) - - Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, but the - filename is given as a C string. *filename* is decoded from the filesystem - encoding (:func:`os.fsdecode`). Availability: Windows. - - -.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObject(PyObject *type, int ierr, PyObject *filename) - - Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, with an - additional parameter specifying the exception type to be raised. - Availability: Windows. - - -.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObjects(PyObject *type, int ierr, PyObject *filename, PyObject *filename2) - - Similar to :c:func:`PyErr_SetExcFromWindowsErrWithFilenameObject`, - but accepts a second filename object. - Availability: Windows. - - .. versionadded:: 3.4 - - -.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilename(PyObject *type, int ierr, const char *filename) - - Similar to :c:func:`PyErr_SetFromWindowsErrWithFilename`, with an additional - parameter specifying the exception type to be raised. Availability: Windows. - - -.. 
c:function:: PyObject* PyErr_SetImportError(PyObject *msg, PyObject *name, PyObject *path) - - This is a convenience function to raise :exc:`ImportError`. *msg* will be - set as the exception's message string. *name* and *path*, both of which can - be ``NULL``, will be set as the :exc:`ImportError`'s respective ``name`` - and ``path`` attributes. - - .. versionadded:: 3.3 - - -.. c:function:: void PyErr_SyntaxLocationObject(PyObject *filename, int lineno, int col_offset) - - Set file, line, and offset information for the current exception. If the - current exception is not a :exc:`SyntaxError`, then it sets additional - attributes, which make the exception printing subsystem think the exception - is a :exc:`SyntaxError`. - - .. versionadded:: 3.4 - - -.. c:function:: void PyErr_SyntaxLocationEx(char *filename, int lineno, int col_offset) - - Like :c:func:`PyErr_SyntaxLocationObject`, but *filename* is a byte string - decoded from the filesystem encoding (:func:`os.fsdecode`). - - .. versionadded:: 3.2 - - -.. c:function:: void PyErr_SyntaxLocation(char *filename, int lineno) - - Like :c:func:`PyErr_SyntaxLocationEx`, but the col_offset parameter is - omitted. - - -.. c:function:: void PyErr_BadInternalCall() - - This is a shorthand for ``PyErr_SetString(PyExc_SystemError, message)``, - where *message* indicates that an internal operation (e.g. a Python/C API - function) was invoked with an illegal argument. It is mostly for internal - use. - - -.. c:function:: int PyErr_WarnEx(PyObject *category, char *message, int stack_level) - - Issue a warning message. The *category* argument is a warning category (see - below) or *NULL*; the *message* argument is an UTF-8 encoded string. *stack_level* is a - positive number giving a number of stack frames; the warning will be issued from - the currently executing line of code in that stack frame. A *stack_level* of 1 - is the function calling :c:func:`PyErr_WarnEx`, 2 is the function above that, - and so forth. - - This function normally prints a warning message to *sys.stderr*; however, it is - also possible that the user has specified that warnings are to be turned into - errors, and in that case this will raise an exception. It is also possible that - the function raises an exception because of a problem with the warning machinery - (the implementation imports the :mod:`warnings` module to do the heavy lifting). - The return value is ``0`` if no exception is raised, or ``-1`` if an exception - is raised. (It is not possible to determine whether a warning message is - actually printed, nor what the reason is for the exception; this is - intentional.) If an exception is raised, the caller should do its normal - exception handling (for example, :c:func:`Py_DECREF` owned references and return - an error value). - - Warning categories must be subclasses of :c:data:`Warning`; the default warning - category is :c:data:`RuntimeWarning`. The standard Python warning categories are - available as global variables whose names are ``PyExc_`` followed by the Python - exception name. These have the type :c:type:`PyObject\*`; they are all class - objects. Their names are :c:data:`PyExc_Warning`, :c:data:`PyExc_UserWarning`, - :c:data:`PyExc_UnicodeWarning`, :c:data:`PyExc_DeprecationWarning`, - :c:data:`PyExc_SyntaxWarning`, :c:data:`PyExc_RuntimeWarning`, and - :c:data:`PyExc_FutureWarning`. :c:data:`PyExc_Warning` is a subclass of - :c:data:`PyExc_Exception`; the other warning categories are subclasses of - :c:data:`PyExc_Warning`. 
- - For information about warning control, see the documentation for the - :mod:`warnings` module and the :option:`-W` option in the command line - documentation. There is no C API for warning control. - - -.. c:function:: int PyErr_WarnExplicitObject(PyObject *category, PyObject *message, PyObject *filename, int lineno, PyObject *module, PyObject *registry) - - Issue a warning message with explicit control over all warning attributes. This - is a straightforward wrapper around the Python function - :func:`warnings.warn_explicit`, see there for more information. The *module* - and *registry* arguments may be set to *NULL* to get the default effect - described there. - - .. versionadded:: 3.4 - - -.. c:function:: int PyErr_WarnExplicit(PyObject *category, const char *message, const char *filename, int lineno, const char *module, PyObject *registry) - - Similar to :c:func:`PyErr_WarnExplicitObject` except that *message* and - *module* are UTF-8 encoded strings, and *filename* is decoded from the - filesystem encoding (:func:`os.fsdecode`). - - -.. c:function:: int PyErr_WarnFormat(PyObject *category, Py_ssize_t stack_level, const char *format, ...) - - Function similar to :c:func:`PyErr_WarnEx`, but use - :c:func:`PyUnicode_FromFormat` to format the warning message. *format* is - an ASCII-encoded string. - - .. versionadded:: 3.2 +Signal Handling +=============== .. c:function:: int PyErr_CheckSignals() @@ -464,6 +513,9 @@ On Windows, the function now also supports socket handles. +Exception Classes +================= + .. c:function:: PyObject* PyErr_NewException(char *name, PyObject *base, PyObject *dict) This utility function creates and returns a new exception class. The *name* @@ -488,18 +540,6 @@ .. versionadded:: 3.2 -.. c:function:: void PyErr_WriteUnraisable(PyObject *obj) - - This utility function prints a warning message to ``sys.stderr`` when an - exception has been set but it is impossible for the interpreter to actually - raise the exception. It is used, for example, when an exception occurs in an - :meth:`__del__` method. - - The function is called with a single argument *obj* that identifies the context - in which the unraisable exception occurred. The repr of *obj* will be printed in - the warning message. - - Exception Objects ================= -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:02:53 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 20:02:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy40ICgjMjI1Mjgp?= Message-ID: <20140930200246.59794.92795@mail.hg.python.org> https://hg.python.org/cpython/rev/26b767fec7e2 changeset: 92682:26b767fec7e2 parent: 92679:0c126a29d1f9 parent: 92680:04f82abdfb6d user: Benjamin Peterson date: Tue Sep 30 16:02:26 2014 -0400 summary: merge 3.4 (#22528) files: Doc/library/pdb.rst | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst --- a/Doc/library/pdb.rst +++ b/Doc/library/pdb.rst @@ -6,6 +6,9 @@ .. module:: pdb :synopsis: The Python debugger for interactive interpreters. +**Source code:** :source:`Lib/pdb.py` + +-------------- .. 
index:: single: debugging -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:02:53 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 20:02:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_add_link_to_pd?= =?utf-8?q?b_source_=28closes_=2322528=29?= Message-ID: <20140930200245.86183.38145@mail.hg.python.org> https://hg.python.org/cpython/rev/d49b9c8ee8ed changeset: 92681:d49b9c8ee8ed branch: 2.7 parent: 92644:9b4673d7b046 user: Benjamin Peterson date: Tue Sep 30 16:02:06 2014 -0400 summary: add link to pdb source (closes #22528) files: Doc/library/pdb.rst | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst --- a/Doc/library/pdb.rst +++ b/Doc/library/pdb.rst @@ -6,6 +6,9 @@ .. module:: pdb :synopsis: The Python debugger for interactive interpreters. +**Source code:** :source:`Lib/pdb.py` + +-------------- .. index:: single: debugging -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:02:53 2014 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 30 Sep 2014 20:02:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_add_link_to_pd?= =?utf-8?q?b_source_=28closes_=2322528=29?= Message-ID: <20140930200232.86175.52173@mail.hg.python.org> https://hg.python.org/cpython/rev/04f82abdfb6d changeset: 92680:04f82abdfb6d branch: 3.4 parent: 92675:b85ed8bb7523 user: Benjamin Peterson date: Tue Sep 30 16:02:06 2014 -0400 summary: add link to pdb source (closes #22528) files: Doc/library/pdb.rst | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst --- a/Doc/library/pdb.rst +++ b/Doc/library/pdb.rst @@ -6,6 +6,9 @@ .. module:: pdb :synopsis: The Python debugger for interactive interpreters. +**Source code:** :source:`Lib/pdb.py` + +-------------- .. index:: single: debugging -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:17:58 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:17:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Prepare_sphinx?= =?utf-8?q?_extensions_for_1=2E3=2E?= Message-ID: <20140930201754.23197.7062@mail.hg.python.org> https://hg.python.org/cpython/rev/fbce846c8d81 changeset: 92683:fbce846c8d81 branch: 3.4 parent: 92680:04f82abdfb6d user: Georg Brandl date: Tue Sep 30 22:17:41 2014 +0200 summary: Prepare sphinx extensions for 1.3. files: Doc/tools/sphinxext/c_annotations.py | 3 ++- Doc/tools/sphinxext/pyspecific.py | 1 + 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/sphinxext/c_annotations.py --- a/Doc/tools/sphinxext/c_annotations.py +++ b/Doc/tools/sphinxext/c_annotations.py @@ -13,7 +13,7 @@ Usage: Set the `refcount_file` config value to the path to the reference count data file. - :copyright: Copyright 2007-2013 by Georg Brandl. + :copyright: Copyright 2007-2014 by Georg Brandl. :license: Python license. 
""" @@ -118,3 +118,4 @@ signode.parent['stableabi'] = 'stableabi' in self.options return old_handle_signature(self, sig, signode) CObject.handle_signature = new_handle_signature + return {'version': '1.0', 'parallel_read_safe': True} diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -345,3 +345,4 @@ app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction) app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod) app.add_directive('miscnews', MiscNews) + return {'version': '1.0', 'parallel_read_safe': True} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:17:58 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:17:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_with_3=2E4?= Message-ID: <20140930201754.23217.46437@mail.hg.python.org> https://hg.python.org/cpython/rev/50d64b5d98ad changeset: 92684:50d64b5d98ad parent: 92682:26b767fec7e2 parent: 92683:fbce846c8d81 user: Georg Brandl date: Tue Sep 30 22:17:48 2014 +0200 summary: merge with 3.4 files: Doc/tools/sphinxext/c_annotations.py | 3 ++- Doc/tools/sphinxext/pyspecific.py | 1 + 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/sphinxext/c_annotations.py --- a/Doc/tools/sphinxext/c_annotations.py +++ b/Doc/tools/sphinxext/c_annotations.py @@ -13,7 +13,7 @@ Usage: Set the `refcount_file` config value to the path to the reference count data file. - :copyright: Copyright 2007-2013 by Georg Brandl. + :copyright: Copyright 2007-2014 by Georg Brandl. :license: Python license. """ @@ -118,3 +118,4 @@ signode.parent['stableabi'] = 'stableabi' in self.options return old_handle_signature(self, sig, signode) CObject.handle_signature = new_handle_signature + return {'version': '1.0', 'parallel_read_safe': True} diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -345,3 +345,4 @@ app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction) app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod) app.add_directive('miscnews', MiscNews) + return {'version': '1.0', 'parallel_read_safe': True} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:24:15 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:24:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Dont_define_an?= =?utf-8?q?_empty_SPHINXOPTS=2C_which_overrides_a_definition_from_the?= Message-ID: <20140930202413.75003.52696@mail.hg.python.org> https://hg.python.org/cpython/rev/860c50e40720 changeset: 92685:860c50e40720 branch: 3.4 parent: 92683:fbce846c8d81 user: Georg Brandl date: Tue Sep 30 22:23:57 2014 +0200 summary: Dont define an empty SPHINXOPTS, which overrides a definition from the environment. files: Doc/Makefile | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -6,7 +6,6 @@ # You can set these variables from the command line. 
PYTHON = python SPHINXBUILD = sphinx-build -SPHINXOPTS = PAPER = SOURCES = DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:24:16 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:24:16 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_with_3=2E4?= Message-ID: <20140930202415.46891.14068@mail.hg.python.org> https://hg.python.org/cpython/rev/457683a973f3 changeset: 92686:457683a973f3 parent: 92684:50d64b5d98ad parent: 92685:860c50e40720 user: Georg Brandl date: Tue Sep 30 22:24:08 2014 +0200 summary: merge with 3.4 files: Doc/Makefile | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -6,7 +6,6 @@ # You can set these variables from the command line. PYTHON = python SPHINXBUILD = sphinx-build -SPHINXOPTS = PAPER = SOURCES = DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:43:49 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:43:49 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Prepare_sphinx?= =?utf-8?q?_extensions_for_1=2E3=2E?= Message-ID: <20140930204343.86173.13785@mail.hg.python.org> https://hg.python.org/cpython/rev/e4b8628b2613 changeset: 92687:e4b8628b2613 branch: 2.7 parent: 92681:d49b9c8ee8ed user: Georg Brandl date: Tue Sep 30 22:17:41 2014 +0200 summary: Prepare sphinx extensions for 1.3. files: Doc/tools/sphinxext/c_annotations.py | 3 ++- Doc/tools/sphinxext/pyspecific.py | 1 + 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/sphinxext/c_annotations.py --- a/Doc/tools/sphinxext/c_annotations.py +++ b/Doc/tools/sphinxext/c_annotations.py @@ -13,7 +13,7 @@ Usage: Set the `refcount_file` config value to the path to the reference count data file. - :copyright: Copyright 2007-2013 by Georg Brandl. + :copyright: Copyright 2007-2014 by Georg Brandl. :license: Python license. """ @@ -118,3 +118,4 @@ signode.parent['stableabi'] = 'stableabi' in self.options return old_handle_signature(self, sig, signode) CObject.handle_signature = new_handle_signature + return {'version': '1.0', 'parallel_read_safe': True} diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py --- a/Doc/tools/sphinxext/pyspecific.py +++ b/Doc/tools/sphinxext/pyspecific.py @@ -305,3 +305,4 @@ app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)') app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction) app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod) + return {'version': '1.0', 'parallel_read_safe': True} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:43:51 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:43:51 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Dont_define_an?= =?utf-8?q?_empty_SPHINXOPTS=2C_which_overrides_a_definition_from_the?= Message-ID: <20140930204350.86199.84480@mail.hg.python.org> https://hg.python.org/cpython/rev/027850416c8e changeset: 92688:027850416c8e branch: 2.7 user: Georg Brandl date: Tue Sep 30 22:23:57 2014 +0200 summary: Dont define an empty SPHINXOPTS, which overrides a definition from the environment. 
files: Doc/Makefile | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -6,7 +6,6 @@ # You can set these variables from the command line. PYTHON = python SPHINXBUILD = sphinx-build -SPHINXOPTS = PAPER = SOURCES = DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:51:56 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:51:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Move_Doc/tools?= =?utf-8?q?/sphinxext_content_to_Doc/tools=2C_there_is_no_need_for_the_nes?= =?utf-8?q?ted?= Message-ID: <20140930205149.75005.47177@mail.hg.python.org> https://hg.python.org/cpython/rev/0d98344af1bb changeset: 92689:0d98344af1bb branch: 2.7 user: Georg Brandl date: Tue Sep 30 22:51:30 2014 +0200 summary: Move Doc/tools/sphinxext content to Doc/tools, there is no need for the nested subdirectory anymore. files: Doc/Makefile | 4 +- Doc/README.txt | 3 +- Doc/conf.py | 6 +- Doc/make.bat | 2 +- Doc/tools/sphinxext/c_annotations.py | 0 Doc/tools/sphinxext/download.html | 0 Doc/tools/sphinxext/indexcontent.html | 0 Doc/tools/sphinxext/indexsidebar.html | 0 Doc/tools/sphinxext/layout.html | 0 Doc/tools/sphinxext/opensearch.xml | 0 Doc/tools/sphinxext/patchlevel.py | 0 Doc/tools/sphinxext/pyspecific.py | 0 Doc/tools/roman.py | 80 ---------- Doc/tools/sphinx-build.py | 28 --- Doc/tools/sphinxext/static/basic.css | 0 Doc/tools/sphinxext/static/copybutton.js | 0 Doc/tools/sphinxext/static/py.png | 0 Doc/tools/sphinxext/static/sidebar.js | 0 Doc/tools/sphinxext/static/version_switch.js | 0 Doc/tools/sphinxext/susp-ignored.csv | 0 Doc/tools/sphinxext/suspicious.py | 0 Lib/pydoc.py | 2 +- 22 files changed, 8 insertions(+), 117 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -8,7 +8,7 @@ SPHINXBUILD = sphinx-build PAPER = SOURCES = -DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) +DISTVERSION = $(shell $(PYTHON) tools/patchlevel.py) ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ $(SPHINXOPTS) . build/$(BUILDER) $(SOURCES) @@ -79,7 +79,7 @@ @$(MAKE) build BUILDER=$(BUILDER) || { \ echo "Suspicious check complete; look for any errors in the above output" \ "or in build/$(BUILDER)/suspicious.csv. If all issues are false" \ - "positives, append that file to tools/sphinxext/susp-ignored.csv."; \ + "positives, append that file to tools/susp-ignored.csv."; \ false; } coverage: BUILDER = coverage diff --git a/Doc/README.txt b/Doc/README.txt --- a/Doc/README.txt +++ b/Doc/README.txt @@ -79,8 +79,7 @@ * "pydoc-topics", which builds a Python module containing a dictionary with plain text documentation for the labels defined in - `tools/sphinxext/pyspecific.py` -- pydoc needs these to show topic and - keyword help. + `tools/pyspecific.py` -- pydoc needs these to show topic and keyword help. * "suspicious", which checks the parsed markup for text that looks like malformed and thus unconverted reST. diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -8,14 +8,14 @@ # that aren't pickleable (module imports are okay, they're removed automatically). 
import sys, os, time -sys.path.append(os.path.abspath('tools/sphinxext')) +sys.path.append(os.path.abspath('tools')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] -templates_path = ['tools/sphinxext'] +templates_path = ['tools'] # General substitutions. project = 'Python' @@ -89,7 +89,7 @@ html_use_opensearch = 'http://docs.python.org/' # Additional static files. -html_static_path = ['tools/sphinxext/static'] +html_static_path = ['tools/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') diff --git a/Doc/make.bat b/Doc/make.bat --- a/Doc/make.bat +++ b/Doc/make.bat @@ -12,7 +12,7 @@ if NOT DEFINED ProgramFiles(x86) set _PRGMFLS=%ProgramFiles% if "%HTMLHELP%" EQU "" set HTMLHELP=%_PRGMFLS%\HTML Help Workshop\hhc.exe -if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/sphinxext/patchlevel.py`) do set DISTVERSION=%%v +if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/patchlevel.py`) do set DISTVERSION=%%v if "%BUILDDIR%" EQU "" set BUILDDIR=build diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/c_annotations.py rename from Doc/tools/sphinxext/c_annotations.py rename to Doc/tools/c_annotations.py diff --git a/Doc/tools/sphinxext/download.html b/Doc/tools/download.html rename from Doc/tools/sphinxext/download.html rename to Doc/tools/download.html diff --git a/Doc/tools/sphinxext/indexcontent.html b/Doc/tools/indexcontent.html rename from Doc/tools/sphinxext/indexcontent.html rename to Doc/tools/indexcontent.html diff --git a/Doc/tools/sphinxext/indexsidebar.html b/Doc/tools/indexsidebar.html rename from Doc/tools/sphinxext/indexsidebar.html rename to Doc/tools/indexsidebar.html diff --git a/Doc/tools/sphinxext/layout.html b/Doc/tools/layout.html rename from Doc/tools/sphinxext/layout.html rename to Doc/tools/layout.html diff --git a/Doc/tools/sphinxext/opensearch.xml b/Doc/tools/opensearch.xml rename from Doc/tools/sphinxext/opensearch.xml rename to Doc/tools/opensearch.xml diff --git a/Doc/tools/sphinxext/patchlevel.py b/Doc/tools/patchlevel.py rename from Doc/tools/sphinxext/patchlevel.py rename to Doc/tools/patchlevel.py diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/pyspecific.py rename from Doc/tools/sphinxext/pyspecific.py rename to Doc/tools/pyspecific.py diff --git a/Doc/tools/roman.py b/Doc/tools/roman.py deleted file mode 100644 --- a/Doc/tools/roman.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Convert to and from Roman numerals""" - -__author__ = "Mark Pilgrim (f8dy at diveintopython.org)" -__version__ = "1.4" -__date__ = "8 August 2001" -__copyright__ = """Copyright (c) 2001 Mark Pilgrim - -This program is part of "Dive Into Python", a free Python tutorial for -experienced programmers. Visit http://diveintopython.org/ for the -latest version. 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the Python 2.1.1 license, available at -http://www.python.org/2.1.1/license.html -""" - -import re - -#Define exceptions -class RomanError(Exception): pass -class OutOfRangeError(RomanError): pass -class NotIntegerError(RomanError): pass -class InvalidRomanNumeralError(RomanError): pass - -#Define digit mapping -romanNumeralMap = (('M', 1000), - ('CM', 900), - ('D', 500), - ('CD', 400), - ('C', 100), - ('XC', 90), - ('L', 50), - ('XL', 40), - ('X', 10), - ('IX', 9), - ('V', 5), - ('IV', 4), - ('I', 1)) - -def toRoman(n): - """convert integer to Roman numeral""" - if not (0 < n < 5000): - raise OutOfRangeError("number out of range (must be 1..4999)") - if int(n) != n: - raise NotIntegerError("decimals can not be converted") - - result = "" - for numeral, integer in romanNumeralMap: - while n >= integer: - result += numeral - n -= integer - return result - -#Define pattern to detect valid Roman numerals -romanNumeralPattern = re.compile(""" - ^ # beginning of string - M{0,4} # thousands - 0 to 4 M's - (CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's), - # or 500-800 (D, followed by 0 to 3 C's) - (XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's), - # or 50-80 (L, followed by 0 to 3 X's) - (IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's), - # or 5-8 (V, followed by 0 to 3 I's) - $ # end of string - """ ,re.VERBOSE) - -def fromRoman(s): - """convert Roman numeral to integer""" - if not s: - raise InvalidRomanNumeralError('Input can not be blank') - if not romanNumeralPattern.search(s): - raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s) - - result = 0 - index = 0 - for numeral, integer in romanNumeralMap: - while s[index:index+len(numeral)] == numeral: - result += integer - index += len(numeral) - return result diff --git a/Doc/tools/sphinx-build.py b/Doc/tools/sphinx-build.py deleted file mode 100644 --- a/Doc/tools/sphinx-build.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- -""" - Sphinx - Python documentation toolchain - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - :copyright: 2007-2010 by Georg Brandl. - :license: Python license. -""" - -import sys -import warnings - -# Get rid of UserWarnings reported by pkg_resources. -warnings.filterwarnings('ignore', category=UserWarning, module='jinja2') - -if __name__ == '__main__': - - if sys.version_info[:3] < (2, 4, 0) or sys.version_info[:3] > (3, 0, 0): - sys.stderr.write("""\ -Error: Sphinx needs to be executed with Python 2.4 or newer (not 3.x though). -(If you run this from the Makefile, you can set the PYTHON variable -to the path of an alternative interpreter executable, e.g., -``make html PYTHON=python2.5``). 
-""") - sys.exit(1) - - from sphinx import main - sys.exit(main(sys.argv)) diff --git a/Doc/tools/sphinxext/static/basic.css b/Doc/tools/static/basic.css rename from Doc/tools/sphinxext/static/basic.css rename to Doc/tools/static/basic.css diff --git a/Doc/tools/sphinxext/static/copybutton.js b/Doc/tools/static/copybutton.js rename from Doc/tools/sphinxext/static/copybutton.js rename to Doc/tools/static/copybutton.js diff --git a/Doc/tools/sphinxext/static/py.png b/Doc/tools/static/py.png rename from Doc/tools/sphinxext/static/py.png rename to Doc/tools/static/py.png diff --git a/Doc/tools/sphinxext/static/sidebar.js b/Doc/tools/static/sidebar.js rename from Doc/tools/sphinxext/static/sidebar.js rename to Doc/tools/static/sidebar.js diff --git a/Doc/tools/sphinxext/static/version_switch.js b/Doc/tools/static/version_switch.js rename from Doc/tools/sphinxext/static/version_switch.js rename to Doc/tools/static/version_switch.js diff --git a/Doc/tools/sphinxext/susp-ignored.csv b/Doc/tools/susp-ignored.csv rename from Doc/tools/sphinxext/susp-ignored.csv rename to Doc/tools/susp-ignored.csv diff --git a/Doc/tools/sphinxext/suspicious.py b/Doc/tools/suspicious.py rename from Doc/tools/sphinxext/suspicious.py rename to Doc/tools/suspicious.py diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -1599,7 +1599,7 @@ # in pydoc_data/topics.py. # # CAUTION: if you change one of these dictionaries, be sure to adapt the - # list of needed labels in Doc/tools/sphinxext/pyspecific.py and + # list of needed labels in Doc/tools/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:56:59 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:56:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E4_-=3E_default?= =?utf-8?q?=29=3A_merge_with_3=2E4?= Message-ID: <20140930205654.86195.59743@mail.hg.python.org> https://hg.python.org/cpython/rev/2de58aebae64 changeset: 92692:2de58aebae64 parent: 92691:488a401a5d23 parent: 92690:40c031308967 user: Georg Brandl date: Tue Sep 30 22:56:47 2014 +0200 summary: merge with 3.4 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:56:59 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:56:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Move_Doc/tools?= =?utf-8?q?/sphinxext_content_to_Doc/tools=2C_there_is_no_need_for_the_nes?= =?utf-8?q?ted?= Message-ID: <20140930205654.74993.26893@mail.hg.python.org> https://hg.python.org/cpython/rev/40c031308967 changeset: 92690:40c031308967 branch: 3.4 parent: 92685:860c50e40720 user: Georg Brandl date: Tue Sep 30 22:51:30 2014 +0200 summary: Move Doc/tools/sphinxext content to Doc/tools, there is no need for the nested subdirectory anymore. 
files: Doc/Makefile | 4 +- Doc/README.txt | 3 +- Doc/conf.py | 6 +- Doc/make.bat | 2 +- Doc/tools/sphinxext/c_annotations.py | 0 Doc/tools/sphinxext/download.html | 0 Doc/tools/sphinxext/indexcontent.html | 0 Doc/tools/sphinxext/indexsidebar.html | 0 Doc/tools/sphinxext/layout.html | 0 Doc/tools/sphinxext/opensearch.xml | 0 Doc/tools/sphinxext/patchlevel.py | 0 Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css | 0 Doc/tools/sphinxext/pydoctheme/theme.conf | 0 Doc/tools/sphinxext/pyspecific.py | 0 Doc/tools/roman.py | 80 ---------- Doc/tools/sphinxext/static/basic.css | 0 Doc/tools/sphinxext/static/copybutton.js | 0 Doc/tools/sphinxext/static/py.png | 0 Doc/tools/sphinxext/static/sidebar.js | 0 Doc/tools/sphinxext/static/version_switch.js | 0 Doc/tools/sphinxext/susp-ignored.csv | 0 Doc/tools/sphinxext/suspicious.py | 0 Lib/pydoc.py | 2 +- 23 files changed, 8 insertions(+), 89 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -8,7 +8,7 @@ SPHINXBUILD = sphinx-build PAPER = SOURCES = -DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) +DISTVERSION = $(shell $(PYTHON) tools/patchlevel.py) ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ $(SPHINXOPTS) . build/$(BUILDER) $(SOURCES) @@ -79,7 +79,7 @@ @$(MAKE) build BUILDER=$(BUILDER) || { \ echo "Suspicious check complete; look for any errors in the above output" \ "or in build/$(BUILDER)/suspicious.csv. If all issues are false" \ - "positives, append that file to tools/sphinxext/susp-ignored.csv."; \ + "positives, append that file to tools/susp-ignored.csv."; \ false; } coverage: BUILDER = coverage diff --git a/Doc/README.txt b/Doc/README.txt --- a/Doc/README.txt +++ b/Doc/README.txt @@ -79,8 +79,7 @@ * "pydoc-topics", which builds a Python module containing a dictionary with plain text documentation for the labels defined in - `tools/sphinxext/pyspecific.py` -- pydoc needs these to show topic and - keyword help. + `tools/pyspecific.py` -- pydoc needs these to show topic and keyword help. * "suspicious", which checks the parsed markup for text that looks like malformed and thus unconverted reST. diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -7,14 +7,14 @@ # that aren't pickleable (module imports are okay, they're removed automatically). import sys, os, time -sys.path.append(os.path.abspath('tools/sphinxext')) +sys.path.append(os.path.abspath('tools')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] -templates_path = ['tools/sphinxext'] +templates_path = ['tools'] # General substitutions. project = 'Python' @@ -96,7 +96,7 @@ html_use_opensearch = 'http://docs.python.org/' + version # Additional static files. -html_static_path = ['tools/sphinxext/static'] +html_static_path = ['tools/static'] # Output file base name for HTML help builder. 
htmlhelp_basename = 'python' + release.replace('.', '') diff --git a/Doc/make.bat b/Doc/make.bat --- a/Doc/make.bat +++ b/Doc/make.bat @@ -12,7 +12,7 @@ if NOT DEFINED ProgramFiles(x86) set _PRGMFLS=%ProgramFiles% if "%HTMLHELP%" EQU "" set HTMLHELP=%_PRGMFLS%\HTML Help Workshop\hhc.exe -if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/sphinxext/patchlevel.py`) do set DISTVERSION=%%v +if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/patchlevel.py`) do set DISTVERSION=%%v if "%BUILDDIR%" EQU "" set BUILDDIR=build diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/c_annotations.py rename from Doc/tools/sphinxext/c_annotations.py rename to Doc/tools/c_annotations.py diff --git a/Doc/tools/sphinxext/download.html b/Doc/tools/download.html rename from Doc/tools/sphinxext/download.html rename to Doc/tools/download.html diff --git a/Doc/tools/sphinxext/indexcontent.html b/Doc/tools/indexcontent.html rename from Doc/tools/sphinxext/indexcontent.html rename to Doc/tools/indexcontent.html diff --git a/Doc/tools/sphinxext/indexsidebar.html b/Doc/tools/indexsidebar.html rename from Doc/tools/sphinxext/indexsidebar.html rename to Doc/tools/indexsidebar.html diff --git a/Doc/tools/sphinxext/layout.html b/Doc/tools/layout.html rename from Doc/tools/sphinxext/layout.html rename to Doc/tools/layout.html diff --git a/Doc/tools/sphinxext/opensearch.xml b/Doc/tools/opensearch.xml rename from Doc/tools/sphinxext/opensearch.xml rename to Doc/tools/opensearch.xml diff --git a/Doc/tools/sphinxext/patchlevel.py b/Doc/tools/patchlevel.py rename from Doc/tools/sphinxext/patchlevel.py rename to Doc/tools/patchlevel.py diff --git a/Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css b/Doc/tools/pydoctheme/static/pydoctheme.css rename from Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css rename to Doc/tools/pydoctheme/static/pydoctheme.css diff --git a/Doc/tools/sphinxext/pydoctheme/theme.conf b/Doc/tools/pydoctheme/theme.conf rename from Doc/tools/sphinxext/pydoctheme/theme.conf rename to Doc/tools/pydoctheme/theme.conf diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/pyspecific.py rename from Doc/tools/sphinxext/pyspecific.py rename to Doc/tools/pyspecific.py diff --git a/Doc/tools/roman.py b/Doc/tools/roman.py deleted file mode 100644 --- a/Doc/tools/roman.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Convert to and from Roman numerals""" - -__author__ = "Mark Pilgrim (f8dy at diveintopython.org)" -__version__ = "1.4" -__date__ = "8 August 2001" -__copyright__ = """Copyright (c) 2001 Mark Pilgrim - -This program is part of "Dive Into Python", a free Python tutorial for -experienced programmers. Visit http://diveintopython.org/ for the -latest version. 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the Python 2.1.1 license, available at -http://www.python.org/2.1.1/license.html -""" - -import re - -#Define exceptions -class RomanError(Exception): pass -class OutOfRangeError(RomanError): pass -class NotIntegerError(RomanError): pass -class InvalidRomanNumeralError(RomanError): pass - -#Define digit mapping -romanNumeralMap = (('M', 1000), - ('CM', 900), - ('D', 500), - ('CD', 400), - ('C', 100), - ('XC', 90), - ('L', 50), - ('XL', 40), - ('X', 10), - ('IX', 9), - ('V', 5), - ('IV', 4), - ('I', 1)) - -def toRoman(n): - """convert integer to Roman numeral""" - if not (0 < n < 5000): - raise OutOfRangeError("number out of range (must be 1..4999)") - if int(n) != n: - raise NotIntegerError("decimals can not be converted") - - result = "" - for numeral, integer in romanNumeralMap: - while n >= integer: - result += numeral - n -= integer - return result - -#Define pattern to detect valid Roman numerals -romanNumeralPattern = re.compile(""" - ^ # beginning of string - M{0,4} # thousands - 0 to 4 M's - (CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's), - # or 500-800 (D, followed by 0 to 3 C's) - (XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's), - # or 50-80 (L, followed by 0 to 3 X's) - (IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's), - # or 5-8 (V, followed by 0 to 3 I's) - $ # end of string - """ ,re.VERBOSE) - -def fromRoman(s): - """convert Roman numeral to integer""" - if not s: - raise InvalidRomanNumeralError('Input can not be blank') - if not romanNumeralPattern.search(s): - raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s) - - result = 0 - index = 0 - for numeral, integer in romanNumeralMap: - while s[index:index+len(numeral)] == numeral: - result += integer - index += len(numeral) - return result diff --git a/Doc/tools/sphinxext/static/basic.css b/Doc/tools/static/basic.css rename from Doc/tools/sphinxext/static/basic.css rename to Doc/tools/static/basic.css diff --git a/Doc/tools/sphinxext/static/copybutton.js b/Doc/tools/static/copybutton.js rename from Doc/tools/sphinxext/static/copybutton.js rename to Doc/tools/static/copybutton.js diff --git a/Doc/tools/sphinxext/static/py.png b/Doc/tools/static/py.png rename from Doc/tools/sphinxext/static/py.png rename to Doc/tools/static/py.png diff --git a/Doc/tools/sphinxext/static/sidebar.js b/Doc/tools/static/sidebar.js rename from Doc/tools/sphinxext/static/sidebar.js rename to Doc/tools/static/sidebar.js diff --git a/Doc/tools/sphinxext/static/version_switch.js b/Doc/tools/static/version_switch.js rename from Doc/tools/sphinxext/static/version_switch.js rename to Doc/tools/static/version_switch.js diff --git a/Doc/tools/sphinxext/susp-ignored.csv b/Doc/tools/susp-ignored.csv rename from Doc/tools/sphinxext/susp-ignored.csv rename to Doc/tools/susp-ignored.csv diff --git a/Doc/tools/sphinxext/suspicious.py b/Doc/tools/suspicious.py rename from Doc/tools/sphinxext/suspicious.py rename to Doc/tools/suspicious.py diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -1638,7 +1638,7 @@ # in pydoc_data/topics.py. # # CAUTION: if you change one of these dictionaries, be sure to adapt the - # list of needed labels in Doc/tools/sphinxext/pyspecific.py and + # list of needed labels in Doc/tools/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:56:59 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:56:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Move_Doc/tools/sphinxext_c?= =?utf-8?q?ontent_to_Doc/tools=2C_there_is_no_need_for_the_nested?= Message-ID: <20140930205654.75009.17521@mail.hg.python.org> https://hg.python.org/cpython/rev/488a401a5d23 changeset: 92691:488a401a5d23 parent: 92686:457683a973f3 user: Georg Brandl date: Tue Sep 30 22:56:38 2014 +0200 summary: Move Doc/tools/sphinxext content to Doc/tools, there is no need for the nested subdirectory anymore. files: Doc/Makefile | 4 +- Doc/README.txt | 3 +- Doc/conf.py | 8 +- Doc/make.bat | 2 +- Doc/tools/sphinxext/c_annotations.py | 0 Doc/tools/sphinxext/download.html | 0 Doc/tools/sphinxext/indexcontent.html | 0 Doc/tools/sphinxext/indexsidebar.html | 0 Doc/tools/sphinxext/layout.html | 0 Doc/tools/sphinxext/opensearch.xml | 0 Doc/tools/sphinxext/patchlevel.py | 0 Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css | 0 Doc/tools/sphinxext/pydoctheme/theme.conf | 0 Doc/tools/sphinxext/pyspecific.py | 0 Doc/tools/roman.py | 80 ---------- Doc/tools/sphinxext/static/basic.css | 0 Doc/tools/sphinxext/static/copybutton.js | 0 Doc/tools/sphinxext/static/py.png | 0 Doc/tools/sphinxext/static/sidebar.js | 0 Doc/tools/sphinxext/static/version_switch.js | 0 Doc/tools/sphinxext/susp-ignored.csv | 0 Doc/tools/sphinxext/suspicious.py | 0 Lib/pydoc.py | 2 +- 23 files changed, 9 insertions(+), 90 deletions(-) diff --git a/Doc/Makefile b/Doc/Makefile --- a/Doc/Makefile +++ b/Doc/Makefile @@ -8,7 +8,7 @@ SPHINXBUILD = sphinx-build PAPER = SOURCES = -DISTVERSION = $(shell $(PYTHON) tools/sphinxext/patchlevel.py) +DISTVERSION = $(shell $(PYTHON) tools/patchlevel.py) ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees -D latex_paper_size=$(PAPER) \ $(SPHINXOPTS) . build/$(BUILDER) $(SOURCES) @@ -79,7 +79,7 @@ @$(MAKE) build BUILDER=$(BUILDER) || { \ echo "Suspicious check complete; look for any errors in the above output" \ "or in build/$(BUILDER)/suspicious.csv. If all issues are false" \ - "positives, append that file to tools/sphinxext/susp-ignored.csv."; \ + "positives, append that file to tools/susp-ignored.csv."; \ false; } coverage: BUILDER = coverage diff --git a/Doc/README.txt b/Doc/README.txt --- a/Doc/README.txt +++ b/Doc/README.txt @@ -79,8 +79,7 @@ * "pydoc-topics", which builds a Python module containing a dictionary with plain text documentation for the labels defined in - `tools/sphinxext/pyspecific.py` -- pydoc needs these to show topic and - keyword help. + `tools/pyspecific.py` -- pydoc needs these to show topic and keyword help. * "suspicious", which checks the parsed markup for text that looks like malformed and thus unconverted reST. diff --git a/Doc/conf.py b/Doc/conf.py --- a/Doc/conf.py +++ b/Doc/conf.py @@ -7,14 +7,14 @@ # that aren't pickleable (module imports are okay, they're removed automatically). import sys, os, time -sys.path.append(os.path.abspath('tools/sphinxext')) +sys.path.append(os.path.abspath('tools')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] -templates_path = ['tools/sphinxext'] +templates_path = ['tools'] # General substitutions. 
project = 'Python' @@ -68,7 +68,7 @@ # ----------------------- html_theme = 'pydoctheme' -html_theme_path = ['tools/sphinxext'] +html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} html_short_title = '%s Documentation' % release @@ -96,7 +96,7 @@ html_use_opensearch = 'http://docs.python.org/' + version # Additional static files. -html_static_path = ['tools/sphinxext/static'] +html_static_path = ['tools/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') diff --git a/Doc/make.bat b/Doc/make.bat --- a/Doc/make.bat +++ b/Doc/make.bat @@ -12,7 +12,7 @@ if NOT DEFINED ProgramFiles(x86) set _PRGMFLS=%ProgramFiles% if "%HTMLHELP%" EQU "" set HTMLHELP=%_PRGMFLS%\HTML Help Workshop\hhc.exe -if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/sphinxext/patchlevel.py`) do set DISTVERSION=%%v +if "%DISTVERSION%" EQU "" for /f "usebackq" %%v in (`%PYTHON% tools/patchlevel.py`) do set DISTVERSION=%%v if "%BUILDDIR%" EQU "" set BUILDDIR=build diff --git a/Doc/tools/sphinxext/c_annotations.py b/Doc/tools/c_annotations.py rename from Doc/tools/sphinxext/c_annotations.py rename to Doc/tools/c_annotations.py diff --git a/Doc/tools/sphinxext/download.html b/Doc/tools/download.html rename from Doc/tools/sphinxext/download.html rename to Doc/tools/download.html diff --git a/Doc/tools/sphinxext/indexcontent.html b/Doc/tools/indexcontent.html rename from Doc/tools/sphinxext/indexcontent.html rename to Doc/tools/indexcontent.html diff --git a/Doc/tools/sphinxext/indexsidebar.html b/Doc/tools/indexsidebar.html rename from Doc/tools/sphinxext/indexsidebar.html rename to Doc/tools/indexsidebar.html diff --git a/Doc/tools/sphinxext/layout.html b/Doc/tools/layout.html rename from Doc/tools/sphinxext/layout.html rename to Doc/tools/layout.html diff --git a/Doc/tools/sphinxext/opensearch.xml b/Doc/tools/opensearch.xml rename from Doc/tools/sphinxext/opensearch.xml rename to Doc/tools/opensearch.xml diff --git a/Doc/tools/sphinxext/patchlevel.py b/Doc/tools/patchlevel.py rename from Doc/tools/sphinxext/patchlevel.py rename to Doc/tools/patchlevel.py diff --git a/Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css b/Doc/tools/pydoctheme/static/pydoctheme.css rename from Doc/tools/sphinxext/pydoctheme/static/pydoctheme.css rename to Doc/tools/pydoctheme/static/pydoctheme.css diff --git a/Doc/tools/sphinxext/pydoctheme/theme.conf b/Doc/tools/pydoctheme/theme.conf rename from Doc/tools/sphinxext/pydoctheme/theme.conf rename to Doc/tools/pydoctheme/theme.conf diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/pyspecific.py rename from Doc/tools/sphinxext/pyspecific.py rename to Doc/tools/pyspecific.py diff --git a/Doc/tools/roman.py b/Doc/tools/roman.py deleted file mode 100644 --- a/Doc/tools/roman.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Convert to and from Roman numerals""" - -__author__ = "Mark Pilgrim (f8dy at diveintopython.org)" -__version__ = "1.4" -__date__ = "8 August 2001" -__copyright__ = """Copyright (c) 2001 Mark Pilgrim - -This program is part of "Dive Into Python", a free Python tutorial for -experienced programmers. Visit http://diveintopython.org/ for the -latest version. 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the Python 2.1.1 license, available at -http://www.python.org/2.1.1/license.html -""" - -import re - -#Define exceptions -class RomanError(Exception): pass -class OutOfRangeError(RomanError): pass -class NotIntegerError(RomanError): pass -class InvalidRomanNumeralError(RomanError): pass - -#Define digit mapping -romanNumeralMap = (('M', 1000), - ('CM', 900), - ('D', 500), - ('CD', 400), - ('C', 100), - ('XC', 90), - ('L', 50), - ('XL', 40), - ('X', 10), - ('IX', 9), - ('V', 5), - ('IV', 4), - ('I', 1)) - -def toRoman(n): - """convert integer to Roman numeral""" - if not (0 < n < 5000): - raise OutOfRangeError("number out of range (must be 1..4999)") - if int(n) != n: - raise NotIntegerError("decimals can not be converted") - - result = "" - for numeral, integer in romanNumeralMap: - while n >= integer: - result += numeral - n -= integer - return result - -#Define pattern to detect valid Roman numerals -romanNumeralPattern = re.compile(""" - ^ # beginning of string - M{0,4} # thousands - 0 to 4 M's - (CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's), - # or 500-800 (D, followed by 0 to 3 C's) - (XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's), - # or 50-80 (L, followed by 0 to 3 X's) - (IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's), - # or 5-8 (V, followed by 0 to 3 I's) - $ # end of string - """ ,re.VERBOSE) - -def fromRoman(s): - """convert Roman numeral to integer""" - if not s: - raise InvalidRomanNumeralError('Input can not be blank') - if not romanNumeralPattern.search(s): - raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s) - - result = 0 - index = 0 - for numeral, integer in romanNumeralMap: - while s[index:index+len(numeral)] == numeral: - result += integer - index += len(numeral) - return result diff --git a/Doc/tools/sphinxext/static/basic.css b/Doc/tools/static/basic.css rename from Doc/tools/sphinxext/static/basic.css rename to Doc/tools/static/basic.css diff --git a/Doc/tools/sphinxext/static/copybutton.js b/Doc/tools/static/copybutton.js rename from Doc/tools/sphinxext/static/copybutton.js rename to Doc/tools/static/copybutton.js diff --git a/Doc/tools/sphinxext/static/py.png b/Doc/tools/static/py.png rename from Doc/tools/sphinxext/static/py.png rename to Doc/tools/static/py.png diff --git a/Doc/tools/sphinxext/static/sidebar.js b/Doc/tools/static/sidebar.js rename from Doc/tools/sphinxext/static/sidebar.js rename to Doc/tools/static/sidebar.js diff --git a/Doc/tools/sphinxext/static/version_switch.js b/Doc/tools/static/version_switch.js rename from Doc/tools/sphinxext/static/version_switch.js rename to Doc/tools/static/version_switch.js diff --git a/Doc/tools/sphinxext/susp-ignored.csv b/Doc/tools/susp-ignored.csv rename from Doc/tools/sphinxext/susp-ignored.csv rename to Doc/tools/susp-ignored.csv diff --git a/Doc/tools/sphinxext/suspicious.py b/Doc/tools/suspicious.py rename from Doc/tools/sphinxext/suspicious.py rename to Doc/tools/suspicious.py diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -1636,7 +1636,7 @@ # in pydoc_data/topics.py. # # CAUTION: if you change one of these dictionaries, be sure to adapt the - # list of needed labels in Doc/tools/sphinxext/pyspecific.py and + # list of needed labels in Doc/tools/pyspecific.py and # regenerate the pydoc_data/topics.py file by running # make pydoc-topics # in Doc/ and copying the output file into the Lib/ directory. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Sep 30 22:57:45 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:57:45 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Path_update=2E?= Message-ID: <20140930205742.86193.8719@mail.hg.python.org> https://hg.python.org/peps/rev/4d6b1cb66ae1 changeset: 5568:4d6b1cb66ae1 user: Georg Brandl date: Tue Sep 30 22:57:38 2014 +0200 summary: Path update. files: pep-0101.txt | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -178,7 +178,7 @@ ___ Commit any changes to pydoc_topics.py and the doc sources. - ___ Make sure the SOURCE_URI in ``Doc/tools/sphinxext/pyspecific.py`` + ___ Make sure the SOURCE_URI in ``Doc/tools/pyspecific.py`` points to the right branch in the hg repository (or ``default`` for unstable releases of the default branch). @@ -283,7 +283,7 @@ pushed. Login to hg.python.org and edit (as the "hg" user) ``/data/hg/repos/cpython/.hg/hgrc`` to that effect. - ___ For a final major release, Doc/tools/sphinxext/static/version_switch.js + ___ For a final major release, Doc/tools/static/version_switch.js must be updated in all maintained branches, so that the new maintenance branch is not "dev" anymore and there is a new "dev" version. -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Sep 30 22:58:17 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 20:58:17 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_Path_update=2E?= Message-ID: <20140930205810.23211.99501@mail.hg.python.org> https://hg.python.org/devguide/rev/3774a980615c changeset: 715:3774a980615c user: Georg Brandl date: Tue Sep 30 22:58:03 2014 +0200 summary: Path update. files: documenting.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/documenting.rst b/documenting.rst --- a/documenting.rst +++ b/documenting.rst @@ -1503,7 +1503,7 @@ * "pydoc-topics", which builds a Python module containing a dictionary with plain text documentation for the labels defined in - :file:`Doc/tools/sphinxext/pyspecific.py` -- pydoc needs these to show topic + :file:`Doc/tools/pyspecific.py` -- pydoc needs these to show topic and keyword help. * "suspicious", which checks the parsed markup for text that looks like -- Repository URL: https://hg.python.org/devguide From python-checkins at python.org Tue Sep 30 23:03:24 2014 From: python-checkins at python.org (georg.brandl) Date: Tue, 30 Sep 2014 21:03:24 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Remove_duplica?= =?utf-8?q?te_PyLong_function_descriptions=2E?= Message-ID: <20140930210323.23205.44397@mail.hg.python.org> https://hg.python.org/cpython/rev/68de12ae664d changeset: 92693:68de12ae664d branch: 2.7 parent: 92689:0d98344af1bb user: Georg Brandl date: Tue Sep 30 23:02:52 2014 +0200 summary: Remove duplicate PyLong function descriptions. files: Doc/c-api/long.rst | 28 ---------------------------- 1 files changed, 0 insertions(+), 28 deletions(-) diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst --- a/Doc/c-api/long.rst +++ b/Doc/c-api/long.rst @@ -65,22 +65,6 @@ .. versionadded:: 2.6 -.. c:function:: PyObject* PyLong_FromSsize_t(Py_ssize_t v) - - Return a new :c:type:`PyLongObject` object with a value of *v*, or *NULL* - on failure. - - .. versionadded:: 2.6 - - -.. 
c:function:: PyObject* PyLong_FromSize_t(size_t v) - - Return a new :c:type:`PyLongObject` object with a value of *v*, or *NULL* - on failure. - - .. versionadded:: 2.6 - - .. c:function:: PyObject* PyLong_FromLongLong(PY_LONG_LONG v) Return a new :c:type:`PyLongObject` object from a C :c:type:`long long`, or *NULL* @@ -199,18 +183,6 @@ raised. -.. c:function:: Py_ssize_t PyLong_AsSsize_t(PyObject *pylong) - - .. index:: - single: PY_SSIZE_T_MAX - - Return a :c:type:`Py_ssize_t` representation of the contents of *pylong*. If - *pylong* is greater than :const:`PY_SSIZE_T_MAX`, an :exc:`OverflowError` is - raised. - - .. versionadded:: 2.6 - - .. c:function:: PY_LONG_LONG PyLong_AsLongLong(PyObject *pylong) .. index:: -- Repository URL: https://hg.python.org/cpython
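
The "Prepare sphinx extensions for 1.3" changesets above add one line to the end of each extension's setup() function: a metadata dictionary declaring the extension safe for Sphinx 1.3's parallel reading of source files. As a minimal sketch (the module name, directive name, and paragraph text below are invented for illustration and are not part of Doc/tools), a self-contained extension returning the same metadata could look like this:

    # hello_ext.py -- hypothetical example, not shipped with CPython's docs
    from docutils import nodes
    from docutils.parsers.rst import Directive


    class HelloDirective(Directive):
        """Insert a fixed paragraph wherever the directive is used."""

        def run(self):
            # A directive's run() returns a list of docutils nodes.
            return [nodes.paragraph(text='Hello from a parallel-safe extension.')]


    def setup(app):
        # Register the directive, much as pyspecific.py registers 'miscnews'.
        app.add_directive('hello', HelloDirective)
        # The returned dict is extension metadata: 'version' identifies the
        # extension's own version to Sphinx, and 'parallel_read_safe' tells
        # Sphinx 1.3+ that sources using this extension may be read in
        # parallel worker processes.
        return {'version': '1.0', 'parallel_read_safe': True}

In conf.py such an extension would be enabled by adding 'hello_ext' to the extensions list; an extension that keeps mutable global state while sources are read should omit the flag or set it to False.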